/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>
#include <vm/pmap.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>
#include <sys/spinlock2.h>

#include <machine/limits.h>

#ifdef INET
extern int tcp_sosend_agglim;
extern int tcp_sosend_async;
extern int tcp_sosend_jcluster;
extern int udp_sosend_async;
extern int udp_sosend_prepend;

static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static int	soclose_sync(struct socket *so, int fflag);
static void	soclose_fast(struct socket *so);

static struct filterops solisten_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite };
static struct filterops soexcept_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread };

MALLOC_DEFINE(M_SOCKET, "socket", "socket struct");
MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

static int use_soclose_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW,
    &use_soclose_fast, 0, "Fast socket close");

int use_soaccept_pred_fast = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW,
    &use_soaccept_pred_fast, 0, "Fast socket accept prediction");

int use_sendfile_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW,
    &use_sendfile_async, 0, "sendfile uses asynchronous pru_send");

int use_soconnect_async = 1;
SYSCTL_INT(_kern_ipc, OID_AUTO, soconnect_async, CTLFLAG_RW,
    &use_soconnect_async, 0, "soconnect uses asynchronous pru_connect");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
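
/*
 * Illustrative sketch (not compiled): the "switching out" mentioned
 * above happens through the so_pru_*() wrappers.  A call such as
 *
 *	error = so_pru_attach(so, proto, &ai);
 *
 * packages its arguments into a netmsg and dispatches it to the
 * socket's protocol thread (so->so_port), so protocol code runs
 * serialized in that thread rather than in the caller's context.
 * PR_SYNC_PORT protocols execute the call synchronously instead.
 */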

/*
 * Get a socket structure, and initialize it.
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(int waitok, struct protosw *pr)
{
	struct socket *so;
	unsigned waitmask;

	waitmask = waitok ? M_WAITOK : M_NOWAIT;
	so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask);
	if (so) {
		/* XXX race condition for reentrant kernel */
		so->so_proto = pr;
		TAILQ_INIT(&so->so_aiojobq);
		TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist);
		TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist);
		lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok");
		lwkt_token_init(&so->so_snd.ssb_token, "sndtok");
		spin_init(&so->so_rcvd_spin, "soalloc");
		netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport,
			    MSGF_DROPABLE | MSGF_PRIORITY,
			    so->so_proto->pr_usrreqs->pru_rcvd);
		so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC;
		so->so_state = SS_NOFDREF;
		so->so_refs = 1;
	}
	return so;
}

int
socreate(int dom, struct socket **aso, int type,
	int proto, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct protosw *prp;
	struct socket *so;
	struct pru_attach_info ai;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == NULL || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_INET6 &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != NULL, prp);
	if (so == NULL)
		return (ENOBUFS);

	/*
	 * Callers of socreate() presumably will connect up a descriptor
	 * and call soclose() if they cannot.  This represents our so_refs
	 * (which should be 1) from soalloc().
	 */
	soclrstate(so, SS_NOFDREF);

	/*
	 * Set a default port for protocol processing.  No action will occur
	 * on the socket on this port until an inpcb is attached to it and
	 * is able to match incoming packets, or until the socket becomes
	 * available to userland.
	 *
	 * We normally default the socket to the protocol thread on cpu 0,
	 * if protocol does not provide its own method to initialize the
	 * default port.
	 *
	 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol
	 * thread and all pr_*()/pru_*() calls are executed synchronously.
	 */
	if (prp->pr_flags & PR_SYNC_PORT)
		so->so_port = &netisr_sync_port;
	else if (prp->pr_initport != NULL)
		so->so_port = prp->pr_initport();
	else
		so->so_port = netisr_cpuport(0);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = crhold(p->p_ucred);
	ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
	ai.p_ucred = p->p_ucred;
	ai.fd_rdir = p->p_fd->fd_rdir;

	/*
	 * Auto-sizing of socket buffers is managed by the protocols and
	 * the appropriate flags must be set in the pru_attach function.
	 */
	error = so_pru_attach(so, proto, &ai);
	if (error) {
		sosetstate(so, SS_NOFDREF);
		sofree(so);	/* from soalloc */
		return error;
	}

	/*
	 * NOTE: Returns referenced socket.
	 */
	*aso = so;
	return (0);
}
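
/*
 * Illustrative usage sketch (the real consumer is the socket(2)
 * syscall path, which is not part of this file):
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, td);
 *	if (error)
 *		return (error);
 *	...wire *so into a file descriptor...
 *	...on failure, soclose(so, 0) disposes of the socreate() ref...
 */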

int
sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	int error;

	error = so_pru_bind(so, nam, td);
	return (error);
}

static void
sodealloc(struct socket *so)
{
	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	if (so->so_faddr != NULL)
		kfree(so->so_faddr, M_SONAME);
	kfree(so, M_SOCKET);
}

int
solisten(struct socket *so, int backlog, struct thread *td)
{
	if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))
		return (EINVAL);

	lwkt_gettoken(&so->so_rcv.ssb_token);
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	return so_pru_listen(so, td);
}

/*
 * Destroy a disconnected socket.  This routine is a NOP if entities
 * still have a reference on the socket:
 *
 *	so_pcb -	The protocol stack still has a reference
 *	SS_NOFDREF -	There is no longer a file pointer reference
 */
void
sofree(struct socket *so)
{
	struct socket *head;

	/*
	 * This is a bit hackish at the moment.  We need to interlock
	 * any accept queue we are on before we potentially lose the
	 * last reference to avoid races against a re-reference from
	 * someone operating on the queue.
	 */
	while ((head = so->so_head) != NULL) {
		lwkt_getpooltoken(head);
		if (so->so_head == head)
			break;
		lwkt_relpooltoken(head);
	}

	/*
	 * Arbitrage the last free.
	 */
	KKASSERT(so->so_refs > 0);
	if (atomic_fetchadd_int(&so->so_refs, -1) != 1) {
		if (head)
			lwkt_relpooltoken(head);
		return;
	}

	KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF));
	KKASSERT((so->so_state & SS_ASSERTINPROG) == 0);

	/*
	 * We're done, remove ourselves from the accept queue we are
	 * on, if we are on one.
	 */
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			lwkt_relpooltoken(head);
			return;
		} else {
			panic("sofree: not queued");
		}
		soclrstate(so, SS_INCOMP);
		so->so_head = NULL;
		lwkt_relpooltoken(head);
	}
	ssb_release(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}
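
/*
 * Illustrative sketch of the reference protocol sofree() implements:
 * code that must keep a socket alive across a blocking or asynchronous
 * operation brackets the operation with a reference, e.g.
 *
 *	soreference(so);	(atomically bumps so_refs)
 *	...operate on so, possibly blocking...
 *	sofree(so);		(destroys so only when the last reference
 *				 is dropped, SS_NOFDREF is set, and
 *				 so_pcb is NULL)
 *
 * soabort_async() below is an in-file example of this pattern.
 */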

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int fflag)
{
	int error;

	funsetown(&so->so_sigio);
	sosetstate(so, SS_ISCLOSING);
	if (!use_soclose_fast ||
	    (so->so_proto->pr_flags & PR_SYNC_PORT) ||
	    ((so->so_state & SS_ISCONNECTED) &&
	     (so->so_options & SO_LINGER))) {
		error = soclose_sync(so, fflag);
	} else {
		soclose_fast(so);
		error = 0;
	}
	return error;
}

void
sodiscard(struct socket *so)
{
	lwkt_getpooltoken(so);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp;

		while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
			TAILQ_REMOVE(&so->so_incomp, sp, so_list);
			so->so_incqlen--;
			soclrstate(sp, SS_INCOMP);
			soabort_async(sp, TRUE);
		}
		while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			soclrstate(sp, SS_COMP);
			soabort_async(sp, TRUE);
		}
	}
	lwkt_relpooltoken(so);

	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	sosetstate(so, SS_NOFDREF);	/* take ref */
}

/*
 * Append the completed queue of head to head_inh (inheriting listen socket).
 */
void
soinherit(struct socket *head, struct socket *head_inh)
{
	boolean_t do_wakeup = FALSE;

	KASSERT(head->so_options & SO_ACCEPTCONN,
	    ("head does not accept connection"));
	KASSERT(head_inh->so_options & SO_ACCEPTCONN,
	    ("head_inh does not accept connection"));

	lwkt_getpooltoken(head);
	lwkt_getpooltoken(head_inh);

	if (head->so_qlen > 0)
		do_wakeup = TRUE;

	while (!TAILQ_EMPTY(&head->so_comp)) {
		struct ucred *old_cr;
		struct socket *sp;

		sp = TAILQ_FIRST(&head->so_comp);

		/*
		 * Remove this socket from the current listen socket
		 * completed queue.
		 */
		TAILQ_REMOVE(&head->so_comp, sp, so_list);
		head->so_qlen--;

		/* Save the old ucred for later free. */
		old_cr = sp->so_cred;

		/*
		 * Install this socket to the inheriting listen socket
		 * completed queue.
		 */
		sp->so_cred = crhold(head_inh->so_cred); /* non-blocking */
		sp->so_head = head_inh;

		TAILQ_INSERT_TAIL(&head_inh->so_comp, sp, so_list);
		head_inh->so_qlen++;

		/*
		 * NOTE:
		 * crfree() may block and release the tokens temporarily.
		 * However, we are fine here, since the transition is done.
		 */
		crfree(old_cr);
	}

	lwkt_relpooltoken(head_inh);
	lwkt_relpooltoken(head);

	if (do_wakeup) {
		/*
		 * "New" connections have arrived
		 */
		sorwakeup(head_inh);
		wakeup(&head_inh->so_timeo);
	}
}

static int
soclose_sync(struct socket *so, int fflag)
{
	int error = 0;

	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (fflag & FNONBLOCK))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep(&so->so_timeo, PCATCH,
					       "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;

		error2 = so_pru_detach(so);
		if (error2 == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return error;
		}
		if (error == 0)
			error = error2;
	}
discard:
	sodiscard(so);
	so_pru_sync(so);	/* unpend async sending */
	sofree(so);		/* dispose of ref */

	return (error);
}

static void
soclose_sofree_async_handler(netmsg_t msg)
{
	sofree(msg->base.nm_so);
}

static void
soclose_sofree_async(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_sofree_async_handler);
	lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_disconn_async_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0)
		so_pru_disconnect_direct(so);

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}

	sodiscard(so);
	sofree(so);
}

static void
soclose_disconn_async(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_disconn_async_handler);
	lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_detach_async_handler(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;

	if (so->so_pcb) {
		int error;

		error = so_pru_detach_direct(so);
		if (error == EJUSTRETURN) {
			/*
			 * Protocol will call sodiscard()
			 * and sofree() for us.
			 */
			return;
		}
	}

	sodiscard(so);
	sofree(so);
}

static void
soclose_detach_async(struct socket *so)
{
	struct netmsg_base *base = &so->so_clomsg;

	netmsg_init(base, so, &netisr_apanic_rport, 0,
	    soclose_detach_async_handler);
	lwkt_sendmsg(so->so_port, &base->lmsg);
}

static void
soclose_fast(struct socket *so)
{
	if (so->so_pcb == NULL)
		goto discard;

	if ((so->so_state & SS_ISCONNECTED) &&
	    (so->so_state & SS_ISDISCONNECTING) == 0) {
		soclose_disconn_async(so);
		return;
	}

	if (so->so_pcb) {
		soclose_detach_async(so);
		return;
	}

discard:
	sodiscard(so);
	soclose_sofree_async(so);
}

/*
 * Abort and destroy a socket.  Only one abort can be in progress
 * at any given moment.
 */
void
soabort_async(struct socket *so, boolean_t clr_head)
{
	/*
	 * Keep a reference before clearing the so_head
	 * to avoid racing socket close in netisr.
	 */
	soreference(so);
	if (clr_head)
		so->so_head = NULL;
	so_pru_abort_async(so);
}

void
soabort_oncpu(struct socket *so)
{
	soreference(so);
	so_pru_abort_direct(so);
}
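
/*
 * Illustrative accept flow (sketch; the descriptor work happens in the
 * syscall layer, not here): accept(2) pulls a connection off the listen
 * socket's completed queue and then runs it through soaccept():
 *
 *	lwkt_getpooltoken(head);
 *	sp = TAILQ_FIRST(&head->so_comp);
 *	TAILQ_REMOVE(&head->so_comp, sp, so_list);
 *	head->so_qlen--;
 *	soclrstate(sp, SS_COMP);
 *	lwkt_relpooltoken(head);
 *	error = soaccept(sp, &nam);	(clears SS_NOFDREF, then
 *					 dispatches so_pru_accept())
 */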

/*
 * so is passed in ref'd, which becomes owned by
 * the cleared SS_NOFDREF flag.
 */
void
soaccept_generic(struct socket *so)
{
	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	soclrstate(so, SS_NOFDREF);	/* owned by lack of SS_NOFDREF */
}

int
soaccept(struct socket *so, struct sockaddr **nam)
{
	int error;

	soaccept_generic(so);
	error = so_pru_accept(so, nam);
	return (error);
}

int
soconnect(struct socket *so, struct sockaddr *nam, struct thread *td,
    boolean_t sync)
{
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so)))) {
		error = EISCONN;
	} else {
		/*
		 * Prevent accumulated error from previous connection
		 * from biting us.
		 */
		so->so_error = 0;
		if (!sync && so->so_proto->pr_usrreqs->pru_preconnect)
			error = so_pru_connect_async(so, nam, td);
		else
			error = so_pru_connect(so, nam, td);
	}
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int error;

	error = so_pru_connect2(so1, so2);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = so_pru_disconnect(so);
bad:
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags,
	struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == NULL)
				gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
					ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				m = m_getl((int)resid, M_WAITOK, MT_DATA,
					   top == NULL ? M_PKTHDR : 0, &mlen);
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				if (resid < MINCLSIZE) {
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == NULL && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
			} else if ((flags & MSG_EOF) &&
				   (so->so_proto->pr_flags & PR_IMPLOPCL) &&
				   (resid == 0)) {
				/*
				 * If the user set MSG_EOF, the protocol
				 * understands this flag and nothing left to
				 * send then use PRU_SEND_EOF instead of PRU_SEND.
				 */
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
			} else {
				pru_flags = 0;
			}
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet()
			 * protection here, but there are probably other
			 * places that this also happens.  We must rethink
			 * this.
			 */
			error = so_pru_send(so, pru_flags, top, addr, control, td);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = NULL;
			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

#ifdef INET
/*
 * A specialization of sosend() for UDP based on protocol-specific knowledge:
 *   so->so_proto->pr_flags has the PR_ATOMIC field set.  This means that
 *	sosendallatonce() returns true,
 *	the "atomic" variable is true,
 *	and sosendudp() blocks until space is available for the entire send.
 *   so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
 *	PR_IMPLOPCL flags set.
 *   UDP has no out-of-band data.
 *   UDP has no control data.
 *   UDP does not support MSG_EOR.
 */
int
sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	  struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	size_t resid;
	int error, pru_flags = 0;
	int space;

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		m_freem(control);

	KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
	resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len;

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	if (so->so_state & SS_CANTSENDMORE)
		gotoerr(EPIPE);
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto release;
	}
	if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
		gotoerr(EDESTADDRREQ);
	if (resid > so->so_snd.ssb_hiwat)
		gotoerr(EMSGSIZE);
	space = ssb_space(&so->so_snd);
	if (uio && (space < 0 || (size_t)space < resid)) {
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
			gotoerr(EWOULDBLOCK);
		ssb_unlock(&so->so_snd);
		error = ssb_wait(&so->so_snd);
		if (error)
			goto out;
		goto restart;
	}

	if (uio) {
		int hdrlen = max_hdr;

		/*
		 * We try to optimize out the additional mbuf
		 * allocations in M_PREPEND() on output path, e.g.
		 * - udp_output(), when it tries to prepend protocol
		 *   headers.
		 * - Link layer output function, when it tries to
		 *   prepend link layer header.
		 *
		 * This probably will not benefit any data that will
		 * be fragmented, so this optimization is only performed
		 * when the size of data and max size of protocol+link
		 * headers fit into one mbuf cluster.
		 */
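		/*
		 * Worked example (illustrative numbers): with
		 * MCLBYTES = 2048 and max_hdr = 72, a datagram of up to
		 * 1976 bytes is copied at offset max_hdr into a single
		 * cluster, so the later M_PREPEND() calls in udp_output()
		 * and the link layer can extend m_data downward in place
		 * instead of allocating additional mbufs.
		 */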
		if (uio->uio_resid > MCLBYTES - hdrlen ||
		    !udp_sosend_prepend) {
			top = m_uiomove(uio);
			if (top == NULL)
				goto release;
		} else {
			int nsize;

			top = m_getl(uio->uio_resid + hdrlen, M_WAITOK,
			    MT_DATA, M_PKTHDR, &nsize);
			KASSERT(nsize >= uio->uio_resid + hdrlen,
			    ("sosendudp invalid nsize %d, "
			     "resid %zu, hdrlen %d",
			     nsize, uio->uio_resid, hdrlen));

			top->m_len = uio->uio_resid;
			top->m_pkthdr.len = uio->uio_resid;
			top->m_data += hdrlen;

			error = uiomove(mtod(top, caddr_t), top->m_len, uio);
			if (error)
				goto out;
		}
	}

	if (flags & MSG_DONTROUTE)
		pru_flags |= PRUS_DONTROUTE;

	if (udp_sosend_async && (flags & MSG_SYNC) == 0) {
		so_pru_send_async(so, pru_flags, top, addr, NULL, td);
		error = 0;
	} else {
		error = so_pru_send(so, pru_flags, top, addr, NULL, td);
	}
	top = NULL;	/* sent or freed in lower layer */

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	return (error);
}

int
sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags,
	struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int error, mlen;
	int allatonce;
	int pru_flags;

	if (uio) {
		KKASSERT(top == NULL);
		allatonce = 0;
		resid = uio->uio_resid;
	} else {
		allatonce = 1;
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on TCP
	 */
	if (flags & MSG_EOR) {
		error = EINVAL;
		goto out;
	}

	if (control) {
		/* TCP doesn't do control messages (rights, creds, etc) */
		if (control->m_len) {
			error = EINVAL;
			goto out;
		}
		m_freem(control);	/* empty control, just free it */
		control = NULL;
	}

	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;

#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0 &&
		    (so->so_state & SS_ISCONFIRMING) == 0)
			gotoerr(ENOTCONN);
		if (allatonce && resid > so->so_snd.ssb_hiwat)
			gotoerr(EMSGSIZE);

		space = ssb_space_prealloc(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid) && !allatonce &&
		    space < so->so_snd.ssb_lowat) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		do {
			int cnt = 0, async = 0;

			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
			} else do {
				if (resid > INT_MAX)
					resid = INT_MAX;
				if (tcp_sosend_jcluster) {
					m = m_getlj((int)resid, M_WAITOK, MT_DATA,
						    top == NULL ? M_PKTHDR : 0, &mlen);
				} else {
					m = m_getl((int)resid, M_WAITOK, MT_DATA,
						   top == NULL ? M_PKTHDR : 0, &mlen);
				}
				if (top == NULL) {
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = NULL;
				}
				len = imin((int)szmin(mlen, resid), space);
				space -= len;
				error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid == 0)
					break;
				++cnt;
			} while (space > 0 && cnt < tcp_sosend_agglim);

			if (tcp_sosend_async)
				async = 1;

			if (flags & MSG_OOB) {
				pru_flags = PRUS_OOB;
				async = 0;
			} else if ((flags & MSG_EOF) && resid == 0) {
				pru_flags = PRUS_EOF;
			} else if (resid > 0 && space > 0) {
				/* If there is more to send, set PRUS_MORETOCOME */
				pru_flags = PRUS_MORETOCOME;
				async = 1;
			} else {
				pru_flags = 0;
			}

			if (flags & MSG_SYNC)
				async = 0;

			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			for (m = top; m; m = m->m_next)
				ssb_preallocstream(&so->so_snd, m);
			if (!async) {
				error = so_pru_send(so, pru_flags, top,
				    NULL, NULL, td);
			} else {
				so_pru_send_async(so, pru_flags, top,
				    NULL, NULL, td);
				error = 0;
			}

			top = NULL;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
#endif
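
/*
 * Illustrative receive-buffer layout consumed by soreceive() below:
 * one record per m_nextpkt link, mbufs within a record linked via
 * m_next, e.g. for a PR_ADDR protocol such as UDP:
 *
 *	[MT_SONAME] -> [MT_CONTROL] -> [MT_DATA] -> [MT_DATA]
 *	     |
 *	 m_nextpkt
 *	     v
 *	[MT_SONAME] -> [MT_DATA]
 *
 * The address and control mbufs are optional, per the protocol flags.
 */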

/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}

	/*
	 * Skip any control mbufs prepending the record.
	 */
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;	/* XXX race */
		} else {
			if (controlp) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
			}
		}
		if (controlp && *controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/*
	 * flag OOB data.
	 */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 */
	moff = 0;
	offset = 0;
	while (m && resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
				("receive 3"));
		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				if (sio) {
					n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
					sbappend(sio, m);
					m = n;
				} else {
					m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
				}
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
			} else {
				if (sio) {
					n = m_copym(m, 0, len, M_WAITOK);
					if (n)
						sbappend(sio, n);
				}
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.ssb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until resid == 0 or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep signalsockbuf locked against other readers.
		 */
		while ((flags & MSG_WAITALL) && m == NULL &&
		       resid > 0 && !sosendallatonce(so) &&
		       so->so_rcv.ssb_mb == NULL) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				so_pru_rcvd(so, flags);
			error = ssb_wait(&so->so_rcv);
			if (error) {
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
			m = so->so_rcv.ssb_mb;
		}
	}

	/*
	 * If an atomic read was requested but unread data still remains
	 * in the record, set MSG_TRUNC.
	 */
	if (m && pr->pr_flags & PR_ATOMIC)
		flags |= MSG_TRUNC;

	/*
	 * Cleanup.  If an atomic read was requested drop any unread data.
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (m && (pr->pr_flags & PR_ATOMIC))
			sbdroprecord(&so->so_rcv.sb);
		if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
			so_pru_rcvd(so, flags);
	}

	if (orig_resid == resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (free_chain)
		m_freem(free_chain);
	return (error);
}

int
sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff;
	int didoob;
	size_t resid, orig_resid, restmp;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAITOK, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 *
	 * Lock a limited number of mbufs (not all, so sbcompress() still
	 * works well).  The token is used as an interlock for sbwait() so
	 * release it afterwards.
	 */
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	lwkt_gettoken(&so->so_rcv.ssb_token);
	m = so->so_rcv.ssb_mb;

	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			goto release;
		}
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			lwkt_reltoken(&so->so_rcv.ssb_token);
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		lwkt_reltoken(&so->so_rcv.ssb_token);
		if (error)
			goto done;
		goto restart;
	}

	/*
	 * Token still held
	 */
dontblock:
	n = m;
	restmp = 0;
	while (n && restmp < resid) {
		n->m_flags |= M_SOLOCKED;
		restmp += n->m_len;
		if (n->m_next == NULL)
			n = n->m_nextpkt;
		else
			n = n->m_next;
	}

	/*
	 * Release token for loop
	 */
	lwkt_reltoken(&so->so_rcv.ssb_token);
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Copy to the UIO or mbuf return chain (*mp).
	 *
	 * NOTE: Token is not held for loop
	 */
	moff = 0;
	offset = 0;
	didoob = 0;

	while (m && (m->m_flags & M_SOLOCKED) && resid > 0 && error == 0) {
		KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			("receive 3"));

		soclrstate(so, SS_RCVATMARK);
		len = (resid > INT_MAX) ? INT_MAX : resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;

		/*
		 * Copy out to the UIO or pass the mbufs back to the SIO.
		 * The SIO is dealt with when we eat the mbuf, but deal
		 * with the resid here either way.
		 */
		if (uio) {
			uio->uio_resid = resid;
			error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			resid = uio->uio_resid;
			if (error)
				goto release;
		} else {
			resid -= (size_t)len;
		}

		/*
		 * Eat the entire mbuf or just a piece of it
		 */
		offset += len;
		if (len == m->m_len - moff) {
			m = m->m_next;
			moff = 0;
		} else {
			moff += len;
		}

		/*
		 * Check oobmark
		 */
		if (so->so_oobmark && offset == so->so_oobmark) {
			didoob = 1;
			break;
		}
	}

	/*
	 * Synchronize sockbuf with data we read.
	 *
	 * NOTE: (m) is junk on entry (it could be left over from the
	 *	 previous loop).
	 */
	if ((flags & MSG_PEEK) == 0) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		m = so->so_rcv.ssb_mb;
		while (m && offset >= m->m_len) {
			if (so->so_oobmark) {
				so->so_oobmark -= m->m_len;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset -= m->m_len;
			if (sio) {
				n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL);
				sbappend(sio, m);
				m = n;
			} else {
				m = sbunlinkmbuf(&so->so_rcv.sb,
						 m, &free_chain);
			}
		}
		if (offset) {
			KKASSERT(m);
			if (sio) {
				n = m_copym(m, 0, offset, M_WAITOK);
				if (n)
					sbappend(sio, n);
			}
			m->m_data += offset;
			m->m_len -= offset;
			so->so_rcv.ssb_cc -= offset;
			if (so->so_oobmark) {
				so->so_oobmark -= offset;
				if (so->so_oobmark == 0) {
					sosetstate(so, SS_RCVATMARK);
					didoob = 1;
				}
			}
			offset = 0;
		}
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * If the MSG_WAITALL flag is set (for non-atomic socket),
	 * we must not quit until resid == 0 or an error termination.
	 *
	 * If a signal/timeout occurs, return with a short count but without
	 * error.
	 *
	 * Keep signalsockbuf locked against other readers.
	 *
	 * XXX if MSG_PEEK we currently do quit.
	 */
	if ((flags & MSG_WAITALL) && !(flags & MSG_PEEK) &&
	    didoob == 0 && resid > 0 &&
	    !sosendallatonce(so)) {
		lwkt_gettoken(&so->so_rcv.ssb_token);
		error = 0;
		while ((m = so->so_rcv.ssb_mb) == NULL) {
			if (so->so_error || (so->so_state & SS_CANTRCVMORE)) {
				error = so->so_error;
				break;
			}
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (so->so_pcb)
				so_pru_rcvd_async(so);
			if (so->so_rcv.ssb_mb == NULL)
				error = ssb_wait(&so->so_rcv);
			if (error) {
				lwkt_reltoken(&so->so_rcv.ssb_token);
				ssb_unlock(&so->so_rcv);
				error = 0;
				goto done;
			}
		}
		if (m && error == 0)
			goto dontblock;
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}

	/*
	 * Token not held here.
	 *
	 * Cleanup.  If an atomic read was requested drop any unread data XXX
	 */
	if ((flags & MSG_PEEK) == 0) {
		if (so->so_pcb)
			so_pru_rcvd_async(so);
	}

	if (orig_resid == resid && orig_resid &&
	    (so->so_state & SS_CANTRCVMORE) == 0) {
		ssb_unlock(&so->so_rcv);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	ssb_unlock(&so->so_rcv);
done:
	if (free_chain)
		m_freem(free_chain);
	return (error);
}
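
/*
 * Illustrative wakeup sequence (sketch, userland view) for the
 * "no frontend lock" note below: a reader blocked in read(2) is
 * released by a concurrent shutdown:
 *
 *	thread A: read(s, buf, len)	blocks in ssb_wait()
 *	thread B: shutdown(s, SHUT_RD)	-> soshutdown() -> sorflush()
 *					-> socantrcvmore() wakes thread A
 *	thread A: read(2) returns 0 (EOF)
 */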

/*
 * Shut a socket down.  Note that we do not get a frontend lock as we
 * want to be able to shut the socket down even if another thread is
 * blocked in a read(), thus waking it up.
 */
int
soshutdown(struct socket *so, int how)
{
	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR) {
		/*ssb_lock(&so->so_rcv, M_WAITOK);*/
		sorflush(so);
		/*ssb_unlock(&so->so_rcv);*/
	}
	if (how != SHUT_RD)
		return (so_pru_shutdown(so));
	return (0);
}

void
sorflush(struct socket *so)
{
	struct signalsockbuf *ssb = &so->so_rcv;
	struct protosw *pr = so->so_proto;
	struct signalsockbuf asb;

	atomic_set_int(&ssb->ssb_flags, SSB_NOINTR);

	lwkt_gettoken(&ssb->ssb_token);
	socantrcvmore(so);
	asb = *ssb;

	/*
	 * Can't just blow up the ssb structure here
	 */
	bzero(&ssb->sb, sizeof(ssb->sb));
	ssb->ssb_timeo = 0;
	ssb->ssb_lowat = 0;
	ssb->ssb_hiwat = 0;
	ssb->ssb_mbmax = 0;
	atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK);

	if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.ssb_mb);
	ssb_release(&asb, so);

	lwkt_reltoken(&ssb->ssb_token);
}

#ifdef INET
static int
do_setopt_accept_filter(struct socket *so, struct sockopt *sopt)
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				kfree(af->so_accept_filter_str, M_ACCF);
			}
			kfree(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO);
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			af->so_accept_filter_str = kmalloc(len, M_ACCF,
							   M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			kfree(af->so_accept_filter_str, M_ACCF);
			kfree(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		kfree(afap, M_TEMP);
	return (error);
}
#endif /* INET */
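
/*
 * Illustrative userland usage of the accept filter machinery above
 * (sketch; filter availability depends on the kernel configuration):
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * A NULL sopt (see do_setopt_accept_filter() above) removes the
 * filter again.
 */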
/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	return soopt_to_kbuf(sopt, buf, len, minlen);
}

int
soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
{
	size_t	valsize;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
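/*
 * Sketch of how a protocol's pr_ctloutput() handler typically consumes
 * these helpers on the set side (the option value and the pcb field are
 * hypothetical, for illustration only):
 *
 *	int optval, error;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 *	if (optval < 0)
 *		return (EINVAL);
 *	inp->inp_hypothetical_opt = optval;
 *	return (0);
 */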
int
sosetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	struct	linger l;
	struct	timeval tv;
	u_long	val;
	struct signalsockbuf *sotmp;

	error = 0;
	sopt->sopt_dir = SOPT_SET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		}
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ?
						&so->so_snd : &so->so_rcv,
						(u_long)optval, so,
						&curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				sotmp = (sopt->sopt_name == SO_SNDBUF) ?
					&so->so_snd : &so->so_rcv;
				atomic_clear_int(&sotmp->ssb_flags,
						 SSB_AUTOSIZE);
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.ssb_lowat =
				    (optval > so->so_snd.ssb_hiwat) ?
				    so->so_snd.ssb_hiwat : optval;
				atomic_clear_int(&so->so_snd.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.ssb_lowat =
				    (optval > so->so_rcv.ssb_hiwat) ?
				    so->so_rcv.ssb_hiwat : optval;
				atomic_clear_int(&so->so_rcv.ssb_flags,
						 SSB_AUTOLOWAT);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
					    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - INT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick;
			if (val > INT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.ssb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.ssb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) so_pr_ctloutput(so, sopt);
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
{
	soopt_from_kbuf(sopt, buf, len);
	return 0;
}

void
soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len)
{
	size_t	valsize;

	if (len == 0) {
		sopt->sopt_valsize = 0;
		return;
	}

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(buf));

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer must
	 * be generated ahead of time.
	 */
	valsize = szmin(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != NULL) {
		bcopy(buf, sopt->sopt_val, valsize);
	}
}

int
sogetopt(struct socket *so, struct sockopt *sopt)
{
	int	error, optval;
	long	optval_l;
	struct	linger l;
	struct	timeval tv;
#ifdef INET
	struct accept_filter_arg *afap;
#endif

	error = 0;
	sopt->sopt_dir = SOPT_GET;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return (so_pr_ctloutput(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			afap = kmalloc(sizeof(*afap), M_TEMP,
				       M_WAITOK | M_ZERO);
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name,
				       so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg,
					       so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			kfree(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_NOSIGPIPE:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.ssb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.ssb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.ssb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.ssb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
				  so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * ustick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		case SO_SNDSPACE:
			optval_l = ssb_space(&so->so_snd);
			error = sooptcopyout(sopt, &optval_l, sizeof(optval_l));
			break;

		case SO_CPUHINT:
			optval = -1; /* no hint */
			goto integer;

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput)
			so_pr_ctloutput(so, sopt);
		return (error);
	}
}
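/*
 * Worked example of the SO_RCVTIMEO round trip implemented above,
 * assuming hz = 100, i.e. ustick = 1000000 / hz = 10000 microseconds
 * per tick:
 *
 *	set: tv = { 0, 25000 }  ->  val = 0 * hz + 25000 / 10000 = 2 ticks
 *	get: optval = 2         ->  tv = { 2 / 100, (2 % 100) * 10000 }
 *	                             = { 0, 20000 }
 *
 * Timeouts are thus rounded down to tick granularity on the way in,
 * which is why sosetopt() clamps a non-zero request up to at least one
 * tick (val == 0 && tv.tv_usec != 0) rather than letting it degenerate
 * into "no timeout".
 */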
/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize, msize;

	m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT, MT_DATA,
		   0, &msize);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = min(msize, sopt_size);
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size > 0) {
		m = m_getl(sopt_size, sopt->sopt_td ? M_WAITOK : M_NOWAIT,
			   MT_DATA, 0, &msize);
		if (m == NULL) {
			m_freem(*mp);
			return (ENOBUFS);
		}
		m->m_len = min(msize, sopt_size);
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return (0);
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	soopt_to_mbuf(sopt, m);
	return 0;
}

void
soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	size_t valsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return;
	val = sopt->sopt_val;
	valsize = sopt->sopt_valsize;
	while (m != NULL && valsize >= m->m_len) {
		bcopy(val, mtod(m, char *), m->m_len);
		valsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL) /* the chain should have been allocated with enough space */
		panic("ip6_sooptmcopyin");
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	return soopt_from_mbuf(sopt, m);
}

int
soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;
	size_t maxsize;
	void *val;

	KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val));
	KKASSERT(kva_p(m));
	if (sopt->sopt_val == NULL)
		return 0;
	val = sopt->sopt_val;
	maxsize = sopt->sopt_valsize;
	while (m != NULL && maxsize >= m->m_len) {
		bcopy(mtod(m, char *), val, m->m_len);
		maxsize -= m->m_len;
		val = (caddr_t)val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* user-land should have supplied a large enough buffer */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}

void
sohasoutofband(struct socket *so)
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct signalsockbuf *ssb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		ssb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		ssb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		ssb = &so->so_rcv;
		break;
	default:
		return (EOPNOTSUPP);
	}

	knote_insert(&ssb->ssb_kq.ki_note, kn);
	atomic_set_int(&ssb->ssb_flags, SSB_KNOTE);
	return (0);
}

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_rcv.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note))
		atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE);
}
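/*
 * Userland sketch (illustrative, not compiled here) of reaching
 * sokqfilter() above; a listening socket registered with EVFILT_READ is
 * transparently routed to solisten_filtops:
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */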
/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	if (kn->kn_sfflags & NOTE_OOB) {
		if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) {
			kn->kn_fflags |= NOTE_OOB;
			return (1);
		}
		return (0);
	}
	kn->kn_data = so->so_rcv.ssb_cc;

	if (so->so_state & SS_CANTRCVMORE) {
		/*
		 * Only set NODATA if all data has been exhausted.
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return ((kn->kn_data >= so->so_rcv.ssb_lowat) ||
		!TAILQ_EMPTY(&so->so_comp));
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	knote_remove(&so->so_snd.ssb_kq.ki_note, kn);
	if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note))
		atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = ssb_space(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.ssb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (!TAILQ_EMPTY(&so->so_comp));
}
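/*
 * For filt_solisten() above, kn_data reports the completed-connection
 * queue length, so a userland consumer can (as a sketch, ignoring error
 * handling) accept that many connections without blocking:
 *
 *	struct kevent ev;
 *
 *	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1) {
 *		while (ev.data-- > 0)
 *			accept(ev.ident, NULL, NULL);
 *	}
 */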