1 /* 2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved. 3 * Copyright (c) 2004 The DragonFly Project. All rights reserved. 4 * 5 * This code is derived from software contributed to The DragonFly Project 6 * by Jeffrey M. Hsu. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of The DragonFly Project nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific, prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (c) 1982, 1986, 1988, 1990, 1993 36 * The Regents of the University of California. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. Neither the name of the University nor the names of its contributors 47 * may be used to endorse or promote products derived from this software 48 * without specific prior written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * SUCH DAMAGE. 61 * 62 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94 63 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $ 64 */ 65 66 #include "opt_inet.h" 67 #include "opt_sctp.h" 68 69 #include <sys/param.h> 70 #include <sys/systm.h> 71 #include <sys/fcntl.h> 72 #include <sys/malloc.h> 73 #include <sys/mbuf.h> 74 #include <sys/domain.h> 75 #include <sys/file.h> /* for struct knote */ 76 #include <sys/kernel.h> 77 #include <sys/event.h> 78 #include <sys/proc.h> 79 #include <sys/protosw.h> 80 #include <sys/socket.h> 81 #include <sys/socketvar.h> 82 #include <sys/socketops.h> 83 #include <sys/resourcevar.h> 84 #include <sys/signalvar.h> 85 #include <sys/sysctl.h> 86 #include <sys/uio.h> 87 #include <sys/jail.h> 88 #include <vm/vm_zone.h> 89 #include <vm/pmap.h> 90 #include <net/netmsg2.h> 91 #include <net/netisr2.h> 92 93 #include <sys/thread2.h> 94 #include <sys/socketvar2.h> 95 #include <sys/spinlock2.h> 96 97 #include <machine/limits.h> 98 99 #ifdef INET 100 extern int tcp_sosend_agglim; 101 extern int tcp_sosend_async; 102 extern int udp_sosend_async; 103 extern int udp_sosend_prepend; 104 105 static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt); 106 #endif /* INET */ 107 108 static void filt_sordetach(struct knote *kn); 109 static int filt_soread(struct knote *kn, long hint); 110 static void filt_sowdetach(struct knote *kn); 111 static int filt_sowrite(struct knote *kn, long hint); 112 static int filt_solisten(struct knote *kn, long hint); 113 114 static void sodiscard(struct socket *so); 115 static int soclose_sync(struct socket *so, int fflag); 116 static void soclose_fast(struct socket *so); 117 118 static struct filterops solisten_filtops = 119 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_solisten }; 120 static struct filterops soread_filtops = 121 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread }; 122 static struct filterops sowrite_filtops = 123 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sowdetach, filt_sowrite }; 124 static struct filterops soexcept_filtops = 125 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_sordetach, filt_soread }; 126 127 MALLOC_DEFINE(M_SOCKET, "socket", "socket struct"); 128 MALLOC_DEFINE(M_SONAME, "soname", "socket name"); 129 MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); 130 131 132 static int somaxconn = SOMAXCONN; 133 SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, 134 &somaxconn, 0, "Maximum pending socket connection queue size"); 135 136 static int use_soclose_fast = 1; 137 SYSCTL_INT(_kern_ipc, OID_AUTO, soclose_fast, CTLFLAG_RW, 138 &use_soclose_fast, 0, "Fast socket close"); 139 140 int use_soaccept_pred_fast = 1; 141 SYSCTL_INT(_kern_ipc, OID_AUTO, soaccept_pred_fast, CTLFLAG_RW, 142 &use_soaccept_pred_fast, 0, "Fast socket accept predication"); 143 144 int use_sendfile_async = 1; 145 SYSCTL_INT(_kern_ipc, OID_AUTO, sendfile_async, CTLFLAG_RW, 146 &use_sendfile_async, 0, "sendfile uses asynchronized pru_send"); 147 148 /* 149 * Socket operation routines. 150 * These routines are called by the routines in 151 * sys_socket.c or from a system process, and 152 * implement the semantics of socket operations by 153 * switching out to the protocol specific routines. 154 */ 155 156 /* 157 * Get a socket structure, and initialize it. 158 * Note that it would probably be better to allocate socket 159 * and PCB at the same time, but I'm not convinced that all 160 * the protocols can be easily modified to do this. 161 */ 162 struct socket * 163 soalloc(int waitok, struct protosw *pr) 164 { 165 struct socket *so; 166 unsigned waitmask; 167 168 waitmask = waitok ? M_WAITOK : M_NOWAIT; 169 so = kmalloc(sizeof(struct socket), M_SOCKET, M_ZERO|waitmask); 170 if (so) { 171 /* XXX race condition for reentrant kernel */ 172 so->so_proto = pr; 173 TAILQ_INIT(&so->so_aiojobq); 174 TAILQ_INIT(&so->so_rcv.ssb_kq.ki_mlist); 175 TAILQ_INIT(&so->so_snd.ssb_kq.ki_mlist); 176 lwkt_token_init(&so->so_rcv.ssb_token, "rcvtok"); 177 lwkt_token_init(&so->so_snd.ssb_token, "sndtok"); 178 spin_init(&so->so_rcvd_spin); 179 netmsg_init(&so->so_rcvd_msg.base, so, &netisr_adone_rport, 180 MSGF_DROPABLE | MSGF_PRIORITY, 181 so->so_proto->pr_usrreqs->pru_rcvd); 182 so->so_rcvd_msg.nm_pru_flags |= PRUR_ASYNC; 183 so->so_state = SS_NOFDREF; 184 so->so_refs = 1; 185 } 186 return so; 187 } 188 189 int 190 socreate(int dom, struct socket **aso, int type, 191 int proto, struct thread *td) 192 { 193 struct proc *p = td->td_proc; 194 struct protosw *prp; 195 struct socket *so; 196 struct pru_attach_info ai; 197 int error; 198 199 if (proto) 200 prp = pffindproto(dom, proto, type); 201 else 202 prp = pffindtype(dom, type); 203 204 if (prp == NULL || prp->pr_usrreqs->pru_attach == 0) 205 return (EPROTONOSUPPORT); 206 207 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only && 208 prp->pr_domain->dom_family != PF_LOCAL && 209 prp->pr_domain->dom_family != PF_INET && 210 prp->pr_domain->dom_family != PF_INET6 && 211 prp->pr_domain->dom_family != PF_ROUTE) { 212 return (EPROTONOSUPPORT); 213 } 214 215 if (prp->pr_type != type) 216 return (EPROTOTYPE); 217 so = soalloc(p != NULL, prp); 218 if (so == NULL) 219 return (ENOBUFS); 220 221 /* 222 * Callers of socreate() presumably will connect up a descriptor 223 * and call soclose() if they cannot. This represents our so_refs 224 * (which should be 1) from soalloc(). 225 */ 226 soclrstate(so, SS_NOFDREF); 227 228 /* 229 * Set a default port for protocol processing. No action will occur 230 * on the socket on this port until an inpcb is attached to it and 231 * is able to match incoming packets, or until the socket becomes 232 * available to userland. 233 * 234 * We normally default the socket to the protocol thread on cpu 0. 235 * If PR_SYNC_PORT is set (unix domain sockets) there is no protocol 236 * thread and all pr_*()/pru_*() calls are executed synchronously. 237 */ 238 if (prp->pr_flags & PR_SYNC_PORT) 239 so->so_port = &netisr_sync_port; 240 else 241 so->so_port = netisr_cpuport(0); 242 243 TAILQ_INIT(&so->so_incomp); 244 TAILQ_INIT(&so->so_comp); 245 so->so_type = type; 246 so->so_cred = crhold(p->p_ucred); 247 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE]; 248 ai.p_ucred = p->p_ucred; 249 ai.fd_rdir = p->p_fd->fd_rdir; 250 251 /* 252 * Auto-sizing of socket buffers is managed by the protocols and 253 * the appropriate flags must be set in the pru_attach function. 254 */ 255 error = so_pru_attach(so, proto, &ai); 256 if (error) { 257 sosetstate(so, SS_NOFDREF); 258 sofree(so); /* from soalloc */ 259 return error; 260 } 261 262 /* 263 * NOTE: Returns referenced socket. 264 */ 265 *aso = so; 266 return (0); 267 } 268 269 int 270 sobind(struct socket *so, struct sockaddr *nam, struct thread *td) 271 { 272 int error; 273 274 error = so_pru_bind(so, nam, td); 275 return (error); 276 } 277 278 static void 279 sodealloc(struct socket *so) 280 { 281 if (so->so_rcv.ssb_hiwat) 282 (void)chgsbsize(so->so_cred->cr_uidinfo, 283 &so->so_rcv.ssb_hiwat, 0, RLIM_INFINITY); 284 if (so->so_snd.ssb_hiwat) 285 (void)chgsbsize(so->so_cred->cr_uidinfo, 286 &so->so_snd.ssb_hiwat, 0, RLIM_INFINITY); 287 #ifdef INET 288 /* remove accept filter if present */ 289 if (so->so_accf != NULL) 290 do_setopt_accept_filter(so, NULL); 291 #endif /* INET */ 292 crfree(so->so_cred); 293 if (so->so_faddr != NULL) 294 kfree(so->so_faddr, M_SONAME); 295 kfree(so, M_SOCKET); 296 } 297 298 int 299 solisten(struct socket *so, int backlog, struct thread *td) 300 { 301 int error; 302 #ifdef SCTP 303 short oldopt, oldqlimit; 304 #endif /* SCTP */ 305 306 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) 307 return (EINVAL); 308 309 #ifdef SCTP 310 oldopt = so->so_options; 311 oldqlimit = so->so_qlimit; 312 #endif /* SCTP */ 313 314 lwkt_gettoken(&so->so_rcv.ssb_token); 315 if (TAILQ_EMPTY(&so->so_comp)) 316 so->so_options |= SO_ACCEPTCONN; 317 lwkt_reltoken(&so->so_rcv.ssb_token); 318 if (backlog < 0 || backlog > somaxconn) 319 backlog = somaxconn; 320 so->so_qlimit = backlog; 321 /* SCTP needs to look at tweak both the inbound backlog parameter AND 322 * the so_options (UDP model both connect's and gets inbound 323 * connections .. implicitly). 324 */ 325 error = so_pru_listen(so, td); 326 if (error) { 327 #ifdef SCTP 328 /* Restore the params */ 329 so->so_options = oldopt; 330 so->so_qlimit = oldqlimit; 331 #endif /* SCTP */ 332 return (error); 333 } 334 return (0); 335 } 336 337 /* 338 * Destroy a disconnected socket. This routine is a NOP if entities 339 * still have a reference on the socket: 340 * 341 * so_pcb - The protocol stack still has a reference 342 * SS_NOFDREF - There is no longer a file pointer reference 343 */ 344 void 345 sofree(struct socket *so) 346 { 347 struct socket *head; 348 349 /* 350 * This is a bit hackish at the moment. We need to interlock 351 * any accept queue we are on before we potentially lose the 352 * last reference to avoid races against a re-reference from 353 * someone operating on the queue. 354 */ 355 while ((head = so->so_head) != NULL) { 356 lwkt_getpooltoken(head); 357 if (so->so_head == head) 358 break; 359 lwkt_relpooltoken(head); 360 } 361 362 /* 363 * Arbitrage the last free. 364 */ 365 KKASSERT(so->so_refs > 0); 366 if (atomic_fetchadd_int(&so->so_refs, -1) != 1) { 367 if (head) 368 lwkt_relpooltoken(head); 369 return; 370 } 371 372 KKASSERT(so->so_pcb == NULL && (so->so_state & SS_NOFDREF)); 373 KKASSERT((so->so_state & SS_ASSERTINPROG) == 0); 374 375 /* 376 * We're done, remove ourselves from the accept queue we are 377 * on, if we are on one. 378 */ 379 if (head != NULL) { 380 if (so->so_state & SS_INCOMP) { 381 TAILQ_REMOVE(&head->so_incomp, so, so_list); 382 head->so_incqlen--; 383 } else if (so->so_state & SS_COMP) { 384 /* 385 * We must not decommission a socket that's 386 * on the accept(2) queue. If we do, then 387 * accept(2) may hang after select(2) indicated 388 * that the listening socket was ready. 389 */ 390 lwkt_relpooltoken(head); 391 return; 392 } else { 393 panic("sofree: not queued"); 394 } 395 soclrstate(so, SS_INCOMP); 396 so->so_head = NULL; 397 lwkt_relpooltoken(head); 398 } 399 ssb_release(&so->so_snd, so); 400 sorflush(so); 401 sodealloc(so); 402 } 403 404 /* 405 * Close a socket on last file table reference removal. 406 * Initiate disconnect if connected. 407 * Free socket when disconnect complete. 408 */ 409 int 410 soclose(struct socket *so, int fflag) 411 { 412 int error; 413 414 funsetown(&so->so_sigio); 415 if (!use_soclose_fast || 416 (so->so_proto->pr_flags & PR_SYNC_PORT) || 417 ((so->so_state & SS_ISCONNECTED) && 418 (so->so_options & SO_LINGER))) { 419 error = soclose_sync(so, fflag); 420 } else { 421 soclose_fast(so); 422 error = 0; 423 } 424 return error; 425 } 426 427 static void 428 sodiscard(struct socket *so) 429 { 430 lwkt_getpooltoken(so); 431 if (so->so_options & SO_ACCEPTCONN) { 432 struct socket *sp; 433 434 while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { 435 TAILQ_REMOVE(&so->so_incomp, sp, so_list); 436 soclrstate(sp, SS_INCOMP); 437 sp->so_head = NULL; 438 so->so_incqlen--; 439 soaborta(sp); 440 } 441 while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { 442 TAILQ_REMOVE(&so->so_comp, sp, so_list); 443 soclrstate(sp, SS_COMP); 444 sp->so_head = NULL; 445 so->so_qlen--; 446 soaborta(sp); 447 } 448 } 449 lwkt_relpooltoken(so); 450 451 if (so->so_state & SS_NOFDREF) 452 panic("soclose: NOFDREF"); 453 sosetstate(so, SS_NOFDREF); /* take ref */ 454 } 455 456 void 457 soinherit(struct socket *so, struct socket *so_inh) 458 { 459 TAILQ_HEAD(, socket) comp, incomp; 460 struct socket *sp; 461 int qlen, incqlen; 462 463 KASSERT(so->so_options & SO_ACCEPTCONN, 464 ("so does not accept connection")); 465 KASSERT(so_inh->so_options & SO_ACCEPTCONN, 466 ("so_inh does not accept connection")); 467 468 TAILQ_INIT(&comp); 469 TAILQ_INIT(&incomp); 470 471 lwkt_getpooltoken(so); 472 lwkt_getpooltoken(so_inh); 473 474 /* 475 * Save completed queue and incompleted queue 476 */ 477 TAILQ_CONCAT(&comp, &so->so_comp, so_list); 478 qlen = so->so_qlen; 479 so->so_qlen = 0; 480 481 TAILQ_CONCAT(&incomp, &so->so_incomp, so_list); 482 incqlen = so->so_incqlen; 483 so->so_incqlen = 0; 484 485 /* 486 * Append the saved completed queue and incompleted 487 * queue to the socket inherits them. 488 * 489 * XXX 490 * This may temporarily break the inheriting socket's 491 * so_qlimit. 492 */ 493 TAILQ_FOREACH(sp, &comp, so_list) { 494 sp->so_head = so_inh; 495 crfree(sp->so_cred); 496 sp->so_cred = crhold(so_inh->so_cred); 497 } 498 499 TAILQ_FOREACH(sp, &incomp, so_list) { 500 sp->so_head = so_inh; 501 crfree(sp->so_cred); 502 sp->so_cred = crhold(so_inh->so_cred); 503 } 504 505 TAILQ_CONCAT(&so_inh->so_comp, &comp, so_list); 506 so_inh->so_qlen += qlen; 507 508 TAILQ_CONCAT(&so_inh->so_incomp, &incomp, so_list); 509 so_inh->so_incqlen += incqlen; 510 511 lwkt_relpooltoken(so_inh); 512 lwkt_relpooltoken(so); 513 514 if (qlen) { 515 /* 516 * "New" connections have arrived 517 */ 518 sorwakeup(so_inh); 519 wakeup(&so_inh->so_timeo); 520 } 521 } 522 523 static int 524 soclose_sync(struct socket *so, int fflag) 525 { 526 int error = 0; 527 528 if (so->so_pcb == NULL) 529 goto discard; 530 if (so->so_state & SS_ISCONNECTED) { 531 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 532 error = sodisconnect(so); 533 if (error) 534 goto drop; 535 } 536 if (so->so_options & SO_LINGER) { 537 if ((so->so_state & SS_ISDISCONNECTING) && 538 (fflag & FNONBLOCK)) 539 goto drop; 540 while (so->so_state & SS_ISCONNECTED) { 541 error = tsleep(&so->so_timeo, PCATCH, 542 "soclos", so->so_linger * hz); 543 if (error) 544 break; 545 } 546 } 547 } 548 drop: 549 if (so->so_pcb) { 550 int error2; 551 552 error2 = so_pru_detach(so); 553 if (error == 0) 554 error = error2; 555 } 556 discard: 557 sodiscard(so); 558 so_pru_sync(so); /* unpend async sending */ 559 sofree(so); /* dispose of ref */ 560 561 return (error); 562 } 563 564 static void 565 soclose_sofree_async_handler(netmsg_t msg) 566 { 567 sofree(msg->base.nm_so); 568 } 569 570 static void 571 soclose_sofree_async(struct socket *so) 572 { 573 struct netmsg_base *base = &so->so_clomsg; 574 575 netmsg_init(base, so, &netisr_apanic_rport, 0, 576 soclose_sofree_async_handler); 577 lwkt_sendmsg(so->so_port, &base->lmsg); 578 } 579 580 static void 581 soclose_disconn_async_handler(netmsg_t msg) 582 { 583 struct socket *so = msg->base.nm_so; 584 585 if ((so->so_state & SS_ISCONNECTED) && 586 (so->so_state & SS_ISDISCONNECTING) == 0) 587 so_pru_disconnect_direct(so); 588 589 if (so->so_pcb) 590 so_pru_detach_direct(so); 591 592 sodiscard(so); 593 sofree(so); 594 } 595 596 static void 597 soclose_disconn_async(struct socket *so) 598 { 599 struct netmsg_base *base = &so->so_clomsg; 600 601 netmsg_init(base, so, &netisr_apanic_rport, 0, 602 soclose_disconn_async_handler); 603 lwkt_sendmsg(so->so_port, &base->lmsg); 604 } 605 606 static void 607 soclose_detach_async_handler(netmsg_t msg) 608 { 609 struct socket *so = msg->base.nm_so; 610 611 if (so->so_pcb) 612 so_pru_detach_direct(so); 613 614 sodiscard(so); 615 sofree(so); 616 } 617 618 static void 619 soclose_detach_async(struct socket *so) 620 { 621 struct netmsg_base *base = &so->so_clomsg; 622 623 netmsg_init(base, so, &netisr_apanic_rport, 0, 624 soclose_detach_async_handler); 625 lwkt_sendmsg(so->so_port, &base->lmsg); 626 } 627 628 static void 629 soclose_fast(struct socket *so) 630 { 631 if (so->so_pcb == NULL) 632 goto discard; 633 634 if ((so->so_state & SS_ISCONNECTED) && 635 (so->so_state & SS_ISDISCONNECTING) == 0) { 636 soclose_disconn_async(so); 637 return; 638 } 639 640 if (so->so_pcb) { 641 soclose_detach_async(so); 642 return; 643 } 644 645 discard: 646 sodiscard(so); 647 soclose_sofree_async(so); 648 } 649 650 /* 651 * Abort and destroy a socket. Only one abort can be in progress 652 * at any given moment. 653 */ 654 void 655 soabort(struct socket *so) 656 { 657 soreference(so); 658 so_pru_abort(so); 659 } 660 661 void 662 soaborta(struct socket *so) 663 { 664 soreference(so); 665 so_pru_aborta(so); 666 } 667 668 void 669 soabort_oncpu(struct socket *so) 670 { 671 soreference(so); 672 so_pru_abort_oncpu(so); 673 } 674 675 /* 676 * so is passed in ref'd, which becomes owned by 677 * the cleared SS_NOFDREF flag. 678 */ 679 void 680 soaccept_generic(struct socket *so) 681 { 682 if ((so->so_state & SS_NOFDREF) == 0) 683 panic("soaccept: !NOFDREF"); 684 soclrstate(so, SS_NOFDREF); /* owned by lack of SS_NOFDREF */ 685 } 686 687 int 688 soaccept(struct socket *so, struct sockaddr **nam) 689 { 690 int error; 691 692 soaccept_generic(so); 693 error = so_pru_accept(so, nam); 694 return (error); 695 } 696 697 int 698 soconnect(struct socket *so, struct sockaddr *nam, struct thread *td) 699 { 700 int error; 701 702 if (so->so_options & SO_ACCEPTCONN) 703 return (EOPNOTSUPP); 704 /* 705 * If protocol is connection-based, can only connect once. 706 * Otherwise, if connected, try to disconnect first. 707 * This allows user to disconnect by connecting to, e.g., 708 * a null address. 709 */ 710 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 711 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 712 (error = sodisconnect(so)))) { 713 error = EISCONN; 714 } else { 715 /* 716 * Prevent accumulated error from previous connection 717 * from biting us. 718 */ 719 so->so_error = 0; 720 error = so_pru_connect(so, nam, td); 721 } 722 return (error); 723 } 724 725 int 726 soconnect2(struct socket *so1, struct socket *so2) 727 { 728 int error; 729 730 error = so_pru_connect2(so1, so2); 731 return (error); 732 } 733 734 int 735 sodisconnect(struct socket *so) 736 { 737 int error; 738 739 if ((so->so_state & SS_ISCONNECTED) == 0) { 740 error = ENOTCONN; 741 goto bad; 742 } 743 if (so->so_state & SS_ISDISCONNECTING) { 744 error = EALREADY; 745 goto bad; 746 } 747 error = so_pru_disconnect(so); 748 bad: 749 return (error); 750 } 751 752 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 753 /* 754 * Send on a socket. 755 * If send must go all at once and message is larger than 756 * send buffering, then hard error. 757 * Lock against other senders. 758 * If must go all at once and not enough room now, then 759 * inform user that this would block and do nothing. 760 * Otherwise, if nonblocking, send as much as possible. 761 * The data to be sent is described by "uio" if nonzero, 762 * otherwise by the mbuf chain "top" (which must be null 763 * if uio is not). Data provided in mbuf chain must be small 764 * enough to send all at once. 765 * 766 * Returns nonzero on error, timeout or signal; callers 767 * must check for short counts if EINTR/ERESTART are returned. 768 * Data and control buffers are freed on return. 769 */ 770 int 771 sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, 772 struct mbuf *top, struct mbuf *control, int flags, 773 struct thread *td) 774 { 775 struct mbuf **mp; 776 struct mbuf *m; 777 size_t resid; 778 int space, len; 779 int clen = 0, error, dontroute, mlen; 780 int atomic = sosendallatonce(so) || top; 781 int pru_flags; 782 783 if (uio) { 784 resid = uio->uio_resid; 785 } else { 786 resid = (size_t)top->m_pkthdr.len; 787 #ifdef INVARIANTS 788 len = 0; 789 for (m = top; m; m = m->m_next) 790 len += m->m_len; 791 KKASSERT(top->m_pkthdr.len == len); 792 #endif 793 } 794 795 /* 796 * WARNING! resid is unsigned, space and len are signed. space 797 * can wind up negative if the sockbuf is overcommitted. 798 * 799 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM 800 * type sockets since that's an error. 801 */ 802 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { 803 error = EINVAL; 804 goto out; 805 } 806 807 dontroute = 808 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && 809 (so->so_proto->pr_flags & PR_ATOMIC); 810 if (td->td_lwp != NULL) 811 td->td_lwp->lwp_ru.ru_msgsnd++; 812 if (control) 813 clen = control->m_len; 814 #define gotoerr(errcode) { error = errcode; goto release; } 815 816 restart: 817 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 818 if (error) 819 goto out; 820 821 do { 822 if (so->so_state & SS_CANTSENDMORE) 823 gotoerr(EPIPE); 824 if (so->so_error) { 825 error = so->so_error; 826 so->so_error = 0; 827 goto release; 828 } 829 if ((so->so_state & SS_ISCONNECTED) == 0) { 830 /* 831 * `sendto' and `sendmsg' is allowed on a connection- 832 * based socket if it supports implied connect. 833 * Return ENOTCONN if not connected and no address is 834 * supplied. 835 */ 836 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && 837 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { 838 if ((so->so_state & SS_ISCONFIRMING) == 0 && 839 !(resid == 0 && clen != 0)) 840 gotoerr(ENOTCONN); 841 } else if (addr == NULL) 842 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ? 843 ENOTCONN : EDESTADDRREQ); 844 } 845 if ((atomic && resid > so->so_snd.ssb_hiwat) || 846 clen > so->so_snd.ssb_hiwat) { 847 gotoerr(EMSGSIZE); 848 } 849 space = ssb_space(&so->so_snd); 850 if (flags & MSG_OOB) 851 space += 1024; 852 if ((space < 0 || (size_t)space < resid + clen) && uio && 853 (atomic || space < so->so_snd.ssb_lowat || space < clen)) { 854 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 855 gotoerr(EWOULDBLOCK); 856 ssb_unlock(&so->so_snd); 857 error = ssb_wait(&so->so_snd); 858 if (error) 859 goto out; 860 goto restart; 861 } 862 mp = ⊤ 863 space -= clen; 864 do { 865 if (uio == NULL) { 866 /* 867 * Data is prepackaged in "top". 868 */ 869 resid = 0; 870 if (flags & MSG_EOR) 871 top->m_flags |= M_EOR; 872 } else do { 873 if (resid > INT_MAX) 874 resid = INT_MAX; 875 m = m_getl((int)resid, MB_WAIT, MT_DATA, 876 top == NULL ? M_PKTHDR : 0, &mlen); 877 if (top == NULL) { 878 m->m_pkthdr.len = 0; 879 m->m_pkthdr.rcvif = NULL; 880 } 881 len = imin((int)szmin(mlen, resid), space); 882 if (resid < MINCLSIZE) { 883 /* 884 * For datagram protocols, leave room 885 * for protocol headers in first mbuf. 886 */ 887 if (atomic && top == NULL && len < mlen) 888 MH_ALIGN(m, len); 889 } 890 space -= len; 891 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 892 resid = uio->uio_resid; 893 m->m_len = len; 894 *mp = m; 895 top->m_pkthdr.len += len; 896 if (error) 897 goto release; 898 mp = &m->m_next; 899 if (resid == 0) { 900 if (flags & MSG_EOR) 901 top->m_flags |= M_EOR; 902 break; 903 } 904 } while (space > 0 && atomic); 905 if (dontroute) 906 so->so_options |= SO_DONTROUTE; 907 if (flags & MSG_OOB) { 908 pru_flags = PRUS_OOB; 909 } else if ((flags & MSG_EOF) && 910 (so->so_proto->pr_flags & PR_IMPLOPCL) && 911 (resid == 0)) { 912 /* 913 * If the user set MSG_EOF, the protocol 914 * understands this flag and nothing left to 915 * send then use PRU_SEND_EOF instead of PRU_SEND. 916 */ 917 pru_flags = PRUS_EOF; 918 } else if (resid > 0 && space > 0) { 919 /* If there is more to send, set PRUS_MORETOCOME */ 920 pru_flags = PRUS_MORETOCOME; 921 } else { 922 pru_flags = 0; 923 } 924 /* 925 * XXX all the SS_CANTSENDMORE checks previously 926 * done could be out of date. We could have recieved 927 * a reset packet in an interrupt or maybe we slept 928 * while doing page faults in uiomove() etc. We could 929 * probably recheck again inside the splnet() protection 930 * here, but there are probably other places that this 931 * also happens. We must rethink this. 932 */ 933 error = so_pru_send(so, pru_flags, top, addr, control, td); 934 if (dontroute) 935 so->so_options &= ~SO_DONTROUTE; 936 clen = 0; 937 control = NULL; 938 top = NULL; 939 mp = ⊤ 940 if (error) 941 goto release; 942 } while (resid && space > 0); 943 } while (resid); 944 945 release: 946 ssb_unlock(&so->so_snd); 947 out: 948 if (top) 949 m_freem(top); 950 if (control) 951 m_freem(control); 952 return (error); 953 } 954 955 #ifdef INET 956 /* 957 * A specialization of sosend() for UDP based on protocol-specific knowledge: 958 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that 959 * sosendallatonce() returns true, 960 * the "atomic" variable is true, 961 * and sosendudp() blocks until space is available for the entire send. 962 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or 963 * PR_IMPLOPCL flags set. 964 * UDP has no out-of-band data. 965 * UDP has no control data. 966 * UDP does not support MSG_EOR. 967 */ 968 int 969 sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio, 970 struct mbuf *top, struct mbuf *control, int flags, struct thread *td) 971 { 972 size_t resid; 973 int error, pru_flags = 0; 974 int space; 975 976 if (td->td_lwp != NULL) 977 td->td_lwp->lwp_ru.ru_msgsnd++; 978 if (control) 979 m_freem(control); 980 981 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp")); 982 resid = uio ? uio->uio_resid : (size_t)top->m_pkthdr.len; 983 984 restart: 985 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 986 if (error) 987 goto out; 988 989 if (so->so_state & SS_CANTSENDMORE) 990 gotoerr(EPIPE); 991 if (so->so_error) { 992 error = so->so_error; 993 so->so_error = 0; 994 goto release; 995 } 996 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL) 997 gotoerr(EDESTADDRREQ); 998 if (resid > so->so_snd.ssb_hiwat) 999 gotoerr(EMSGSIZE); 1000 space = ssb_space(&so->so_snd); 1001 if (uio && (space < 0 || (size_t)space < resid)) { 1002 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 1003 gotoerr(EWOULDBLOCK); 1004 ssb_unlock(&so->so_snd); 1005 error = ssb_wait(&so->so_snd); 1006 if (error) 1007 goto out; 1008 goto restart; 1009 } 1010 1011 if (uio) { 1012 int hdrlen = max_hdr; 1013 1014 /* 1015 * We try to optimize out the additional mbuf 1016 * allocations in M_PREPEND() on output path, e.g. 1017 * - udp_output(), when it tries to prepend protocol 1018 * headers. 1019 * - Link layer output function, when it tries to 1020 * prepend link layer header. 1021 * 1022 * This probably will not benefit any data that will 1023 * be fragmented, so this optimization is only performed 1024 * when the size of data and max size of protocol+link 1025 * headers fit into one mbuf cluster. 1026 */ 1027 if (uio->uio_resid > MCLBYTES - hdrlen || 1028 !udp_sosend_prepend) { 1029 top = m_uiomove(uio); 1030 if (top == NULL) 1031 goto release; 1032 } else { 1033 int nsize; 1034 1035 top = m_getl(uio->uio_resid + hdrlen, MB_WAIT, 1036 MT_DATA, M_PKTHDR, &nsize); 1037 KASSERT(nsize >= uio->uio_resid + hdrlen, 1038 ("sosendudp invalid nsize %d, " 1039 "resid %zu, hdrlen %d", 1040 nsize, uio->uio_resid, hdrlen)); 1041 1042 top->m_len = uio->uio_resid; 1043 top->m_pkthdr.len = uio->uio_resid; 1044 top->m_data += hdrlen; 1045 1046 error = uiomove(mtod(top, caddr_t), top->m_len, uio); 1047 if (error) 1048 goto out; 1049 } 1050 } 1051 1052 if (flags & MSG_DONTROUTE) 1053 pru_flags |= PRUS_DONTROUTE; 1054 1055 if (udp_sosend_async && (flags & MSG_SYNC) == 0) { 1056 so_pru_send_async(so, pru_flags, top, addr, NULL, td); 1057 error = 0; 1058 } else { 1059 error = so_pru_send(so, pru_flags, top, addr, NULL, td); 1060 } 1061 top = NULL; /* sent or freed in lower layer */ 1062 1063 release: 1064 ssb_unlock(&so->so_snd); 1065 out: 1066 if (top) 1067 m_freem(top); 1068 return (error); 1069 } 1070 1071 int 1072 sosendtcp(struct socket *so, struct sockaddr *addr, struct uio *uio, 1073 struct mbuf *top, struct mbuf *control, int flags, 1074 struct thread *td) 1075 { 1076 struct mbuf **mp; 1077 struct mbuf *m; 1078 size_t resid; 1079 int space, len; 1080 int error, mlen; 1081 int allatonce; 1082 int pru_flags; 1083 1084 if (uio) { 1085 KKASSERT(top == NULL); 1086 allatonce = 0; 1087 resid = uio->uio_resid; 1088 } else { 1089 allatonce = 1; 1090 resid = (size_t)top->m_pkthdr.len; 1091 #ifdef INVARIANTS 1092 len = 0; 1093 for (m = top; m; m = m->m_next) 1094 len += m->m_len; 1095 KKASSERT(top->m_pkthdr.len == len); 1096 #endif 1097 } 1098 1099 /* 1100 * WARNING! resid is unsigned, space and len are signed. space 1101 * can wind up negative if the sockbuf is overcommitted. 1102 * 1103 * Also check to make sure that MSG_EOR isn't used on TCP 1104 */ 1105 if (flags & MSG_EOR) { 1106 error = EINVAL; 1107 goto out; 1108 } 1109 1110 if (control) { 1111 /* TCP doesn't do control messages (rights, creds, etc) */ 1112 if (control->m_len) { 1113 error = EINVAL; 1114 goto out; 1115 } 1116 m_freem(control); /* empty control, just free it */ 1117 control = NULL; 1118 } 1119 1120 if (td->td_lwp != NULL) 1121 td->td_lwp->lwp_ru.ru_msgsnd++; 1122 1123 #define gotoerr(errcode) { error = errcode; goto release; } 1124 1125 restart: 1126 error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags)); 1127 if (error) 1128 goto out; 1129 1130 do { 1131 if (so->so_state & SS_CANTSENDMORE) 1132 gotoerr(EPIPE); 1133 if (so->so_error) { 1134 error = so->so_error; 1135 so->so_error = 0; 1136 goto release; 1137 } 1138 if ((so->so_state & SS_ISCONNECTED) == 0 && 1139 (so->so_state & SS_ISCONFIRMING) == 0) 1140 gotoerr(ENOTCONN); 1141 if (allatonce && resid > so->so_snd.ssb_hiwat) 1142 gotoerr(EMSGSIZE); 1143 1144 space = ssb_space_prealloc(&so->so_snd); 1145 if (flags & MSG_OOB) 1146 space += 1024; 1147 if ((space < 0 || (size_t)space < resid) && !allatonce && 1148 space < so->so_snd.ssb_lowat) { 1149 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) 1150 gotoerr(EWOULDBLOCK); 1151 ssb_unlock(&so->so_snd); 1152 error = ssb_wait(&so->so_snd); 1153 if (error) 1154 goto out; 1155 goto restart; 1156 } 1157 mp = ⊤ 1158 do { 1159 int cnt = 0, async = 0; 1160 1161 if (uio == NULL) { 1162 /* 1163 * Data is prepackaged in "top". 1164 */ 1165 resid = 0; 1166 } else do { 1167 if (resid > INT_MAX) 1168 resid = INT_MAX; 1169 m = m_getl((int)resid, MB_WAIT, MT_DATA, 1170 top == NULL ? M_PKTHDR : 0, &mlen); 1171 if (top == NULL) { 1172 m->m_pkthdr.len = 0; 1173 m->m_pkthdr.rcvif = NULL; 1174 } 1175 len = imin((int)szmin(mlen, resid), space); 1176 space -= len; 1177 error = uiomove(mtod(m, caddr_t), (size_t)len, uio); 1178 resid = uio->uio_resid; 1179 m->m_len = len; 1180 *mp = m; 1181 top->m_pkthdr.len += len; 1182 if (error) 1183 goto release; 1184 mp = &m->m_next; 1185 if (resid == 0) 1186 break; 1187 ++cnt; 1188 } while (space > 0 && cnt < tcp_sosend_agglim); 1189 1190 if (tcp_sosend_async) 1191 async = 1; 1192 1193 if (flags & MSG_OOB) { 1194 pru_flags = PRUS_OOB; 1195 async = 0; 1196 } else if ((flags & MSG_EOF) && resid == 0) { 1197 pru_flags = PRUS_EOF; 1198 } else if (resid > 0 && space > 0) { 1199 /* If there is more to send, set PRUS_MORETOCOME */ 1200 pru_flags = PRUS_MORETOCOME; 1201 async = 1; 1202 } else { 1203 pru_flags = 0; 1204 } 1205 1206 if (flags & MSG_SYNC) 1207 async = 0; 1208 1209 /* 1210 * XXX all the SS_CANTSENDMORE checks previously 1211 * done could be out of date. We could have recieved 1212 * a reset packet in an interrupt or maybe we slept 1213 * while doing page faults in uiomove() etc. We could 1214 * probably recheck again inside the splnet() protection 1215 * here, but there are probably other places that this 1216 * also happens. We must rethink this. 1217 */ 1218 for (m = top; m; m = m->m_next) 1219 ssb_preallocstream(&so->so_snd, m); 1220 if (!async) { 1221 error = so_pru_send(so, pru_flags, top, 1222 NULL, NULL, td); 1223 } else { 1224 so_pru_send_async(so, pru_flags, top, 1225 NULL, NULL, td); 1226 error = 0; 1227 } 1228 1229 top = NULL; 1230 mp = ⊤ 1231 if (error) 1232 goto release; 1233 } while (resid && space > 0); 1234 } while (resid); 1235 1236 release: 1237 ssb_unlock(&so->so_snd); 1238 out: 1239 if (top) 1240 m_freem(top); 1241 if (control) 1242 m_freem(control); 1243 return (error); 1244 } 1245 #endif 1246 1247 /* 1248 * Implement receive operations on a socket. 1249 * 1250 * We depend on the way that records are added to the signalsockbuf 1251 * by sbappend*. In particular, each record (mbufs linked through m_next) 1252 * must begin with an address if the protocol so specifies, 1253 * followed by an optional mbuf or mbufs containing ancillary data, 1254 * and then zero or more mbufs of data. 1255 * 1256 * Although the signalsockbuf is locked, new data may still be appended. 1257 * A token inside the ssb_lock deals with MP issues and still allows 1258 * the network to access the socket if we block in a uio. 1259 * 1260 * The caller may receive the data as a single mbuf chain by supplying 1261 * an mbuf **mp0 for use in returning the chain. The uio is then used 1262 * only for the count in uio_resid. 1263 */ 1264 int 1265 soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, 1266 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1267 { 1268 struct mbuf *m, *n; 1269 struct mbuf *free_chain = NULL; 1270 int flags, len, error, offset; 1271 struct protosw *pr = so->so_proto; 1272 int moff, type = 0; 1273 size_t resid, orig_resid; 1274 1275 if (uio) 1276 resid = uio->uio_resid; 1277 else 1278 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1279 orig_resid = resid; 1280 1281 if (psa) 1282 *psa = NULL; 1283 if (controlp) 1284 *controlp = NULL; 1285 if (flagsp) 1286 flags = *flagsp &~ MSG_EOR; 1287 else 1288 flags = 0; 1289 if (flags & MSG_OOB) { 1290 m = m_get(MB_WAIT, MT_DATA); 1291 if (m == NULL) 1292 return (ENOBUFS); 1293 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1294 if (error) 1295 goto bad; 1296 if (sio) { 1297 do { 1298 sbappend(sio, m); 1299 KKASSERT(resid >= (size_t)m->m_len); 1300 resid -= (size_t)m->m_len; 1301 } while (resid > 0 && m); 1302 } else { 1303 do { 1304 uio->uio_resid = resid; 1305 error = uiomove(mtod(m, caddr_t), 1306 (int)szmin(resid, m->m_len), 1307 uio); 1308 resid = uio->uio_resid; 1309 m = m_free(m); 1310 } while (uio->uio_resid && error == 0 && m); 1311 } 1312 bad: 1313 if (m) 1314 m_freem(m); 1315 return (error); 1316 } 1317 if ((so->so_state & SS_ISCONFIRMING) && resid) 1318 so_pru_rcvd(so, 0); 1319 1320 /* 1321 * The token interlocks against the protocol thread while 1322 * ssb_lock is a blocking lock against other userland entities. 1323 */ 1324 lwkt_gettoken(&so->so_rcv.ssb_token); 1325 restart: 1326 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1327 if (error) 1328 goto done; 1329 1330 m = so->so_rcv.ssb_mb; 1331 /* 1332 * If we have less data than requested, block awaiting more 1333 * (subject to any timeout) if: 1334 * 1. the current count is less than the low water mark, or 1335 * 2. MSG_WAITALL is set, and it is possible to do the entire 1336 * receive operation at once if we block (resid <= hiwat). 1337 * 3. MSG_DONTWAIT is not set 1338 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1339 * we have to do the receive in sections, and thus risk returning 1340 * a short count if a timeout or signal occurs after we start. 1341 */ 1342 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1343 (size_t)so->so_rcv.ssb_cc < resid) && 1344 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1345 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) && 1346 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) { 1347 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1348 if (so->so_error) { 1349 if (m) 1350 goto dontblock; 1351 error = so->so_error; 1352 if ((flags & MSG_PEEK) == 0) 1353 so->so_error = 0; 1354 goto release; 1355 } 1356 if (so->so_state & SS_CANTRCVMORE) { 1357 if (m) 1358 goto dontblock; 1359 else 1360 goto release; 1361 } 1362 for (; m; m = m->m_next) { 1363 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 1364 m = so->so_rcv.ssb_mb; 1365 goto dontblock; 1366 } 1367 } 1368 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1369 (pr->pr_flags & PR_CONNREQUIRED)) { 1370 error = ENOTCONN; 1371 goto release; 1372 } 1373 if (resid == 0) 1374 goto release; 1375 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1376 error = EWOULDBLOCK; 1377 goto release; 1378 } 1379 ssb_unlock(&so->so_rcv); 1380 error = ssb_wait(&so->so_rcv); 1381 if (error) 1382 goto done; 1383 goto restart; 1384 } 1385 dontblock: 1386 if (uio && uio->uio_td && uio->uio_td->td_proc) 1387 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1388 1389 /* 1390 * note: m should be == sb_mb here. Cache the next record while 1391 * cleaning up. Note that calling m_free*() will break out critical 1392 * section. 1393 */ 1394 KKASSERT(m == so->so_rcv.ssb_mb); 1395 1396 /* 1397 * Skip any address mbufs prepending the record. 1398 */ 1399 if (pr->pr_flags & PR_ADDR) { 1400 KASSERT(m->m_type == MT_SONAME, ("receive 1a")); 1401 orig_resid = 0; 1402 if (psa) 1403 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1404 if (flags & MSG_PEEK) 1405 m = m->m_next; 1406 else 1407 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1408 } 1409 1410 /* 1411 * Skip any control mbufs prepending the record. 1412 */ 1413 #ifdef SCTP 1414 if (pr->pr_flags & PR_ADDR_OPT) { 1415 /* 1416 * For SCTP we may be getting a 1417 * whole message OR a partial delivery. 1418 */ 1419 if (m && m->m_type == MT_SONAME) { 1420 orig_resid = 0; 1421 if (psa) 1422 *psa = dup_sockaddr(mtod(m, struct sockaddr *)); 1423 if (flags & MSG_PEEK) 1424 m = m->m_next; 1425 else 1426 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1427 } 1428 } 1429 #endif /* SCTP */ 1430 while (m && m->m_type == MT_CONTROL && error == 0) { 1431 if (flags & MSG_PEEK) { 1432 if (controlp) 1433 *controlp = m_copy(m, 0, m->m_len); 1434 m = m->m_next; /* XXX race */ 1435 } else { 1436 if (controlp) { 1437 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1438 if (pr->pr_domain->dom_externalize && 1439 mtod(m, struct cmsghdr *)->cmsg_type == 1440 SCM_RIGHTS) 1441 error = (*pr->pr_domain->dom_externalize)(m); 1442 *controlp = m; 1443 m = n; 1444 } else { 1445 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1446 } 1447 } 1448 if (controlp && *controlp) { 1449 orig_resid = 0; 1450 controlp = &(*controlp)->m_next; 1451 } 1452 } 1453 1454 /* 1455 * flag OOB data. 1456 */ 1457 if (m) { 1458 type = m->m_type; 1459 if (type == MT_OOBDATA) 1460 flags |= MSG_OOB; 1461 } 1462 1463 /* 1464 * Copy to the UIO or mbuf return chain (*mp). 1465 */ 1466 moff = 0; 1467 offset = 0; 1468 while (m && resid > 0 && error == 0) { 1469 if (m->m_type == MT_OOBDATA) { 1470 if (type != MT_OOBDATA) 1471 break; 1472 } else if (type == MT_OOBDATA) 1473 break; 1474 else 1475 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1476 ("receive 3")); 1477 soclrstate(so, SS_RCVATMARK); 1478 len = (resid > INT_MAX) ? INT_MAX : resid; 1479 if (so->so_oobmark && len > so->so_oobmark - offset) 1480 len = so->so_oobmark - offset; 1481 if (len > m->m_len - moff) 1482 len = m->m_len - moff; 1483 1484 /* 1485 * Copy out to the UIO or pass the mbufs back to the SIO. 1486 * The SIO is dealt with when we eat the mbuf, but deal 1487 * with the resid here either way. 1488 */ 1489 if (uio) { 1490 uio->uio_resid = resid; 1491 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1492 resid = uio->uio_resid; 1493 if (error) 1494 goto release; 1495 } else { 1496 resid -= (size_t)len; 1497 } 1498 1499 /* 1500 * Eat the entire mbuf or just a piece of it 1501 */ 1502 if (len == m->m_len - moff) { 1503 if (m->m_flags & M_EOR) 1504 flags |= MSG_EOR; 1505 #ifdef SCTP 1506 if (m->m_flags & M_NOTIFICATION) 1507 flags |= MSG_NOTIFICATION; 1508 #endif /* SCTP */ 1509 if (flags & MSG_PEEK) { 1510 m = m->m_next; 1511 moff = 0; 1512 } else { 1513 if (sio) { 1514 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1515 sbappend(sio, m); 1516 m = n; 1517 } else { 1518 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1519 } 1520 } 1521 } else { 1522 if (flags & MSG_PEEK) { 1523 moff += len; 1524 } else { 1525 if (sio) { 1526 n = m_copym(m, 0, len, MB_WAIT); 1527 if (n) 1528 sbappend(sio, n); 1529 } 1530 m->m_data += len; 1531 m->m_len -= len; 1532 so->so_rcv.ssb_cc -= len; 1533 } 1534 } 1535 if (so->so_oobmark) { 1536 if ((flags & MSG_PEEK) == 0) { 1537 so->so_oobmark -= len; 1538 if (so->so_oobmark == 0) { 1539 sosetstate(so, SS_RCVATMARK); 1540 break; 1541 } 1542 } else { 1543 offset += len; 1544 if (offset == so->so_oobmark) 1545 break; 1546 } 1547 } 1548 if (flags & MSG_EOR) 1549 break; 1550 /* 1551 * If the MSG_WAITALL flag is set (for non-atomic socket), 1552 * we must not quit until resid == 0 or an error 1553 * termination. If a signal/timeout occurs, return 1554 * with a short count but without error. 1555 * Keep signalsockbuf locked against other readers. 1556 */ 1557 while ((flags & MSG_WAITALL) && m == NULL && 1558 resid > 0 && !sosendallatonce(so) && 1559 so->so_rcv.ssb_mb == NULL) { 1560 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1561 break; 1562 /* 1563 * The window might have closed to zero, make 1564 * sure we send an ack now that we've drained 1565 * the buffer or we might end up blocking until 1566 * the idle takes over (5 seconds). 1567 */ 1568 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1569 so_pru_rcvd(so, flags); 1570 error = ssb_wait(&so->so_rcv); 1571 if (error) { 1572 ssb_unlock(&so->so_rcv); 1573 error = 0; 1574 goto done; 1575 } 1576 m = so->so_rcv.ssb_mb; 1577 } 1578 } 1579 1580 /* 1581 * If an atomic read was requested but unread data still remains 1582 * in the record, set MSG_TRUNC. 1583 */ 1584 if (m && pr->pr_flags & PR_ATOMIC) 1585 flags |= MSG_TRUNC; 1586 1587 /* 1588 * Cleanup. If an atomic read was requested drop any unread data. 1589 */ 1590 if ((flags & MSG_PEEK) == 0) { 1591 if (m && (pr->pr_flags & PR_ATOMIC)) 1592 sbdroprecord(&so->so_rcv.sb); 1593 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb) 1594 so_pru_rcvd(so, flags); 1595 } 1596 1597 if (orig_resid == resid && orig_resid && 1598 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 1599 ssb_unlock(&so->so_rcv); 1600 goto restart; 1601 } 1602 1603 if (flagsp) 1604 *flagsp |= flags; 1605 release: 1606 ssb_unlock(&so->so_rcv); 1607 done: 1608 lwkt_reltoken(&so->so_rcv.ssb_token); 1609 if (free_chain) 1610 m_freem(free_chain); 1611 return (error); 1612 } 1613 1614 int 1615 sorecvtcp(struct socket *so, struct sockaddr **psa, struct uio *uio, 1616 struct sockbuf *sio, struct mbuf **controlp, int *flagsp) 1617 { 1618 struct mbuf *m, *n; 1619 struct mbuf *free_chain = NULL; 1620 int flags, len, error, offset; 1621 struct protosw *pr = so->so_proto; 1622 int moff; 1623 size_t resid, orig_resid; 1624 1625 if (uio) 1626 resid = uio->uio_resid; 1627 else 1628 resid = (size_t)(sio->sb_climit - sio->sb_cc); 1629 orig_resid = resid; 1630 1631 if (psa) 1632 *psa = NULL; 1633 if (controlp) 1634 *controlp = NULL; 1635 if (flagsp) 1636 flags = *flagsp &~ MSG_EOR; 1637 else 1638 flags = 0; 1639 if (flags & MSG_OOB) { 1640 m = m_get(MB_WAIT, MT_DATA); 1641 if (m == NULL) 1642 return (ENOBUFS); 1643 error = so_pru_rcvoob(so, m, flags & MSG_PEEK); 1644 if (error) 1645 goto bad; 1646 if (sio) { 1647 do { 1648 sbappend(sio, m); 1649 KKASSERT(resid >= (size_t)m->m_len); 1650 resid -= (size_t)m->m_len; 1651 } while (resid > 0 && m); 1652 } else { 1653 do { 1654 uio->uio_resid = resid; 1655 error = uiomove(mtod(m, caddr_t), 1656 (int)szmin(resid, m->m_len), 1657 uio); 1658 resid = uio->uio_resid; 1659 m = m_free(m); 1660 } while (uio->uio_resid && error == 0 && m); 1661 } 1662 bad: 1663 if (m) 1664 m_freem(m); 1665 return (error); 1666 } 1667 1668 /* 1669 * The token interlocks against the protocol thread while 1670 * ssb_lock is a blocking lock against other userland entities. 1671 */ 1672 lwkt_gettoken(&so->so_rcv.ssb_token); 1673 restart: 1674 error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags)); 1675 if (error) 1676 goto done; 1677 1678 m = so->so_rcv.ssb_mb; 1679 /* 1680 * If we have less data than requested, block awaiting more 1681 * (subject to any timeout) if: 1682 * 1. the current count is less than the low water mark, or 1683 * 2. MSG_WAITALL is set, and it is possible to do the entire 1684 * receive operation at once if we block (resid <= hiwat). 1685 * 3. MSG_DONTWAIT is not set 1686 * If MSG_WAITALL is set but resid is larger than the receive buffer, 1687 * we have to do the receive in sections, and thus risk returning 1688 * a short count if a timeout or signal occurs after we start. 1689 */ 1690 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 1691 (size_t)so->so_rcv.ssb_cc < resid) && 1692 (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat || 1693 ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)))) { 1694 KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1")); 1695 if (so->so_error) { 1696 if (m) 1697 goto dontblock; 1698 error = so->so_error; 1699 if ((flags & MSG_PEEK) == 0) 1700 so->so_error = 0; 1701 goto release; 1702 } 1703 if (so->so_state & SS_CANTRCVMORE) { 1704 if (m) 1705 goto dontblock; 1706 else 1707 goto release; 1708 } 1709 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1710 (pr->pr_flags & PR_CONNREQUIRED)) { 1711 error = ENOTCONN; 1712 goto release; 1713 } 1714 if (resid == 0) 1715 goto release; 1716 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) { 1717 error = EWOULDBLOCK; 1718 goto release; 1719 } 1720 ssb_unlock(&so->so_rcv); 1721 error = ssb_wait(&so->so_rcv); 1722 if (error) 1723 goto done; 1724 goto restart; 1725 } 1726 dontblock: 1727 if (uio && uio->uio_td && uio->uio_td->td_proc) 1728 uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++; 1729 1730 /* 1731 * note: m should be == sb_mb here. Cache the next record while 1732 * cleaning up. Note that calling m_free*() will break out critical 1733 * section. 1734 */ 1735 KKASSERT(m == so->so_rcv.ssb_mb); 1736 1737 /* 1738 * Copy to the UIO or mbuf return chain (*mp). 1739 */ 1740 moff = 0; 1741 offset = 0; 1742 while (m && resid > 0 && error == 0) { 1743 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, 1744 ("receive 3")); 1745 1746 soclrstate(so, SS_RCVATMARK); 1747 len = (resid > INT_MAX) ? INT_MAX : resid; 1748 if (so->so_oobmark && len > so->so_oobmark - offset) 1749 len = so->so_oobmark - offset; 1750 if (len > m->m_len - moff) 1751 len = m->m_len - moff; 1752 1753 /* 1754 * Copy out to the UIO or pass the mbufs back to the SIO. 1755 * The SIO is dealt with when we eat the mbuf, but deal 1756 * with the resid here either way. 1757 */ 1758 if (uio) { 1759 uio->uio_resid = resid; 1760 error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1761 resid = uio->uio_resid; 1762 if (error) 1763 goto release; 1764 } else { 1765 resid -= (size_t)len; 1766 } 1767 1768 /* 1769 * Eat the entire mbuf or just a piece of it 1770 */ 1771 if (len == m->m_len - moff) { 1772 if (flags & MSG_PEEK) { 1773 m = m->m_next; 1774 moff = 0; 1775 } else { 1776 if (sio) { 1777 n = sbunlinkmbuf(&so->so_rcv.sb, m, NULL); 1778 sbappend(sio, m); 1779 m = n; 1780 } else { 1781 m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain); 1782 } 1783 } 1784 } else { 1785 if (flags & MSG_PEEK) { 1786 moff += len; 1787 } else { 1788 if (sio) { 1789 n = m_copym(m, 0, len, MB_WAIT); 1790 if (n) 1791 sbappend(sio, n); 1792 } 1793 m->m_data += len; 1794 m->m_len -= len; 1795 so->so_rcv.ssb_cc -= len; 1796 } 1797 } 1798 if (so->so_oobmark) { 1799 if ((flags & MSG_PEEK) == 0) { 1800 so->so_oobmark -= len; 1801 if (so->so_oobmark == 0) { 1802 sosetstate(so, SS_RCVATMARK); 1803 break; 1804 } 1805 } else { 1806 offset += len; 1807 if (offset == so->so_oobmark) 1808 break; 1809 } 1810 } 1811 /* 1812 * If the MSG_WAITALL flag is set (for non-atomic socket), 1813 * we must not quit until resid == 0 or an error 1814 * termination. If a signal/timeout occurs, return 1815 * with a short count but without error. 1816 * Keep signalsockbuf locked against other readers. 1817 */ 1818 while ((flags & MSG_WAITALL) && m == NULL && 1819 resid > 0 && !sosendallatonce(so) && 1820 so->so_rcv.ssb_mb == NULL) { 1821 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1822 break; 1823 /* 1824 * The window might have closed to zero, make 1825 * sure we send an ack now that we've drained 1826 * the buffer or we might end up blocking until 1827 * the idle takes over (5 seconds). 1828 */ 1829 if (so->so_pcb) 1830 so_pru_rcvd_async(so); 1831 error = ssb_wait(&so->so_rcv); 1832 if (error) { 1833 ssb_unlock(&so->so_rcv); 1834 error = 0; 1835 goto done; 1836 } 1837 m = so->so_rcv.ssb_mb; 1838 } 1839 } 1840 1841 /* 1842 * Cleanup. If an atomic read was requested drop any unread data. 1843 */ 1844 if ((flags & MSG_PEEK) == 0) { 1845 if (so->so_pcb) 1846 so_pru_rcvd_async(so); 1847 } 1848 1849 if (orig_resid == resid && orig_resid && 1850 (so->so_state & SS_CANTRCVMORE) == 0) { 1851 ssb_unlock(&so->so_rcv); 1852 goto restart; 1853 } 1854 1855 if (flagsp) 1856 *flagsp |= flags; 1857 release: 1858 ssb_unlock(&so->so_rcv); 1859 done: 1860 lwkt_reltoken(&so->so_rcv.ssb_token); 1861 if (free_chain) 1862 m_freem(free_chain); 1863 return (error); 1864 } 1865 1866 /* 1867 * Shut a socket down. Note that we do not get a frontend lock as we 1868 * want to be able to shut the socket down even if another thread is 1869 * blocked in a read(), thus waking it up. 1870 */ 1871 int 1872 soshutdown(struct socket *so, int how) 1873 { 1874 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR)) 1875 return (EINVAL); 1876 1877 if (how != SHUT_WR) { 1878 /*ssb_lock(&so->so_rcv, M_WAITOK);*/ 1879 sorflush(so); 1880 /*ssb_unlock(&so->so_rcv);*/ 1881 } 1882 if (how != SHUT_RD) 1883 return (so_pru_shutdown(so)); 1884 return (0); 1885 } 1886 1887 void 1888 sorflush(struct socket *so) 1889 { 1890 struct signalsockbuf *ssb = &so->so_rcv; 1891 struct protosw *pr = so->so_proto; 1892 struct signalsockbuf asb; 1893 1894 atomic_set_int(&ssb->ssb_flags, SSB_NOINTR); 1895 1896 lwkt_gettoken(&ssb->ssb_token); 1897 socantrcvmore(so); 1898 asb = *ssb; 1899 1900 /* 1901 * Can't just blow up the ssb structure here 1902 */ 1903 bzero(&ssb->sb, sizeof(ssb->sb)); 1904 ssb->ssb_timeo = 0; 1905 ssb->ssb_lowat = 0; 1906 ssb->ssb_hiwat = 0; 1907 ssb->ssb_mbmax = 0; 1908 atomic_clear_int(&ssb->ssb_flags, SSB_CLEAR_MASK); 1909 1910 if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) 1911 (*pr->pr_domain->dom_dispose)(asb.ssb_mb); 1912 ssb_release(&asb, so); 1913 1914 lwkt_reltoken(&ssb->ssb_token); 1915 } 1916 1917 #ifdef INET 1918 static int 1919 do_setopt_accept_filter(struct socket *so, struct sockopt *sopt) 1920 { 1921 struct accept_filter_arg *afap = NULL; 1922 struct accept_filter *afp; 1923 struct so_accf *af = so->so_accf; 1924 int error = 0; 1925 1926 /* do not set/remove accept filters on non listen sockets */ 1927 if ((so->so_options & SO_ACCEPTCONN) == 0) { 1928 error = EINVAL; 1929 goto out; 1930 } 1931 1932 /* removing the filter */ 1933 if (sopt == NULL) { 1934 if (af != NULL) { 1935 if (af->so_accept_filter != NULL && 1936 af->so_accept_filter->accf_destroy != NULL) { 1937 af->so_accept_filter->accf_destroy(so); 1938 } 1939 if (af->so_accept_filter_str != NULL) { 1940 kfree(af->so_accept_filter_str, M_ACCF); 1941 } 1942 kfree(af, M_ACCF); 1943 so->so_accf = NULL; 1944 } 1945 so->so_options &= ~SO_ACCEPTFILTER; 1946 return (0); 1947 } 1948 /* adding a filter */ 1949 /* must remove previous filter first */ 1950 if (af != NULL) { 1951 error = EINVAL; 1952 goto out; 1953 } 1954 /* don't put large objects on the kernel stack */ 1955 afap = kmalloc(sizeof(*afap), M_TEMP, M_WAITOK); 1956 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap); 1957 afap->af_name[sizeof(afap->af_name)-1] = '\0'; 1958 afap->af_arg[sizeof(afap->af_arg)-1] = '\0'; 1959 if (error) 1960 goto out; 1961 afp = accept_filt_get(afap->af_name); 1962 if (afp == NULL) { 1963 error = ENOENT; 1964 goto out; 1965 } 1966 af = kmalloc(sizeof(*af), M_ACCF, M_WAITOK | M_ZERO); 1967 if (afp->accf_create != NULL) { 1968 if (afap->af_name[0] != '\0') { 1969 int len = strlen(afap->af_name) + 1; 1970 1971 af->so_accept_filter_str = kmalloc(len, M_ACCF, 1972 M_WAITOK); 1973 strcpy(af->so_accept_filter_str, afap->af_name); 1974 } 1975 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg); 1976 if (af->so_accept_filter_arg == NULL) { 1977 kfree(af->so_accept_filter_str, M_ACCF); 1978 kfree(af, M_ACCF); 1979 so->so_accf = NULL; 1980 error = EINVAL; 1981 goto out; 1982 } 1983 } 1984 af->so_accept_filter = afp; 1985 so->so_accf = af; 1986 so->so_options |= SO_ACCEPTFILTER; 1987 out: 1988 if (afap != NULL) 1989 kfree(afap, M_TEMP); 1990 return (error); 1991 } 1992 #endif /* INET */ 1993 1994 /* 1995 * Perhaps this routine, and sooptcopyout(), below, ought to come in 1996 * an additional variant to handle the case where the option value needs 1997 * to be some kind of integer, but not a specific size. 1998 * In addition to their use here, these functions are also called by the 1999 * protocol-level pr_ctloutput() routines. 2000 */ 2001 int 2002 sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2003 { 2004 return soopt_to_kbuf(sopt, buf, len, minlen); 2005 } 2006 2007 int 2008 soopt_to_kbuf(struct sockopt *sopt, void *buf, size_t len, size_t minlen) 2009 { 2010 size_t valsize; 2011 2012 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2013 KKASSERT(kva_p(buf)); 2014 2015 /* 2016 * If the user gives us more than we wanted, we ignore it, 2017 * but if we don't get the minimum length the caller 2018 * wants, we return EINVAL. On success, sopt->sopt_valsize 2019 * is set to however much we actually retrieved. 2020 */ 2021 if ((valsize = sopt->sopt_valsize) < minlen) 2022 return EINVAL; 2023 if (valsize > len) 2024 sopt->sopt_valsize = valsize = len; 2025 2026 bcopy(sopt->sopt_val, buf, valsize); 2027 return 0; 2028 } 2029 2030 2031 int 2032 sosetopt(struct socket *so, struct sockopt *sopt) 2033 { 2034 int error, optval; 2035 struct linger l; 2036 struct timeval tv; 2037 u_long val; 2038 struct signalsockbuf *sotmp; 2039 2040 error = 0; 2041 sopt->sopt_dir = SOPT_SET; 2042 if (sopt->sopt_level != SOL_SOCKET) { 2043 if (so->so_proto && so->so_proto->pr_ctloutput) { 2044 return (so_pr_ctloutput(so, sopt)); 2045 } 2046 error = ENOPROTOOPT; 2047 } else { 2048 switch (sopt->sopt_name) { 2049 #ifdef INET 2050 case SO_ACCEPTFILTER: 2051 error = do_setopt_accept_filter(so, sopt); 2052 if (error) 2053 goto bad; 2054 break; 2055 #endif /* INET */ 2056 case SO_LINGER: 2057 error = sooptcopyin(sopt, &l, sizeof l, sizeof l); 2058 if (error) 2059 goto bad; 2060 2061 so->so_linger = l.l_linger; 2062 if (l.l_onoff) 2063 so->so_options |= SO_LINGER; 2064 else 2065 so->so_options &= ~SO_LINGER; 2066 break; 2067 2068 case SO_DEBUG: 2069 case SO_KEEPALIVE: 2070 case SO_DONTROUTE: 2071 case SO_USELOOPBACK: 2072 case SO_BROADCAST: 2073 case SO_REUSEADDR: 2074 case SO_REUSEPORT: 2075 case SO_OOBINLINE: 2076 case SO_TIMESTAMP: 2077 case SO_NOSIGPIPE: 2078 error = sooptcopyin(sopt, &optval, sizeof optval, 2079 sizeof optval); 2080 if (error) 2081 goto bad; 2082 if (optval) 2083 so->so_options |= sopt->sopt_name; 2084 else 2085 so->so_options &= ~sopt->sopt_name; 2086 break; 2087 2088 case SO_SNDBUF: 2089 case SO_RCVBUF: 2090 case SO_SNDLOWAT: 2091 case SO_RCVLOWAT: 2092 error = sooptcopyin(sopt, &optval, sizeof optval, 2093 sizeof optval); 2094 if (error) 2095 goto bad; 2096 2097 /* 2098 * Values < 1 make no sense for any of these 2099 * options, so disallow them. 2100 */ 2101 if (optval < 1) { 2102 error = EINVAL; 2103 goto bad; 2104 } 2105 2106 switch (sopt->sopt_name) { 2107 case SO_SNDBUF: 2108 case SO_RCVBUF: 2109 if (ssb_reserve(sopt->sopt_name == SO_SNDBUF ? 2110 &so->so_snd : &so->so_rcv, (u_long)optval, 2111 so, 2112 &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) { 2113 error = ENOBUFS; 2114 goto bad; 2115 } 2116 sotmp = (sopt->sopt_name == SO_SNDBUF) ? 2117 &so->so_snd : &so->so_rcv; 2118 atomic_clear_int(&sotmp->ssb_flags, 2119 SSB_AUTOSIZE); 2120 break; 2121 2122 /* 2123 * Make sure the low-water is never greater than 2124 * the high-water. 2125 */ 2126 case SO_SNDLOWAT: 2127 so->so_snd.ssb_lowat = 2128 (optval > so->so_snd.ssb_hiwat) ? 2129 so->so_snd.ssb_hiwat : optval; 2130 atomic_clear_int(&so->so_snd.ssb_flags, 2131 SSB_AUTOLOWAT); 2132 break; 2133 case SO_RCVLOWAT: 2134 so->so_rcv.ssb_lowat = 2135 (optval > so->so_rcv.ssb_hiwat) ? 2136 so->so_rcv.ssb_hiwat : optval; 2137 atomic_clear_int(&so->so_rcv.ssb_flags, 2138 SSB_AUTOLOWAT); 2139 break; 2140 } 2141 break; 2142 2143 case SO_SNDTIMEO: 2144 case SO_RCVTIMEO: 2145 error = sooptcopyin(sopt, &tv, sizeof tv, 2146 sizeof tv); 2147 if (error) 2148 goto bad; 2149 2150 /* assert(hz > 0); */ 2151 if (tv.tv_sec < 0 || tv.tv_sec > INT_MAX / hz || 2152 tv.tv_usec < 0 || tv.tv_usec >= 1000000) { 2153 error = EDOM; 2154 goto bad; 2155 } 2156 /* assert(tick > 0); */ 2157 /* assert(ULONG_MAX - INT_MAX >= 1000000); */ 2158 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / ustick; 2159 if (val > INT_MAX) { 2160 error = EDOM; 2161 goto bad; 2162 } 2163 if (val == 0 && tv.tv_usec != 0) 2164 val = 1; 2165 2166 switch (sopt->sopt_name) { 2167 case SO_SNDTIMEO: 2168 so->so_snd.ssb_timeo = val; 2169 break; 2170 case SO_RCVTIMEO: 2171 so->so_rcv.ssb_timeo = val; 2172 break; 2173 } 2174 break; 2175 default: 2176 error = ENOPROTOOPT; 2177 break; 2178 } 2179 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) { 2180 (void) so_pr_ctloutput(so, sopt); 2181 } 2182 } 2183 bad: 2184 return (error); 2185 } 2186 2187 /* Helper routine for getsockopt */ 2188 int 2189 sooptcopyout(struct sockopt *sopt, const void *buf, size_t len) 2190 { 2191 soopt_from_kbuf(sopt, buf, len); 2192 return 0; 2193 } 2194 2195 void 2196 soopt_from_kbuf(struct sockopt *sopt, const void *buf, size_t len) 2197 { 2198 size_t valsize; 2199 2200 if (len == 0) { 2201 sopt->sopt_valsize = 0; 2202 return; 2203 } 2204 2205 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2206 KKASSERT(kva_p(buf)); 2207 2208 /* 2209 * Documented get behavior is that we always return a value, 2210 * possibly truncated to fit in the user's buffer. 2211 * Traditional behavior is that we always tell the user 2212 * precisely how much we copied, rather than something useful 2213 * like the total amount we had available for her. 2214 * Note that this interface is not idempotent; the entire answer must 2215 * generated ahead of time. 2216 */ 2217 valsize = szmin(len, sopt->sopt_valsize); 2218 sopt->sopt_valsize = valsize; 2219 if (sopt->sopt_val != 0) { 2220 bcopy(buf, sopt->sopt_val, valsize); 2221 } 2222 } 2223 2224 int 2225 sogetopt(struct socket *so, struct sockopt *sopt) 2226 { 2227 int error, optval; 2228 long optval_l; 2229 struct linger l; 2230 struct timeval tv; 2231 #ifdef INET 2232 struct accept_filter_arg *afap; 2233 #endif 2234 2235 error = 0; 2236 sopt->sopt_dir = SOPT_GET; 2237 if (sopt->sopt_level != SOL_SOCKET) { 2238 if (so->so_proto && so->so_proto->pr_ctloutput) { 2239 return (so_pr_ctloutput(so, sopt)); 2240 } else 2241 return (ENOPROTOOPT); 2242 } else { 2243 switch (sopt->sopt_name) { 2244 #ifdef INET 2245 case SO_ACCEPTFILTER: 2246 if ((so->so_options & SO_ACCEPTCONN) == 0) 2247 return (EINVAL); 2248 afap = kmalloc(sizeof(*afap), M_TEMP, 2249 M_WAITOK | M_ZERO); 2250 if ((so->so_options & SO_ACCEPTFILTER) != 0) { 2251 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name); 2252 if (so->so_accf->so_accept_filter_str != NULL) 2253 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str); 2254 } 2255 error = sooptcopyout(sopt, afap, sizeof(*afap)); 2256 kfree(afap, M_TEMP); 2257 break; 2258 #endif /* INET */ 2259 2260 case SO_LINGER: 2261 l.l_onoff = so->so_options & SO_LINGER; 2262 l.l_linger = so->so_linger; 2263 error = sooptcopyout(sopt, &l, sizeof l); 2264 break; 2265 2266 case SO_USELOOPBACK: 2267 case SO_DONTROUTE: 2268 case SO_DEBUG: 2269 case SO_KEEPALIVE: 2270 case SO_REUSEADDR: 2271 case SO_REUSEPORT: 2272 case SO_BROADCAST: 2273 case SO_OOBINLINE: 2274 case SO_TIMESTAMP: 2275 case SO_NOSIGPIPE: 2276 optval = so->so_options & sopt->sopt_name; 2277 integer: 2278 error = sooptcopyout(sopt, &optval, sizeof optval); 2279 break; 2280 2281 case SO_TYPE: 2282 optval = so->so_type; 2283 goto integer; 2284 2285 case SO_ERROR: 2286 optval = so->so_error; 2287 so->so_error = 0; 2288 goto integer; 2289 2290 case SO_SNDBUF: 2291 optval = so->so_snd.ssb_hiwat; 2292 goto integer; 2293 2294 case SO_RCVBUF: 2295 optval = so->so_rcv.ssb_hiwat; 2296 goto integer; 2297 2298 case SO_SNDLOWAT: 2299 optval = so->so_snd.ssb_lowat; 2300 goto integer; 2301 2302 case SO_RCVLOWAT: 2303 optval = so->so_rcv.ssb_lowat; 2304 goto integer; 2305 2306 case SO_SNDTIMEO: 2307 case SO_RCVTIMEO: 2308 optval = (sopt->sopt_name == SO_SNDTIMEO ? 2309 so->so_snd.ssb_timeo : so->so_rcv.ssb_timeo); 2310 2311 tv.tv_sec = optval / hz; 2312 tv.tv_usec = (optval % hz) * ustick; 2313 error = sooptcopyout(sopt, &tv, sizeof tv); 2314 break; 2315 2316 case SO_SNDSPACE: 2317 optval_l = ssb_space(&so->so_snd); 2318 error = sooptcopyout(sopt, &optval_l, sizeof(optval_l)); 2319 break; 2320 2321 default: 2322 error = ENOPROTOOPT; 2323 break; 2324 } 2325 return (error); 2326 } 2327 } 2328 2329 /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ 2330 int 2331 soopt_getm(struct sockopt *sopt, struct mbuf **mp) 2332 { 2333 struct mbuf *m, *m_prev; 2334 int sopt_size = sopt->sopt_valsize, msize; 2335 2336 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA, 2337 0, &msize); 2338 if (m == NULL) 2339 return (ENOBUFS); 2340 m->m_len = min(msize, sopt_size); 2341 sopt_size -= m->m_len; 2342 *mp = m; 2343 m_prev = m; 2344 2345 while (sopt_size > 0) { 2346 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, 2347 MT_DATA, 0, &msize); 2348 if (m == NULL) { 2349 m_freem(*mp); 2350 return (ENOBUFS); 2351 } 2352 m->m_len = min(msize, sopt_size); 2353 sopt_size -= m->m_len; 2354 m_prev->m_next = m; 2355 m_prev = m; 2356 } 2357 return (0); 2358 } 2359 2360 /* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */ 2361 int 2362 soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) 2363 { 2364 soopt_to_mbuf(sopt, m); 2365 return 0; 2366 } 2367 2368 void 2369 soopt_to_mbuf(struct sockopt *sopt, struct mbuf *m) 2370 { 2371 size_t valsize; 2372 void *val; 2373 2374 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2375 KKASSERT(kva_p(m)); 2376 if (sopt->sopt_val == NULL) 2377 return; 2378 val = sopt->sopt_val; 2379 valsize = sopt->sopt_valsize; 2380 while (m != NULL && valsize >= m->m_len) { 2381 bcopy(val, mtod(m, char *), m->m_len); 2382 valsize -= m->m_len; 2383 val = (caddr_t)val + m->m_len; 2384 m = m->m_next; 2385 } 2386 if (m != NULL) /* should be allocated enoughly at ip6_sooptmcopyin() */ 2387 panic("ip6_sooptmcopyin"); 2388 } 2389 2390 /* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */ 2391 int 2392 soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) 2393 { 2394 return soopt_from_mbuf(sopt, m); 2395 } 2396 2397 int 2398 soopt_from_mbuf(struct sockopt *sopt, struct mbuf *m) 2399 { 2400 struct mbuf *m0 = m; 2401 size_t valsize = 0; 2402 size_t maxsize; 2403 void *val; 2404 2405 KKASSERT(!sopt->sopt_val || kva_p(sopt->sopt_val)); 2406 KKASSERT(kva_p(m)); 2407 if (sopt->sopt_val == NULL) 2408 return 0; 2409 val = sopt->sopt_val; 2410 maxsize = sopt->sopt_valsize; 2411 while (m != NULL && maxsize >= m->m_len) { 2412 bcopy(mtod(m, char *), val, m->m_len); 2413 maxsize -= m->m_len; 2414 val = (caddr_t)val + m->m_len; 2415 valsize += m->m_len; 2416 m = m->m_next; 2417 } 2418 if (m != NULL) { 2419 /* enough soopt buffer should be given from user-land */ 2420 m_freem(m0); 2421 return (EINVAL); 2422 } 2423 sopt->sopt_valsize = valsize; 2424 return 0; 2425 } 2426 2427 void 2428 sohasoutofband(struct socket *so) 2429 { 2430 if (so->so_sigio != NULL) 2431 pgsigio(so->so_sigio, SIGURG, 0); 2432 KNOTE(&so->so_rcv.ssb_kq.ki_note, NOTE_OOB); 2433 } 2434 2435 int 2436 sokqfilter(struct file *fp, struct knote *kn) 2437 { 2438 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2439 struct signalsockbuf *ssb; 2440 2441 switch (kn->kn_filter) { 2442 case EVFILT_READ: 2443 if (so->so_options & SO_ACCEPTCONN) 2444 kn->kn_fop = &solisten_filtops; 2445 else 2446 kn->kn_fop = &soread_filtops; 2447 ssb = &so->so_rcv; 2448 break; 2449 case EVFILT_WRITE: 2450 kn->kn_fop = &sowrite_filtops; 2451 ssb = &so->so_snd; 2452 break; 2453 case EVFILT_EXCEPT: 2454 kn->kn_fop = &soexcept_filtops; 2455 ssb = &so->so_rcv; 2456 break; 2457 default: 2458 return (EOPNOTSUPP); 2459 } 2460 2461 knote_insert(&ssb->ssb_kq.ki_note, kn); 2462 atomic_set_int(&ssb->ssb_flags, SSB_KNOTE); 2463 return (0); 2464 } 2465 2466 static void 2467 filt_sordetach(struct knote *kn) 2468 { 2469 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2470 2471 knote_remove(&so->so_rcv.ssb_kq.ki_note, kn); 2472 if (SLIST_EMPTY(&so->so_rcv.ssb_kq.ki_note)) 2473 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_KNOTE); 2474 } 2475 2476 /*ARGSUSED*/ 2477 static int 2478 filt_soread(struct knote *kn, long hint) 2479 { 2480 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2481 2482 if (kn->kn_sfflags & NOTE_OOB) { 2483 if ((so->so_oobmark || (so->so_state & SS_RCVATMARK))) { 2484 kn->kn_fflags |= NOTE_OOB; 2485 return (1); 2486 } 2487 return (0); 2488 } 2489 kn->kn_data = so->so_rcv.ssb_cc; 2490 2491 if (so->so_state & SS_CANTRCVMORE) { 2492 /* 2493 * Only set NODATA if all data has been exhausted. 2494 */ 2495 if (kn->kn_data == 0) 2496 kn->kn_flags |= EV_NODATA; 2497 kn->kn_flags |= EV_EOF; 2498 kn->kn_fflags = so->so_error; 2499 return (1); 2500 } 2501 if (so->so_error) /* temporary udp error */ 2502 return (1); 2503 if (kn->kn_sfflags & NOTE_LOWAT) 2504 return (kn->kn_data >= kn->kn_sdata); 2505 return ((kn->kn_data >= so->so_rcv.ssb_lowat) || 2506 !TAILQ_EMPTY(&so->so_comp)); 2507 } 2508 2509 static void 2510 filt_sowdetach(struct knote *kn) 2511 { 2512 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2513 2514 knote_remove(&so->so_snd.ssb_kq.ki_note, kn); 2515 if (SLIST_EMPTY(&so->so_snd.ssb_kq.ki_note)) 2516 atomic_clear_int(&so->so_snd.ssb_flags, SSB_KNOTE); 2517 } 2518 2519 /*ARGSUSED*/ 2520 static int 2521 filt_sowrite(struct knote *kn, long hint) 2522 { 2523 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2524 2525 kn->kn_data = ssb_space(&so->so_snd); 2526 if (so->so_state & SS_CANTSENDMORE) { 2527 kn->kn_flags |= (EV_EOF | EV_NODATA); 2528 kn->kn_fflags = so->so_error; 2529 return (1); 2530 } 2531 if (so->so_error) /* temporary udp error */ 2532 return (1); 2533 if (((so->so_state & SS_ISCONNECTED) == 0) && 2534 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 2535 return (0); 2536 if (kn->kn_sfflags & NOTE_LOWAT) 2537 return (kn->kn_data >= kn->kn_sdata); 2538 return (kn->kn_data >= so->so_snd.ssb_lowat); 2539 } 2540 2541 /*ARGSUSED*/ 2542 static int 2543 filt_solisten(struct knote *kn, long hint) 2544 { 2545 struct socket *so = (struct socket *)kn->kn_fp->f_data; 2546 2547 kn->kn_data = so->so_qlen; 2548 return (! TAILQ_EMPTY(&so->so_comp)); 2549 } 2550