/*	$OpenBSD: uipc_socket.c,v 1.362 2025/01/21 17:41:39 mvs Exp $	*/
/*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/unpcb.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/pool.h>
#include <sys/atomic.h>
#include <sys/rwlock.h>
#include <sys/time.h>
#include <sys/refcnt.h>

#ifdef DDB
#include <machine/db_machdep.h>
#endif

void	sbsync(struct sockbuf *, struct mbuf *);

int	sosplice(struct socket *, int, off_t, struct timeval *);
void	sounsplice(struct socket *, struct socket *, int);
void	soidle(void *);
void	sotask(void *);
int	somove(struct socket *, int);
void	sorflush(struct socket *);

void	filt_sordetach(struct knote *kn);
int	filt_soread(struct knote *kn, long hint);
void	filt_sowdetach(struct knote *kn);
int	filt_sowrite(struct knote *kn, long hint);
int	filt_soexcept(struct knote *kn, long hint);

int	filt_sowmodify(struct kevent *kev, struct knote *kn);
int	filt_sowprocess(struct knote *kn, struct kevent *kev);

int	filt_sormodify(struct kevent *kev, struct knote *kn);
int	filt_sorprocess(struct knote *kn, struct kevent *kev);

const struct filterops soread_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_soread,
	.f_modify	= filt_sormodify,
	.f_process	= filt_sorprocess,
};

const struct filterops sowrite_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_sowdetach,
	.f_event	= filt_sowrite,
	.f_modify	= filt_sowmodify,
	.f_process	= filt_sowprocess,
};

const struct filterops soexcept_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_soexcept,
	.f_modify	= filt_sormodify,
	.f_process	= filt_sorprocess,
};

#ifndef SOMINCONN
#define SOMINCONN 80
#endif /* SOMINCONN */

int	somaxconn = SOMAXCONN;
int	sominconn = SOMINCONN;

struct pool socket_pool;
#ifdef SOCKET_SPLICE
struct pool sosplice_pool;
struct taskq *sosplice_taskq;
struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk");
#endif

void
soinit(void)
{
	pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0,
	    "sockpl", NULL);
#ifdef SOCKET_SPLICE
	pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0,
	    "sosppl", NULL);
#endif
}

struct socket *
soalloc(const struct protosw *prp, int wait)
{
	const struct domain *dp = prp->pr_domain;
	const char *dom_name = dp->dom_name;
	struct socket *so;

	so = pool_get(&socket_pool, (wait == M_WAIT ? PR_WAITOK : PR_NOWAIT) |
	    PR_ZERO);
	if (so == NULL)
		return (NULL);

#ifdef WITNESS
	/*
	 * XXX: Make WITNESS happy.  AF_INET and AF_INET6 sockets could be
	 * spliced together.
	 */
	switch (dp->dom_family) {
	case AF_INET:
	case AF_INET6:
		dom_name = "inet46";
		break;
	}
#endif

	refcnt_init_trace(&so->so_refcnt, DT_REFCNT_IDX_SOCKET);
	rw_init_flags(&so->so_lock, dom_name, RWL_DUPOK);
	rw_init(&so->so_rcv.sb_lock, "sbufrcv");
	rw_init(&so->so_snd.sb_lock, "sbufsnd");
	mtx_init_flags(&so->so_rcv.sb_mtx, IPL_MPFLOOR, "sbrcv", 0);
	mtx_init_flags(&so->so_snd.sb_mtx, IPL_MPFLOOR, "sbsnd", 0);
	klist_init_mutex(&so->so_rcv.sb_klist, &so->so_rcv.sb_mtx);
	klist_init_mutex(&so->so_snd.sb_klist, &so->so_snd.sb_mtx);
	sigio_init(&so->so_sigio);
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);

	so->so_snd.sb_flags |= SB_MTXLOCK;
	so->so_rcv.sb_flags |= SB_MTXLOCK;

	return (so);
}

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
int
socreate(int dom, struct socket **aso, int type, int proto)
{
	struct proc *p = curproc;		/* XXX */
	const struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL || prp->pr_usrreqs == NULL)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(prp, M_WAIT);
	so->so_type = type;
	if (suser(p) == 0)
		so->so_state = SS_PRIV;
	so->so_ruid = p->p_ucred->cr_ruid;
	so->so_euid = p->p_ucred->cr_uid;
	so->so_rgid = p->p_ucred->cr_rgid;
	so->so_egid = p->p_ucred->cr_gid;
	so->so_cpid = p->p_p->ps_pid;
	so->so_proto = prp;
	so->so_snd.sb_timeo_nsecs = INFSLP;
	so->so_rcv.sb_timeo_nsecs = INFSLP;

	solock(so);
	error = pru_attach(so, proto, M_WAIT);
	if (error) {
		so->so_state |= SS_NOFDREF;
		/* sofree() calls sounlock(). */
		sofree(so, 0);
		return (error);
	}
	sounlock(so);
	*aso = so;
	return (0);
}
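/*
 * Illustrative sketch (not part of this file): the socket(2) system call
 * layer drives this routine roughly as
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, 0);	// proto 0: default
 *	if (error)
 *		return (error);
 *
 * With proto == 0 the protocol is looked up by type via pffindtype();
 * otherwise pffindproto() must return an entry of the requested type, or
 * EPROTOTYPE is returned.
 */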
int
sobind(struct socket *so, struct mbuf *nam, struct proc *p)
{
	soassertlocked(so);
	return pru_bind(so, nam, p);
}

int
solisten(struct socket *so, int backlog)
{
	int somaxconn_local = atomic_load_int(&somaxconn);
	int sominconn_local = atomic_load_int(&sominconn);
	int error;

	switch (so->so_type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		break;
	default:
		return (EOPNOTSUPP);
	}

	soassertlocked(so);

	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING))
		return (EINVAL);
#ifdef SOCKET_SPLICE
	if (isspliced(so) || issplicedback(so))
		return (EOPNOTSUPP);
#endif /* SOCKET_SPLICE */
	error = pru_listen(so);
	if (error)
		return (error);
	if (TAILQ_FIRST(&so->so_q) == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn_local)
		backlog = somaxconn_local;
	if (backlog < sominconn_local)
		backlog = sominconn_local;
	so->so_qlimit = backlog;
	return (0);
}
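/*
 * Worked example of the backlog clamping above, assuming the default
 * somaxconn of SOMAXCONN (128) and sominconn of SOMINCONN (80):
 *
 *	listen(s, 5)	-> so_qlimit = 80	(raised to sominconn)
 *	listen(s, 100)	-> so_qlimit = 100
 *	listen(s, -1)	-> so_qlimit = 128	(negative: use somaxconn)
 *	listen(s, 500)	-> so_qlimit = 128	(capped at somaxconn)
 */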
void
sorele(struct socket *so)
{
	if (refcnt_rele(&so->so_refcnt) == 0)
		return;

	sigio_free(&so->so_sigio);
	klist_free(&so->so_rcv.sb_klist);
	klist_free(&so->so_snd.sb_klist);

	mtx_enter(&so->so_snd.sb_mtx);
	sbrelease(so, &so->so_snd);
	mtx_leave(&so->so_snd.sb_mtx);

	if (so->so_proto->pr_flags & PR_RIGHTS &&
	    so->so_proto->pr_domain->dom_dispose)
		(*so->so_proto->pr_domain->dom_dispose)(so->so_rcv.sb_mb);
	m_purge(so->so_rcv.sb_mb);

#ifdef SOCKET_SPLICE
	if (so->so_sp)
		pool_put(&sosplice_pool, so->so_sp);
#endif
	pool_put(&socket_pool, so);
}

#define SOSP_FREEING_READ	1
#define SOSP_FREEING_WRITE	2
void
sofree(struct socket *so, int keep_lock)
{
	int persocket = solock_persocket(so);

	soassertlocked(so);

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		if (!keep_lock)
			sounlock(so);
		return;
	}
	if (so->so_head) {
		struct socket *head = so->so_head;

		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (so->so_onq == &head->so_q) {
			if (!keep_lock)
				sounlock(so);
			return;
		}

		if (persocket) {
			soref(head);
			sounlock(so);
			solock(head);
			solock(so);

			if (so->so_onq != &head->so_q0) {
				sounlock(so);
				sounlock(head);
				sorele(head);
				return;
			}
		}

		soqremque(so, 0);

		if (persocket) {
			sounlock(head);
			sorele(head);
		}
	}

	if (!keep_lock)
		sounlock(so);
	sorele(so);
}

static inline uint64_t
solinger_nsec(struct socket *so)
{
	if (so->so_linger == 0)
		return INFSLP;

	return SEC_TO_NSEC(so->so_linger);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int flags)
{
	struct socket *so2;
	int error = 0;

	solock(so);
	/* Revoke async IO early.  There is a final revocation in sofree(). */
	sigio_free(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if (so->so_pcb == NULL)
			goto discard;
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (flags & MSG_DONTWAIT))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sosleep_nsec(so, &so->so_timeo,
				    PSOCK | PCATCH, "netcls",
				    solinger_nsec(so));
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;
		error2 = pru_detach(so);
		if (error == 0)
			error = error2;
	}
	if (so->so_options & SO_ACCEPTCONN) {
		int persocket = solock_persocket(so);

		while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) {
			soref(so2);
			solock(so2);
			(void) soqremque(so2, 0);
			sounlock(so);
			soabort(so2);
			sounlock(so2);
			sorele(so2);
			solock(so);
		}
		while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) {
			soref(so2);
			solock_nonet(so2);
			(void) soqremque(so2, 1);
			if (persocket)
				sounlock(so);
			soabort(so2);
			sounlock_nonet(so2);
			sorele(so2);
			if (persocket)
				solock(so);
		}
	}
discard:
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		struct socket *soback;

		sounlock(so);
		mtx_enter(&so->so_snd.sb_mtx);
		/*
		 * A concurrent sounsplice() locks the `sb_mtx' mutexes of
		 * both `so_snd' and `so_rcv' before unsplicing the sockets.
		 */
		if ((soback = so->so_sp->ssp_soback) == NULL) {
			mtx_leave(&so->so_snd.sb_mtx);
			goto notsplicedback;
		}
		soref(soback);
		mtx_leave(&so->so_snd.sb_mtx);

		/*
		 * `so' can only be unspliced, and never spliced again.
		 * Thus if the issplicedback(so) check is positive, the
		 * socket is still spliced and `ssp_soback' points to the
		 * same socket as `soback'.
		 */
		sblock(&soback->so_rcv, SBL_WAIT | SBL_NOINTR);
		if (issplicedback(so)) {
			int freeing = SOSP_FREEING_WRITE;

			if (so->so_sp->ssp_soback == so)
				freeing |= SOSP_FREEING_READ;
			sounsplice(so->so_sp->ssp_soback, so, freeing);
		}
		sbunlock(&soback->so_rcv);
		sorele(soback);

notsplicedback:
		sblock(&so->so_rcv, SBL_WAIT | SBL_NOINTR);
		if (isspliced(so)) {
			struct socket *sosp;
			int freeing = SOSP_FREEING_READ;

			if (so == so->so_sp->ssp_socket)
				freeing |= SOSP_FREEING_WRITE;
			sosp = soref(so->so_sp->ssp_socket);
			sounsplice(so, so->so_sp->ssp_socket, freeing);
			sorele(sosp);
		}
		sbunlock(&so->so_rcv);

		timeout_del_barrier(&so->so_sp->ssp_idleto);
		task_del(sosplice_taskq, &so->so_sp->ssp_task);
		taskq_barrier(sosplice_taskq);

		solock(so);
	}
#endif /* SOCKET_SPLICE */

	if (so->so_state & SS_NOFDREF)
		panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state |= SS_NOFDREF;

	/* sofree() calls sounlock(). */
	sofree(so, 0);
	return (error);
}

void
soabort(struct socket *so)
{
	soassertlocked(so);
	pru_abort(so);
}

int
soaccept(struct socket *so, struct mbuf *nam)
{
	int error = 0;

	soassertlocked(so);

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = pru_accept(so, nam);
	else
		error = ECONNABORTED;
	return (error);
}

int
soconnect(struct socket *so, struct mbuf *nam)
{
	int error;

	soassertlocked(so);

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = pru_connect(so, nam);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int persocket, error;

	if ((persocket = solock_persocket(so1)))
		solock_pair(so1, so2);
	else
		solock(so1);

	error = pru_connect2(so1, so2);

	if (persocket)
		sounlock(so2);
	sounlock(so1);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	soassertlocked(so);

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = pru_disconnect(so);
	return (error);
}

int m_getuio(struct mbuf **, int, long, struct uio *);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
    struct mbuf *control, int flags)
{
	long space, clen = 0;
	size_t resid;
	int error;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/* MSG_EOR on a SOCK_STREAM socket is invalid. */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		m_freem(top);
		m_freem(control);
		return (EINVAL);
	}
	if (uio && uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgsnd++;
	if (control) {
		/*
		 * In theory clen should be unsigned (since control->m_len is).
		 * However, space must be signed, as it might be less than 0
		 * if we over-committed, and we must use a signed comparison
		 * of space and clen.
		 */
		clen = control->m_len;
		/* reserve extra space for AF_UNIX's internalize */
		if (so->so_proto->pr_domain->dom_family == AF_UNIX &&
		    clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) &&
		    mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS)
			clen = CMSG_SPACE(
			    (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) *
			    (sizeof(struct fdpass) / sizeof(int)));
	}

#define	snderr(errno)	{ error = errno; goto release; }

restart:
	if ((error = sblock(&so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	mtx_enter(&so->so_snd.sb_mtx);
	so->so_snd.sb_state |= SS_ISSENDING;
	do {
		if (so->so_snd.sb_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if ((error = READ_ONCE(so->so_error))) {
			so->so_error = 0;
			snderr(error);
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (!(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == NULL)
				snderr(EDESTADDRREQ);
		}
		space = sbspace_locked(so, &so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if (so->so_proto->pr_domain->dom_family == AF_UNIX) {
			if (atomic && resid > so->so_snd.sb_hiwat)
				snderr(EMSGSIZE);
		} else {
			if (clen > so->so_snd.sb_hiwat ||
			    (atomic && resid > so->so_snd.sb_hiwat - clen))
				snderr(EMSGSIZE);
		}
		if (space < clen ||
		    (space - clen < resid &&
		    (atomic || space < so->so_snd.sb_lowat))) {
			if (flags & MSG_DONTWAIT)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(so, &so->so_snd);
			so->so_snd.sb_state &= ~SS_ISSENDING;
			mtx_leave(&so->so_snd.sb_mtx);
			if (error)
				goto out;
			goto restart;
		}
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				mtx_leave(&so->so_snd.sb_mtx);
				error = m_getuio(&top, atomic, space, uio);
				mtx_enter(&so->so_snd.sb_mtx);
				if (error)
					goto release;
				space -= top->m_pkthdr.len;
				resid = uio->uio_resid;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			}
			if (resid == 0)
				so->so_snd.sb_state &= ~SS_ISSENDING;
			if (top && so->so_options & SO_ZEROIZE)
				top->m_flags |= M_ZEROIZE;
			mtx_leave(&so->so_snd.sb_mtx);
			solock_shared(so);
			if (flags & MSG_OOB)
				error = pru_sendoob(so, top, addr, control);
			else
				error = pru_send(so, top, addr, control);
			sounlock_shared(so);
			mtx_enter(&so->so_snd.sb_mtx);
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	so->so_snd.sb_state &= ~SS_ISSENDING;
	mtx_leave(&so->so_snd.sb_mtx);
	sbunlock(&so->so_snd);
out:
	m_freem(top);
	m_freem(control);
	return (error);
}
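/*
 * Illustrative examples of the space checks above (numbers assumed, not
 * taken from this file): with sb_hiwat == 16384 on an atomic (datagram)
 * socket, a 20000-byte message fails immediately with EMSGSIZE since it
 * can never fit.  A 10000-byte message finding only 4000 bytes of space
 * either returns EWOULDBLOCK (MSG_DONTWAIT) or sleeps in sbwait() until
 * the whole message fits.  On a stream socket the loop instead sends
 * partial chunks for as long as space stays at or above sb_lowat.
 */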
int
m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio)
{
	struct mbuf *m, *top = NULL;
	struct mbuf **nextp = &top;
	u_long len, mlen;
	size_t resid = uio->uio_resid;
	int error;

	do {
		if (top == NULL) {
			MGETHDR(m, M_WAIT, MT_DATA);
			mlen = MHLEN;
		} else {
			MGET(m, M_WAIT, MT_DATA);
			mlen = MLEN;
		}
		/* chain mbufs together */
		*nextp = m;
		nextp = &m->m_next;

		resid = ulmin(resid, space);
		if (resid >= MINCLSIZE) {
			MCLGETL(m, M_NOWAIT, ulmin(resid, MAXMCLBYTES));
			if ((m->m_flags & M_EXT) == 0)
				MCLGETL(m, M_NOWAIT, MCLBYTES);
			if ((m->m_flags & M_EXT) == 0)
				goto nopages;
			mlen = m->m_ext.ext_size;
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m->m_data += max_hdr;
		} else {
nopages:
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m_align(m, len);
		}

		error = uiomove(mtod(m, caddr_t), len, uio);
		if (error) {
			m_freem(top);
			return (error);
		}

		/* adjust counters */
		resid = uio->uio_resid;
		space -= len;
		m->m_len = len;
		top->m_pkthdr.len += len;

		/* Is there more space and more data? */
	} while (space > 0 && resid > 0);

	*mp = top;
	return 0;
}

/*
 * Following replacement or removal of the first mbuf on the first
 * mbuf chain of a socket buffer, push necessary state changes back
 * into the socket buffer so that other consumers see the values
 * consistently.  'nextrecord' is the caller's locally stored value of
 * the original value of sb->sb_mb->m_nextpkt which must be restored
 * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
 */
void
sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
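/*
 * Sketch of the sockbuf layout that sbsync() keeps consistent: records
 * are chained via m_nextpkt, and the mbufs within a record via m_next.
 *
 *	sb_mb -> [MT_SONAME] -> [MT_CONTROL] -> [MT_DATA] -> [MT_DATA]
 *	   |
 *	   m_nextpkt
 *	   v
 *	[record 2] -> ...
 *
 * sb_lastrecord points at the final record, sb_mbtail at the last mbuf
 * of that record; both must be fixed up when the lead mbuf changes.
 */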
/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking the network stack for the entire time here,
 * we release the solock() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp,
    socklen_t controllen)
{
	struct mbuf *m, **mp;
	struct mbuf *cm;
	u_long len, offset, moff;
	int flags, error, error2, type, uio_error = 0;
	const struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	size_t resid, orig_resid = uio->uio_resid;

	mp = mp0;
	if (paddr)
		*paddr = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock_shared(so);
		error = pru_rcvoob(so, m, flags & MSG_PEEK);
		sounlock_shared(so);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    ulmin(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;

restart:
	if ((error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) != 0)
		return (error);
	mtx_enter(&so->so_rcv.sb_mtx);

	m = so->so_rcv.sb_mb;
#ifdef SOCKET_SPLICE
	if (isspliced(so))
		m = NULL;
#endif /* SOCKET_SPLICE */
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
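	/*
	 * Example (values assumed): with sb_lowat == 1, a request for
	 * 1000 bytes returns as soon as any data is queued, possibly
	 * short.  With MSG_WAITALL and sb_hiwat >= 1000 it instead
	 * sleeps until all 1000 bytes have arrived, unless a signal,
	 * timeout or EOF intervenes.
	 */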
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
#ifdef SOCKET_SPLICE
			if (!isspliced(so))
#endif /* SOCKET_SPLICE */
				panic("receive 1: so %p, so_type %d, sb_cc %lu",
				    so, so->so_type, so->so_rcv.sb_cc);
#endif
		if ((error2 = READ_ONCE(so->so_error))) {
			if (m)
				goto dontblock;
			error = error2;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_rcv.sb_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else if (so->so_rcv.sb_cc == 0)
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0 && controlp == NULL)
			goto release;
		if (flags & MSG_DONTWAIT) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");

		sbunlock(&so->so_rcv);
		error = sbwait(so, &so->so_rcv);
		mtx_leave(&so->so_rcv.sb_mtx);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before operations that
	 * may sleep, and re-reading them afterwards.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 */
	if (uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(so, &so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		int skip = 0;
		if (flags & MSG_PEEK) {
			if (mtod(m, struct cmsghdr *)->cmsg_type ==
			    SCM_RIGHTS) {
				/* don't leak internalized SCM_RIGHTS msgs */
				skip = 1;
			} else if (controlp)
				*controlp = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(so, &so->so_rcv, m);
			so->so_rcv.sb_mb = m->m_next;
			m->m_nextpkt = m->m_next = NULL;
			cm = m;
			m = so->so_rcv.sb_mb;
			sbsync(&so->so_rcv, nextrecord);
			if (controlp) {
				if (pr->pr_domain->dom_externalize) {
					mtx_leave(&so->so_rcv.sb_mtx);
					error =
					    (*pr->pr_domain->dom_externalize)
					    (cm, controllen, flags);
					mtx_enter(&so->so_rcv.sb_mtx);
				}
				*controlp = cm;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (pr->pr_domain->dom_dispose) {
					mtx_leave(&so->so_rcv.sb_mtx);
					pr->pr_domain->dom_dispose(cm);
					mtx_enter(&so->so_rcv.sb_mtx);
				}
				m_free(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		if (controlp && !skip)
			controlp = &(*controlp)->m_next;
		orig_resid = 0;
	}

	/* If m is non-NULL, we have some data to read. */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
		if (m->m_flags & M_BCAST)
			flags |= MSG_BCAST;
		if (m->m_flags & M_MCAST)
			flags |= MSG_MCAST;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else if (m->m_type == MT_CONTROL) {
			/*
			 * If there is more than one control message in the
			 * stream, we do a short read.  The next one can be
			 * received or disposed of by another system call.
			 */
			break;
#ifdef DIAGNOSTIC
		} else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) {
			panic("receive 3: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		}
		so->so_rcv.sb_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must be consistent (lead mbuf and record
		 * pointers in sync) when we drop the mutex for the copy;
		 * we must note any additions to the sockbuf when we
		 * reacquire it.
		 */
		if (mp == NULL && uio_error == 0) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			resid = uio->uio_resid;
			mtx_leave(&so->so_rcv.sb_mtx);
			uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			mtx_enter(&so->so_rcv.sb_mtx);
			if (uio_error)
				uio->uio_resid = resid - len;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
				orig_resid = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(so, &so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
				orig_resid = 0;
			} else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
				so->so_rcv.sb_datacc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_rcv.sb_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep the sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_rcv.sb_state & SS_CANTRCVMORE ||
			    so->so_error)
				break;
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			if (sbwait(so, &so->so_rcv)) {
				mtx_leave(&so->so_rcv.sb_mtx);
				sbunlock(&so->so_rcv);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(so, &so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD) {
			mtx_leave(&so->so_rcv.sb_mtx);
			solock_shared(so);
			pru_rcvd(so);
			sounlock_shared(so);
			mtx_enter(&so->so_rcv.sb_mtx);
		}
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 &&
	    (so->so_rcv.sb_state & SS_CANTRCVMORE) == 0) {
		mtx_leave(&so->so_rcv.sb_mtx);
		sbunlock(&so->so_rcv);
		goto restart;
	}

	if (uio_error)
		error = uio_error;

	if (flagsp)
		*flagsp |= flags;
release:
	mtx_leave(&so->so_rcv.sb_mtx);
	sbunlock(&so->so_rcv);
	return (error);
}

int
soshutdown(struct socket *so, int how)
{
	int error = 0;

	switch (how) {
	case SHUT_RD:
		sorflush(so);
		break;
	case SHUT_RDWR:
		sorflush(so);
		/* FALLTHROUGH */
	case SHUT_WR:
		solock(so);
		error = pru_shutdown(so);
		sounlock(so);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct mbuf *m;
	const struct protosw *pr = so->so_proto;
	int error;

	error = sblock(sb, SBL_WAIT | SBL_NOINTR);
	/* with SBL_WAIT and SBL_NOINTR sblock() must not fail */
	KASSERT(error == 0);

	solock_shared(so);
	socantrcvmore(so);
	mtx_enter(&sb->sb_mtx);
	m = sb->sb_mb;
	memset(&sb->sb_startzero, 0,
	    (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero);
	sb->sb_timeo_nsecs = INFSLP;
	mtx_leave(&sb->sb_mtx);
	sounlock_shared(so);
	sbunlock(sb);

	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(m);
	m_purge(m);
}

#ifdef SOCKET_SPLICE

#define so_splicelen	so_sp->ssp_len
#define so_splicemax	so_sp->ssp_max
#define so_idletv	so_sp->ssp_idletv
#define so_idleto	so_sp->ssp_idleto
#define so_splicetask	so_sp->ssp_task

void
sosplice_solock_pair(struct socket *so1, struct socket *so2)
{
	NET_LOCK_SHARED();

	if (so1 == so2)
		rw_enter_write(&so1->so_lock);
	else if (so1 < so2) {
		rw_enter_write(&so1->so_lock);
		rw_enter_write(&so2->so_lock);
	} else {
		rw_enter_write(&so2->so_lock);
		rw_enter_write(&so1->so_lock);
	}
}

void
sosplice_sounlock_pair(struct socket *so1, struct socket *so2)
{
	if (so1 == so2)
		rw_exit_write(&so1->so_lock);
	else if (so1 < so2) {
		rw_exit_write(&so2->so_lock);
		rw_exit_write(&so1->so_lock);
	} else {
		rw_exit_write(&so1->so_lock);
		rw_exit_write(&so2->so_lock);
	}

	NET_UNLOCK_SHARED();
}
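/*
 * Note: the pair-locking helpers above always take the two socket locks
 * in ascending address order (and release them in the reverse order),
 * so two threads splicing the same pair of sockets in opposite
 * directions cannot deadlock against each other.
 */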
int
sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
{
	struct file *fp;
	struct socket *sosp;
	struct taskq *tq;
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SPLICE) == 0)
		return (EPROTONOSUPPORT);
	if (max && max < 0)
		return (EINVAL);
	if (tv && (tv->tv_sec < 0 || !timerisvalid(tv)))
		return (EINVAL);

	/* If no fd is given, unsplice by removing existing link. */
	if (fd < 0) {
		if ((error = sblock(&so->so_rcv, SBL_WAIT)) != 0)
			return (error);
		if (so->so_sp && so->so_sp->ssp_socket) {
			sosp = soref(so->so_sp->ssp_socket);
			sounsplice(so, so->so_sp->ssp_socket, 0);
			sorele(sosp);
		} else
			error = EPROTO;
		sbunlock(&so->so_rcv);
		return (error);
	}

	if (sosplice_taskq == NULL) {
		rw_enter_write(&sosplice_lock);
		if (sosplice_taskq == NULL) {
			tq = taskq_create("sosplice", 1, IPL_SOFTNET,
			    TASKQ_MPSAFE);
			if (tq == NULL) {
				rw_exit_write(&sosplice_lock);
				return (ENOMEM);
			}
			/* Ensure the taskq is fully visible to other CPUs. */
			membar_producer();
			sosplice_taskq = tq;
		}
		rw_exit_write(&sosplice_lock);
	} else {
		/* Ensure the taskq is fully visible on this CPU. */
		membar_consumer();
	}

	/* Find sosp, the drain socket where data will be spliced into. */
	if ((error = getsock(curproc, fd, &fp)) != 0)
		return (error);
	sosp = fp->f_data;

	if (sosp->so_proto->pr_usrreqs->pru_send !=
	    so->so_proto->pr_usrreqs->pru_send) {
		error = EPROTONOSUPPORT;
		goto frele;
	}

	if ((error = sblock(&so->so_rcv, SBL_WAIT)) != 0)
		goto frele;
	if ((error = sblock(&sosp->so_snd, SBL_WAIT)) != 0) {
		sbunlock(&so->so_rcv);
		goto frele;
	}
	sosplice_solock_pair(so, sosp);

	if ((so->so_options & SO_ACCEPTCONN) ||
	    (sosp->so_options & SO_ACCEPTCONN)) {
		error = EOPNOTSUPP;
		goto release;
	}
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		error = ENOTCONN;
		goto release;
	}
	if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) {
		error = ENOTCONN;
		goto release;
	}
	if (so->so_sp == NULL) {
		struct sosplice *so_sp;

		so_sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		timeout_set_flags(&so_sp->ssp_idleto, soidle, so,
		    KCLOCK_NONE, TIMEOUT_PROC | TIMEOUT_MPSAFE);
		task_set(&so_sp->ssp_task, sotask, so);

		so->so_sp = so_sp;
	}
	if (sosp->so_sp == NULL) {
		struct sosplice *so_sp;

		so_sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		timeout_set_flags(&so_sp->ssp_idleto, soidle, sosp,
		    KCLOCK_NONE, TIMEOUT_PROC | TIMEOUT_MPSAFE);
		task_set(&so_sp->ssp_task, sotask, sosp);

		sosp->so_sp = so_sp;
	}
	if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) {
		error = EBUSY;
		goto release;
	}

	so->so_splicelen = 0;
	so->so_splicemax = max;
	if (tv)
		so->so_idletv = *tv;
	else
		timerclear(&so->so_idletv);

	/*
	 * To prevent sorwakeup() calling somove() before this somove()
	 * has finished, the socket buffers are not marked as spliced yet.
	 */

	/* Splice so and sosp together. */
	mtx_enter(&so->so_rcv.sb_mtx);
	mtx_enter(&sosp->so_snd.sb_mtx);
	so->so_sp->ssp_socket = sosp;
	sosp->so_sp->ssp_soback = so;
	mtx_leave(&sosp->so_snd.sb_mtx);
	mtx_leave(&so->so_rcv.sb_mtx);

	sosplice_sounlock_pair(so, sosp);
	sbunlock(&sosp->so_snd);

	if (somove(so, M_WAIT)) {
		mtx_enter(&so->so_rcv.sb_mtx);
		mtx_enter(&sosp->so_snd.sb_mtx);
		so->so_rcv.sb_flags |= SB_SPLICE;
		sosp->so_snd.sb_flags |= SB_SPLICE;
		mtx_leave(&sosp->so_snd.sb_mtx);
		mtx_leave(&so->so_rcv.sb_mtx);
	}

	sbunlock(&so->so_rcv);
	FRELE(fp, curproc);
	return (0);

release:
	sosplice_sounlock_pair(so, sosp);
	sbunlock(&sosp->so_snd);
	sbunlock(&so->so_rcv);
frele:
	FRELE(fp, curproc);
	return (error);
}
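/*
 * Illustrative userland view of this interface (not part of this file),
 * matching the SO_SPLICE handling in sosetopt() below: splice all data
 * arriving on `src' into `drain', with a 30 second idle timeout.
 *
 *	struct splice sp = { .sp_fd = drain, .sp_max = 0 };
 *	sp.sp_idle.tv_sec = 30;
 *	if (setsockopt(src, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp)) == -1)
 *		err(1, "SO_SPLICE");
 *
 * Passing a plain int file descriptor splices without limit or timeout;
 * passing an empty option unsplices.
 */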
void
sounsplice(struct socket *so, struct socket *sosp, int freeing)
{
	sbassertlocked(&so->so_rcv);

	mtx_enter(&so->so_rcv.sb_mtx);
	mtx_enter(&sosp->so_snd.sb_mtx);
	so->so_rcv.sb_flags &= ~SB_SPLICE;
	sosp->so_snd.sb_flags &= ~SB_SPLICE;
	so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
	mtx_leave(&sosp->so_snd.sb_mtx);
	mtx_leave(&so->so_rcv.sb_mtx);

	task_del(sosplice_taskq, &so->so_splicetask);
	timeout_del(&so->so_idleto);

	/* Do not wakeup a socket that is about to be freed. */
	if ((freeing & SOSP_FREEING_READ) == 0) {
		int readable;

		solock_shared(so);
		mtx_enter(&so->so_rcv.sb_mtx);
		readable = soreadable(so);
		mtx_leave(&so->so_rcv.sb_mtx);
		if (readable)
			sorwakeup(so);
		sounlock_shared(so);
	}
	if ((freeing & SOSP_FREEING_WRITE) == 0) {
		solock_shared(sosp);
		if (sowriteable(sosp))
			sowwakeup(sosp);
		sounlock_shared(sosp);
	}
}

void
soidle(void *arg)
{
	struct socket *so = arg;

	sblock(&so->so_rcv, SBL_WAIT | SBL_NOINTR);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		struct socket *sosp;

		WRITE_ONCE(so->so_error, ETIMEDOUT);
		sosp = soref(so->so_sp->ssp_socket);
		sounsplice(so, so->so_sp->ssp_socket, 0);
		sorele(sosp);
	}
	sbunlock(&so->so_rcv);
}

void
sotask(void *arg)
{
	struct socket *so = arg;
	int doyield = 0;

	sblock(&so->so_rcv, SBL_WAIT | SBL_NOINTR);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		if (so->so_proto->pr_flags & PR_WANTRCVD)
			doyield = 1;
		somove(so, M_DONTWAIT);
	}
	sbunlock(&so->so_rcv);

	if (doyield) {
		/* Avoid userland starvation. */
		yield();
	}
}

/*
 * Move data from the receive buffer of the spliced source socket to
 * the send buffer of the drain socket.  Try to move as much as possible
 * in one big chunk.  It is a TCP only implementation.
 * A return value of 0 means splicing has finished, 1 means continue.
 */
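/*
 * Note on lock ordering: somove() and sounsplice() both take the
 * source's `so_rcv.sb_mtx' before the drain's `so_snd.sb_mtx', the
 * same order used when the splice is established in sosplice().
 */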
int
somove(struct socket *so, int wait)
{
	struct socket *sosp = so->so_sp->ssp_socket;
	struct mbuf *m, **mp, *nextrecord;
	u_long len, off, oobmark;
	long space;
	int error = 0, maxreached = 0, unsplice = 0;
	unsigned int rcvstate;

	sbassertlocked(&so->so_rcv);

	if (so->so_proto->pr_flags & PR_WANTRCVD)
		sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);

	mtx_enter(&so->so_rcv.sb_mtx);
	mtx_enter(&sosp->so_snd.sb_mtx);

nextpkt:
	if ((error = READ_ONCE(so->so_error)))
		goto release;
	if (sosp->so_snd.sb_state & SS_CANTSENDMORE) {
		error = EPIPE;
		goto release;
	}

	error = READ_ONCE(sosp->so_error);
	if (error) {
		if (error != ETIMEDOUT && error != EFBIG && error != ELOOP)
			goto release;
		error = 0;
	}
	if ((sosp->so_state & SS_ISCONNECTED) == 0)
		goto release;

	/* Calculate how many bytes can be copied now. */
	len = so->so_rcv.sb_datacc;
	if (so->so_splicemax) {
		KASSERT(so->so_splicelen < so->so_splicemax);
		if (so->so_splicemax <= so->so_splicelen + len) {
			len = so->so_splicemax - so->so_splicelen;
			maxreached = 1;
		}
	}
	space = sbspace_locked(sosp, &sosp->so_snd);
	if (so->so_oobmark && so->so_oobmark < len &&
	    so->so_oobmark < space + 1024)
		space += 1024;
	if (space <= 0) {
		maxreached = 0;
		goto release;
	}
	if (space < len) {
		maxreached = 0;
		if (space < sosp->so_snd.sb_lowat)
			goto release;
		len = space;
	}
	sosp->so_snd.sb_state |= SS_ISSENDING;

	SBLASTRECORDCHK(&so->so_rcv, "somove 1");
	SBLASTMBUFCHK(&so->so_rcv, "somove 1");
	m = so->so_rcv.sb_mb;
	if (m == NULL)
		goto release;
	nextrecord = m->m_nextpkt;

	/* Drop address and control information not used with splicing. */
	if (so->so_proto->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("somove soname: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
#endif
		m = m->m_next;
	}
	while (m && m->m_type == MT_CONTROL)
		m = m->m_next;
	if (m == NULL) {
		sbdroprecord(so, &so->so_rcv);
		if (so->so_proto->pr_flags & PR_WANTRCVD) {
			mtx_leave(&sosp->so_snd.sb_mtx);
			mtx_leave(&so->so_rcv.sb_mtx);
			solock_shared(so);
			pru_rcvd(so);
			sounlock_shared(so);
			mtx_enter(&so->so_rcv.sb_mtx);
			mtx_enter(&sosp->so_snd.sb_mtx);
		}
		goto nextpkt;
	}

	/*
	 * By splicing sockets connected to localhost, userland might create
	 * a loop.  Dissolve the splice with an error if a loop is detected
	 * by the counter.
	 *
	 * If we deal with a looped broadcast/multicast packet, we bail out
	 * with no error to suppress splice termination.
	 */
	if ((m->m_flags & M_PKTHDR) &&
	    ((m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) ||
	    ((m->m_flags & M_LOOP) && (m->m_flags & (M_BCAST|M_MCAST))))) {
		error = ELOOP;
		goto release;
	}

	if (so->so_proto->pr_flags & PR_ATOMIC) {
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("somove !PKTHDR: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
		if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) {
			error = EMSGSIZE;
			goto release;
		}
		if (len < m->m_pkthdr.len)
			goto release;
		if (m->m_pkthdr.len < len) {
			maxreached = 0;
			len = m->m_pkthdr.len;
		}
		/*
		 * Throw away the name mbuf after it has been assured
		 * that the whole first record can be processed.
		 */
		m = so->so_rcv.sb_mb;
		sbfree(so, &so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		sbsync(&so->so_rcv, nextrecord);
	}
	/*
	 * Throw away the control mbufs after it has been assured
	 * that the whole first record can be processed.
	 */
	m = so->so_rcv.sb_mb;
	while (m && m->m_type == MT_CONTROL) {
		sbfree(so, &so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		m = so->so_rcv.sb_mb;
		sbsync(&so->so_rcv, nextrecord);
	}

	SBLASTRECORDCHK(&so->so_rcv, "somove 2");
	SBLASTMBUFCHK(&so->so_rcv, "somove 2");

	/* Take at most len mbufs out of receive buffer. */
	for (off = 0, mp = &m; off <= len && *mp;
	    off += (*mp)->m_len, mp = &(*mp)->m_next) {
		u_long size = len - off;

#ifdef DIAGNOSTIC
		if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER)
			panic("somove type: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, *mp, (*mp)->m_type);
#endif
		if ((*mp)->m_len > size) {
			/*
			 * Move only a partial mbuf at maximum splice length or
			 * if the drain buffer is too small for this large mbuf.
			 */
			if (!maxreached && sosp->so_snd.sb_datacc > 0) {
				len -= size;
				break;
			}
			*mp = m_copym(so->so_rcv.sb_mb, 0, size, wait);
			if (*mp == NULL) {
				len -= size;
				break;
			}
			so->so_rcv.sb_mb->m_data += size;
			so->so_rcv.sb_mb->m_len -= size;
			so->so_rcv.sb_cc -= size;
			so->so_rcv.sb_datacc -= size;
		} else {
			*mp = so->so_rcv.sb_mb;
			sbfree(so, &so->so_rcv, *mp);
			so->so_rcv.sb_mb = (*mp)->m_next;
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	*mp = NULL;

	SBLASTRECORDCHK(&so->so_rcv, "somove 3");
	SBLASTMBUFCHK(&so->so_rcv, "somove 3");
	SBCHECK(so, &so->so_rcv);
	if (m == NULL)
		goto release;
	m->m_nextpkt = NULL;
	if (m->m_flags & M_PKTHDR) {
		m_resethdr(m);
		m->m_pkthdr.len = len;
	}

	/* Send window update to source peer as receive buffer has changed. */
	if (so->so_proto->pr_flags & PR_WANTRCVD) {
		mtx_leave(&sosp->so_snd.sb_mtx);
		mtx_leave(&so->so_rcv.sb_mtx);
		solock_shared(so);
		pru_rcvd(so);
		sounlock_shared(so);
		mtx_enter(&so->so_rcv.sb_mtx);
		mtx_enter(&sosp->so_snd.sb_mtx);
	}

	/* The receive buffer shrank by len bytes; adjust the oob mark. */
	rcvstate = so->so_rcv.sb_state;
	so->so_rcv.sb_state &= ~SS_RCVATMARK;
	oobmark = so->so_oobmark;
	so->so_oobmark = oobmark > len ? oobmark - len : 0;
	if (oobmark) {
		if (oobmark == len)
			so->so_rcv.sb_state |= SS_RCVATMARK;
		if (oobmark >= len)
			oobmark = 0;
	}
	/*
	 * Handle oob data.  If any malloc fails, ignore error.
	 * TCP urgent data is not very reliable anyway.
	 */
	while (((rcvstate & SS_RCVATMARK) || oobmark) &&
	    (so->so_options & SO_OOBINLINE)) {
		struct mbuf *o = NULL;

		if (rcvstate & SS_RCVATMARK) {
			o = m_get(wait, MT_DATA);
			rcvstate &= ~SS_RCVATMARK;
		} else if (oobmark) {
			o = m_split(m, oobmark, wait);
			if (o) {
				mtx_leave(&sosp->so_snd.sb_mtx);
				mtx_leave(&so->so_rcv.sb_mtx);
				solock_shared(sosp);
				error = pru_send(sosp, m, NULL, NULL);
				sounlock_shared(sosp);
				mtx_enter(&so->so_rcv.sb_mtx);
				mtx_enter(&sosp->so_snd.sb_mtx);

				if (error) {
					if (sosp->so_snd.sb_state &
					    SS_CANTSENDMORE)
						error = EPIPE;
					m_freem(o);
					goto release;
				}
				len -= oobmark;
				so->so_splicelen += oobmark;
				m = o;
				o = m_get(wait, MT_DATA);
			}
			oobmark = 0;
		}
		if (o) {
			o->m_len = 1;
			*mtod(o, caddr_t) = *mtod(m, caddr_t);

			mtx_leave(&sosp->so_snd.sb_mtx);
			mtx_leave(&so->so_rcv.sb_mtx);
			solock_shared(sosp);
			error = pru_sendoob(sosp, o, NULL, NULL);
			sounlock_shared(sosp);
			mtx_enter(&so->so_rcv.sb_mtx);
			mtx_enter(&sosp->so_snd.sb_mtx);

			if (error) {
				if (sosp->so_snd.sb_state & SS_CANTSENDMORE)
					error = EPIPE;
				m_freem(m);
				goto release;
			}
			len -= 1;
			so->so_splicelen += 1;
			if (oobmark) {
				oobmark -= 1;
				if (oobmark == 0)
					rcvstate |= SS_RCVATMARK;
			}
			m_adj(m, 1);
		}
	}

	/* Append all remaining data to drain socket. */
	if (so->so_rcv.sb_cc == 0 || maxreached)
		sosp->so_snd.sb_state &= ~SS_ISSENDING;

	mtx_leave(&sosp->so_snd.sb_mtx);
	mtx_leave(&so->so_rcv.sb_mtx);
	solock_shared(sosp);
	error = pru_send(sosp, m, NULL, NULL);
	sounlock_shared(sosp);
	mtx_enter(&so->so_rcv.sb_mtx);
	mtx_enter(&sosp->so_snd.sb_mtx);

	if (error) {
		if (sosp->so_snd.sb_state & SS_CANTSENDMORE ||
		    sosp->so_pcb == NULL)
			error = EPIPE;
		goto release;
	}
	so->so_splicelen += len;

	/* Move several packets if possible. */
	if (!maxreached && nextrecord)
		goto nextpkt;

release:
	sosp->so_snd.sb_state &= ~SS_ISSENDING;

	if (!error && maxreached && so->so_splicemax == so->so_splicelen)
		error = EFBIG;
	if (error)
		WRITE_ONCE(so->so_error, error);

	if (((so->so_rcv.sb_state & SS_CANTRCVMORE) &&
	    so->so_rcv.sb_cc == 0) ||
	    (sosp->so_snd.sb_state & SS_CANTSENDMORE) ||
	    maxreached || error)
		unsplice = 1;

	mtx_leave(&sosp->so_snd.sb_mtx);
	mtx_leave(&so->so_rcv.sb_mtx);

	if (so->so_proto->pr_flags & PR_WANTRCVD)
		sbunlock(&so->so_snd);

	if (unsplice) {
		soref(sosp);
		sounsplice(so, sosp, 0);
		sorele(sosp);

		return (0);
	}
	if (timerisset(&so->so_idletv))
		timeout_add_tv(&so->so_idleto, &so->so_idletv);
	return (1);
}
#endif /* SOCKET_SPLICE */

void
sorwakeup(struct socket *so)
{
#ifdef SOCKET_SPLICE
	if (so->so_proto->pr_flags & PR_SPLICE) {
		mtx_enter(&so->so_rcv.sb_mtx);
		if (so->so_rcv.sb_flags & SB_SPLICE)
			task_add(sosplice_taskq, &so->so_splicetask);
		if (isspliced(so)) {
			mtx_leave(&so->so_rcv.sb_mtx);
			return;
		}
		mtx_leave(&so->so_rcv.sb_mtx);
	}
#endif
	sowakeup(so, &so->so_rcv);
	if (so->so_upcall)
		(*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT);
}

void
sowwakeup(struct socket *so)
{
#ifdef SOCKET_SPLICE
	if (so->so_proto->pr_flags & PR_SPLICE) {
		mtx_enter(&so->so_snd.sb_mtx);
		if (so->so_snd.sb_flags & SB_SPLICE)
			task_add(sosplice_taskq,
			    &so->so_sp->ssp_soback->so_splicetask);
		if (issplicedback(so)) {
			mtx_leave(&so->so_snd.sb_mtx);
			return;
		}
		mtx_leave(&so->so_snd.sb_mtx);
	}
#endif
	sowakeup(so, &so->so_snd);
}
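/*
 * Illustrative userland example (not part of this file) of the timeout
 * options handled in sosetopt() below: a 5 second receive timeout is
 * converted with TIMEVAL_TO_NSEC() and stored in sb_timeo_nsecs.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * A zero timeval maps to INFSLP, i.e. no timeout at all.
 */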
int
sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			solock(so);
			error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
			sounlock(so);
			return (error);
		}
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger) ||
			    mtod(m, struct linger *)->l_linger < 0 ||
			    mtod(m, struct linger *)->l_linger > SHRT_MAX)
				return (EINVAL);

			solock(so);
			so->so_linger = mtod(m, struct linger *)->l_linger;
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			sounlock(so);

			break;
		case SO_BINDANY:
			if ((error = suser(curproc)) != 0)	/* XXX */
				return (error);
			/* FALLTHROUGH */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);

			solock(so);
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			sounlock(so);

			break;
		case SO_DONTROUTE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				error = EOPNOTSUPP;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		    {
			struct sockbuf *sb = (optname == SO_SNDBUF ||
			    optname == SO_SNDLOWAT ?
			    &so->so_snd : &so->so_rcv);
			u_long cnt;

			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			cnt = *mtod(m, int *);
			if ((long)cnt <= 0)
				cnt = 1;

			mtx_enter(&sb->sb_mtx);
			switch (optname) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sb->sb_state &
				    (SS_CANTSENDMORE | SS_CANTRCVMORE)) {
					error = EINVAL;
					break;
				}
				if (sbcheckreserve(cnt, sb->sb_wat) ||
				    sbreserve(so, sb, cnt)) {
					error = ENOBUFS;
					break;
				}
				sb->sb_wat = cnt;
				break;
			case SO_SNDLOWAT:
			case SO_RCVLOWAT:
				sb->sb_lowat = (cnt > sb->sb_hiwat) ?
				    sb->sb_hiwat : cnt;
				break;
			}
			mtx_leave(&sb->sb_mtx);

			break;
		    }

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct sockbuf *sb = (optname == SO_SNDTIMEO ?
			    &so->so_snd : &so->so_rcv);
			struct timeval tv;
			uint64_t nsecs;

			if (m == NULL || m->m_len < sizeof (tv))
				return (EINVAL);
			memcpy(&tv, mtod(m, struct timeval *), sizeof tv);
			if (!timerisvalid(&tv))
				return (EINVAL);
			nsecs = TIMEVAL_TO_NSEC(&tv);
			if (nsecs == UINT64_MAX)
				return (EDOM);
			if (nsecs == 0)
				nsecs = INFSLP;

			mtx_enter(&sb->sb_mtx);
			sb->sb_timeo_nsecs = nsecs;
			mtx_leave(&sb->sb_mtx);
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				const struct domain *dom =
				    so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				solock(so);
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_SETOPT, so, level, optname, m);
				sounlock(so);
			} else
				error = ENOPROTOOPT;
			break;
#ifdef SOCKET_SPLICE
		case SO_SPLICE:
			if (m == NULL) {
				error = sosplice(so, -1, 0, NULL);
			} else if (m->m_len < sizeof(int)) {
				error = EINVAL;
			} else if (m->m_len < sizeof(struct splice)) {
				error = sosplice(so, *mtod(m, int *), 0, NULL);
			} else {
				error = sosplice(so,
				    mtod(m, struct splice *)->sp_fd,
				    mtod(m, struct splice *)->sp_max,
				    &mtod(m, struct splice *)->sp_idle);
			}
			break;
#endif /* SOCKET_SPLICE */

		default:
			error = ENOPROTOOPT;
			break;
		}
	}

	return (error);
}

int
sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			m->m_len = 0;

			solock(so);
			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
			    level, optname, m);
			sounlock(so);
			return (error);
		} else
			return (ENOPROTOOPT);
	} else {
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			solock_shared(so);
			mtod(m, struct linger *)->l_onoff =
			    so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			sounlock_shared(so);
			break;

		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_ACCEPTCONN:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_DONTROUTE:
			*mtod(m, int *) = 0;
			break;

		case SO_TYPE:
int
sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			m->m_len = 0;

			solock(so);
			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
			    level, optname, m);
			sounlock(so);
			return (error);
		} else
			return (ENOPROTOOPT);
	} else {
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			solock_shared(so);
			mtod(m, struct linger *)->l_onoff =
			    so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			sounlock_shared(so);
			break;

		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_ACCEPTCONN:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_DONTROUTE:
			*mtod(m, int *) = 0;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			solock(so);
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			sounlock(so);

			break;

		case SO_DOMAIN:
			*mtod(m, int *) = so->so_proto->pr_domain->dom_family;
			break;

		case SO_PROTOCOL:
			*mtod(m, int *) = so->so_proto->pr_protocol;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct sockbuf *sb = (optname == SO_SNDTIMEO ?
			    &so->so_snd : &so->so_rcv);
			struct timeval tv;
			uint64_t nsecs;

			mtx_enter(&sb->sb_mtx);
			nsecs = sb->sb_timeo_nsecs;
			mtx_leave(&sb->sb_mtx);

			m->m_len = sizeof(struct timeval);
			memset(&tv, 0, sizeof(tv));
			if (nsecs != INFSLP)
				NSEC_TO_TIMEVAL(nsecs, &tv);
			memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				const struct domain *dom =
				    so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				solock(so);
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_GETOPT, so, level, optname, m);
				sounlock(so);
				if (error)
					return (error);
				break;
			}
			return (ENOPROTOOPT);

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
		    {
			off_t len;

			m->m_len = sizeof(off_t);
			solock_shared(so);
			len = so->so_sp ? so->so_sp->ssp_len : 0;
			sounlock_shared(so);
			memcpy(mtod(m, off_t *), &len, sizeof(off_t));
			break;
		    }
#endif /* SOCKET_SPLICE */

		case SO_PEERCRED:
			if (so->so_proto->pr_protocol == AF_UNIX) {
				struct unpcb *unp = sotounpcb(so);

				solock(so);
				if (unp->unp_flags & UNP_FEIDS) {
					m->m_len = sizeof(unp->unp_connid);
					memcpy(mtod(m, caddr_t),
					    &(unp->unp_connid), m->m_len);
					sounlock(so);
					break;
				}
				sounlock(so);

				return (ENOTCONN);
			}
			return (EOPNOTSUPP);

		default:
			return (ENOPROTOOPT);
		}
		return (0);
	}
}
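
/*
 * Read-side sketch for the SOL_SOCKET cases served by sogetopt()
 * above (illustrative only; "s" is assumed to be a connected AF_UNIX
 * stream socket).  Fetching SO_ERROR consumes the pending error,
 * mirroring the so->so_error = 0 above, and SO_PEERCRED fails with
 * ENOTCONN until the peer ids (UNP_FEIDS) have been recorded:
 *
 *	struct sockpeercred cr;
 *	socklen_t len = sizeof(cr);
 *
 *	if (getsockopt(s, SOL_SOCKET, SO_PEERCRED, &cr, &len) == -1)
 *		err(1, "SO_PEERCRED");
 *	printf("peer uid %u gid %u pid %d\n", cr.uid, cr.gid, cr.pid);
 */
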
void
sohasoutofband(struct socket *so)
{
	pgsigio(&so->so_sigio, SIGURG, 0);
	knote(&so->so_rcv.sb_klist, 0);
}

void
sofilt_lock(struct socket *so, struct sockbuf *sb)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		NET_LOCK_SHARED();
		break;
	default:
		rw_enter_write(&so->so_lock);
		break;
	}

	mtx_enter(&sb->sb_mtx);
}

void
sofilt_unlock(struct socket *so, struct sockbuf *sb)
{
	mtx_leave(&sb->sb_mtx);

	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		NET_UNLOCK_SHARED();
		break;
	default:
		rw_exit_write(&so->so_lock);
		break;
	}
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		sb = &so->so_rcv;
		break;
	default:
		return (EINVAL);
	}

	klist_insert(&sb->sb_klist, kn);

	return (0);
}

void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	klist_remove(&so->so_rcv.sb_klist, kn);
}

int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	u_int state = READ_ONCE(so->so_state);
	u_int error = READ_ONCE(so->so_error);
	int rv = 0;

	MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);

	if (so->so_options & SO_ACCEPTCONN) {
		short qlen = READ_ONCE(so->so_qlen);

		soassertlocked_readonly(so);

		kn->kn_data = qlen;
		rv = (kn->kn_data != 0);

		if (kn->kn_flags & (__EV_POLL | __EV_SELECT)) {
			if (state & SS_ISDISCONNECTED) {
				kn->kn_flags |= __EV_HUP;
				rv = 1;
			} else {
				rv = qlen || soreadable(so);
			}
		}

		return rv;
	}

	kn->kn_data = so->so_rcv.sb_cc;
#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (so->so_rcv.sb_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = error;
		rv = 1;
	} else if (error) {
		rv = 1;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
	}

	return rv;
}
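
/*
 * How filt_soread() is typically driven from userland (a sketch; "kq"
 * comes from kqueue(2) and "s" is a stream socket).  With NOTE_LOWAT
 * set, the filter above compares the buffered byte count against the
 * user-supplied kn_sdata instead of the socket's sb_lowat:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */
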
void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	klist_remove(&so->so_snd.sb_klist, kn);
}

int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	u_int state = READ_ONCE(so->so_state);
	u_int error = READ_ONCE(so->so_error);
	int rv;

	MUTEX_ASSERT_LOCKED(&so->so_snd.sb_mtx);

	kn->kn_data = sbspace_locked(so, &so->so_snd);
	if (so->so_snd.sb_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = error;
		rv = 1;
	} else if (error) {
		rv = 1;
	} else if (((state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		rv = 0;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	}

	return (rv);
}

int
filt_soexcept(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv = 0;

	MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);

#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (kn->kn_sfflags & NOTE_OOB) {
		if (so->so_oobmark || (so->so_rcv.sb_state & SS_RCVATMARK)) {
			kn->kn_fflags |= NOTE_OOB;
			kn->kn_data -= so->so_oobmark;
			rv = 1;
		}
	}

	if (kn->kn_flags & __EV_POLL) {
		u_int state = READ_ONCE(so->so_state);

		if (state & SS_ISDISCONNECTED) {
			kn->kn_flags |= __EV_HUP;
			rv = 1;
		}
	}

	return rv;
}

int
filt_sowmodify(struct kevent *kev, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	sofilt_lock(so, &so->so_snd);
	rv = knote_modify(kev, kn);
	sofilt_unlock(so, &so->so_snd);

	return (rv);
}

int
filt_sowprocess(struct knote *kn, struct kevent *kev)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	sofilt_lock(so, &so->so_snd);
	rv = knote_process(kn, kev);
	sofilt_unlock(so, &so->so_snd);

	return (rv);
}

int
filt_sormodify(struct kevent *kev, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	sofilt_lock(so, &so->so_rcv);
	rv = knote_modify(kev, kn);
	sofilt_unlock(so, &so->so_rcv);

	return (rv);
}

int
filt_sorprocess(struct knote *kn, struct kevent *kev)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	sofilt_lock(so, &so->so_rcv);
	rv = knote_process(kn, kev);
	sofilt_unlock(so, &so->so_rcv);

	return (rv);
}
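
/*
 * The f_modify/f_process wrappers above take the socket lock (or the
 * shared net lock for inet sockets, via sofilt_lock()) plus the buffer
 * mutex around the f_event handlers.  For completeness, a sketch of
 * registering the exceptional-condition filter, which reports pending
 * out-of-band data when NOTE_OOB is requested:
 *
 *	EV_SET(&kev, s, EVFILT_EXCEPT, EV_ADD, NOTE_OOB, 0, NULL);
 */
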
#ifdef DDB
void
sobuf_print(struct sockbuf *,
    int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))));

void
sobuf_print(struct sockbuf *sb,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	(*pr)("\tsb_cc: %lu\n", sb->sb_cc);
	(*pr)("\tsb_datacc: %lu\n", sb->sb_datacc);
	(*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat);
	(*pr)("\tsb_wat: %lu\n", sb->sb_wat);
	(*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt);
	(*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax);
	(*pr)("\tsb_lowat: %ld\n", sb->sb_lowat);
	(*pr)("\tsb_mb: %p\n", sb->sb_mb);
	(*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail);
	(*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord);
	(*pr)("\tsb_flags: %04x\n", sb->sb_flags);
	(*pr)("\tsb_state: %04x\n", sb->sb_state);
	(*pr)("\tsb_timeo_nsecs: %llu\n", sb->sb_timeo_nsecs);
}

void
so_print(void *v,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct socket *so = v;

	(*pr)("socket %p\n", so);
	(*pr)("so_type: %i\n", so->so_type);
	(*pr)("so_options: 0x%04x\n", so->so_options); /* %b */
	(*pr)("so_linger: %i\n", so->so_linger);
	(*pr)("so_state: 0x%04x\n", so->so_state);
	(*pr)("so_pcb: %p\n", so->so_pcb);
	(*pr)("so_proto: %p\n", so->so_proto);
	(*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio);

	(*pr)("so_head: %p\n", so->so_head);
	(*pr)("so_onq: %p\n", so->so_onq);
	(*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0));
	(*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q));
	(*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe));
	(*pr)("so_q0len: %i\n", so->so_q0len);
	(*pr)("so_qlen: %i\n", so->so_qlen);
	(*pr)("so_qlimit: %i\n", so->so_qlimit);
	(*pr)("so_timeo: %i\n", so->so_timeo);
	(*pr)("so_oobmark: %lu\n", so->so_oobmark);

	(*pr)("so_sp: %p\n", so->so_sp);
	if (so->so_sp != NULL) {
		(*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket);
		(*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback);
		(*pr)("\tssp_len: %lld\n",
		    (long long)so->so_sp->ssp_len);
		(*pr)("\tssp_max: %lld\n",
		    (long long)so->so_sp->ssp_max);
		(*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec,
		    so->so_sp->ssp_idletv.tv_usec);
		(*pr)("\tssp_idleto: %spending (@%i)\n",
		    timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ",
		    so->so_sp->ssp_idleto.to_time);
	}

	(*pr)("so_rcv:\n");
	sobuf_print(&so->so_rcv, pr);
	(*pr)("so_snd:\n");
	sobuf_print(&so->so_snd, pr);

	(*pr)("so_upcall: %p so_upcallarg: %p\n",
	    so->so_upcall, so->so_upcallarg);

	(*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid);
	(*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid);
	(*pr)("so_cpid: %d\n", so->so_cpid);
}
#endif