/*	$OpenBSD: uipc_socket.c,v 1.333 2024/05/03 17:43:09 mvs Exp $	*/
/*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
31 * 32 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/proc.h> 38 #include <sys/file.h> 39 #include <sys/filedesc.h> 40 #include <sys/malloc.h> 41 #include <sys/mbuf.h> 42 #include <sys/domain.h> 43 #include <sys/event.h> 44 #include <sys/protosw.h> 45 #include <sys/socket.h> 46 #include <sys/unpcb.h> 47 #include <sys/socketvar.h> 48 #include <sys/signalvar.h> 49 #include <sys/pool.h> 50 #include <sys/atomic.h> 51 #include <sys/rwlock.h> 52 #include <sys/time.h> 53 #include <sys/refcnt.h> 54 55 #ifdef DDB 56 #include <machine/db_machdep.h> 57 #endif 58 59 void sbsync(struct sockbuf *, struct mbuf *); 60 61 int sosplice(struct socket *, int, off_t, struct timeval *); 62 void sounsplice(struct socket *, struct socket *, int); 63 void soidle(void *); 64 void sotask(void *); 65 void soreaper(void *); 66 void soput(void *); 67 int somove(struct socket *, int); 68 void sorflush(struct socket *); 69 void sorflush_locked(struct socket *); 70 71 void filt_sordetach(struct knote *kn); 72 int filt_soread(struct knote *kn, long hint); 73 void filt_sowdetach(struct knote *kn); 74 int filt_sowrite(struct knote *kn, long hint); 75 int filt_soexcept(struct knote *kn, long hint); 76 77 int filt_sowmodify(struct kevent *kev, struct knote *kn); 78 int filt_sowprocess(struct knote *kn, struct kevent *kev); 79 80 int filt_sormodify(struct kevent *kev, struct knote *kn); 81 int filt_sorprocess(struct knote *kn, struct kevent *kev); 82 83 const struct filterops soread_filtops = { 84 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 85 .f_attach = NULL, 86 .f_detach = filt_sordetach, 87 .f_event = filt_soread, 88 .f_modify = filt_sormodify, 89 .f_process = filt_sorprocess, 90 }; 91 92 const struct filterops sowrite_filtops = { 93 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 94 .f_attach = NULL, 95 .f_detach = filt_sowdetach, 96 .f_event = filt_sowrite, 97 .f_modify = filt_sowmodify, 98 .f_process = filt_sowprocess, 99 }; 100 101 const struct filterops soexcept_filtops = { 102 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 103 .f_attach = NULL, 104 .f_detach = filt_sordetach, 105 .f_event = filt_soexcept, 106 .f_modify = filt_sormodify, 107 .f_process = filt_sorprocess, 108 }; 109 110 #ifndef SOMINCONN 111 #define SOMINCONN 80 112 #endif /* SOMINCONN */ 113 114 int somaxconn = SOMAXCONN; 115 int sominconn = SOMINCONN; 116 117 struct pool socket_pool; 118 #ifdef SOCKET_SPLICE 119 struct pool sosplice_pool; 120 struct taskq *sosplice_taskq; 121 struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk"); 122 #endif 123 124 void 125 soinit(void) 126 { 127 pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0, 128 "sockpl", NULL); 129 #ifdef SOCKET_SPLICE 130 pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0, 131 "sosppl", NULL); 132 #endif 133 } 134 135 struct socket * 136 soalloc(const struct protosw *prp, int wait) 137 { 138 const struct domain *dp = prp->pr_domain; 139 struct socket *so; 140 141 so = pool_get(&socket_pool, (wait == M_WAIT ? 
PR_WAITOK : PR_NOWAIT) | 142 PR_ZERO); 143 if (so == NULL) 144 return (NULL); 145 rw_init_flags(&so->so_lock, dp->dom_name, RWL_DUPOK); 146 refcnt_init(&so->so_refcnt); 147 rw_init(&so->so_rcv.sb_lock, "sbufrcv"); 148 rw_init(&so->so_snd.sb_lock, "sbufsnd"); 149 mtx_init_flags(&so->so_rcv.sb_mtx, IPL_MPFLOOR, "sbrcv", 0); 150 mtx_init_flags(&so->so_snd.sb_mtx, IPL_MPFLOOR, "sbsnd", 0); 151 klist_init_mutex(&so->so_rcv.sb_klist, &so->so_rcv.sb_mtx); 152 klist_init_mutex(&so->so_snd.sb_klist, &so->so_snd.sb_mtx); 153 sigio_init(&so->so_sigio); 154 TAILQ_INIT(&so->so_q0); 155 TAILQ_INIT(&so->so_q); 156 157 switch (dp->dom_family) { 158 case AF_INET: 159 case AF_INET6: 160 switch (prp->pr_type) { 161 case SOCK_RAW: 162 so->so_snd.sb_flags |= SB_MTXLOCK; 163 /* FALLTHROUGH */ 164 case SOCK_DGRAM: 165 so->so_rcv.sb_flags |= SB_MTXLOCK; 166 break; 167 } 168 break; 169 case AF_UNIX: 170 so->so_snd.sb_flags |= SB_MTXLOCK; 171 so->so_rcv.sb_flags |= SB_MTXLOCK; 172 break; 173 } 174 175 return (so); 176 } 177 178 /* 179 * Socket operation routines. 180 * These routines are called by the routines in 181 * sys_socket.c or from a system process, and 182 * implement the semantics of socket operations by 183 * switching out to the protocol specific routines. 184 */ 185 int 186 socreate(int dom, struct socket **aso, int type, int proto) 187 { 188 struct proc *p = curproc; /* XXX */ 189 const struct protosw *prp; 190 struct socket *so; 191 int error; 192 193 if (proto) 194 prp = pffindproto(dom, proto, type); 195 else 196 prp = pffindtype(dom, type); 197 if (prp == NULL || prp->pr_usrreqs == NULL) 198 return (EPROTONOSUPPORT); 199 if (prp->pr_type != type) 200 return (EPROTOTYPE); 201 so = soalloc(prp, M_WAIT); 202 so->so_type = type; 203 if (suser(p) == 0) 204 so->so_state = SS_PRIV; 205 so->so_ruid = p->p_ucred->cr_ruid; 206 so->so_euid = p->p_ucred->cr_uid; 207 so->so_rgid = p->p_ucred->cr_rgid; 208 so->so_egid = p->p_ucred->cr_gid; 209 so->so_cpid = p->p_p->ps_pid; 210 so->so_proto = prp; 211 so->so_snd.sb_timeo_nsecs = INFSLP; 212 so->so_rcv.sb_timeo_nsecs = INFSLP; 213 214 solock(so); 215 error = pru_attach(so, proto, M_WAIT); 216 if (error) { 217 so->so_state |= SS_NOFDREF; 218 /* sofree() calls sounlock(). 
*/ 219 sofree(so, 0); 220 return (error); 221 } 222 sounlock(so); 223 *aso = so; 224 return (0); 225 } 226 227 int 228 sobind(struct socket *so, struct mbuf *nam, struct proc *p) 229 { 230 soassertlocked(so); 231 return pru_bind(so, nam, p); 232 } 233 234 int 235 solisten(struct socket *so, int backlog) 236 { 237 int somaxconn_local = READ_ONCE(somaxconn); 238 int sominconn_local = READ_ONCE(sominconn); 239 int error; 240 241 switch (so->so_type) { 242 case SOCK_STREAM: 243 case SOCK_SEQPACKET: 244 break; 245 default: 246 return (EOPNOTSUPP); 247 } 248 249 soassertlocked(so); 250 251 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) 252 return (EINVAL); 253 #ifdef SOCKET_SPLICE 254 if (isspliced(so) || issplicedback(so)) 255 return (EOPNOTSUPP); 256 #endif /* SOCKET_SPLICE */ 257 error = pru_listen(so); 258 if (error) 259 return (error); 260 if (TAILQ_FIRST(&so->so_q) == NULL) 261 so->so_options |= SO_ACCEPTCONN; 262 if (backlog < 0 || backlog > somaxconn_local) 263 backlog = somaxconn_local; 264 if (backlog < sominconn_local) 265 backlog = sominconn_local; 266 so->so_qlimit = backlog; 267 return (0); 268 } 269 270 #define SOSP_FREEING_READ 1 271 #define SOSP_FREEING_WRITE 2 272 void 273 sofree(struct socket *so, int keep_lock) 274 { 275 int persocket = solock_persocket(so); 276 277 soassertlocked(so); 278 279 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) { 280 if (!keep_lock) 281 sounlock(so); 282 return; 283 } 284 if (so->so_head) { 285 struct socket *head = so->so_head; 286 287 /* 288 * We must not decommission a socket that's on the accept(2) 289 * queue. If we do, then accept(2) may hang after select(2) 290 * indicated that the listening socket was ready. 291 */ 292 if (so->so_onq == &head->so_q) { 293 if (!keep_lock) 294 sounlock(so); 295 return; 296 } 297 298 if (persocket) { 299 /* 300 * Concurrent close of `head' could 301 * abort `so' due to re-lock. 302 */ 303 soref(so); 304 soref(head); 305 sounlock(so); 306 solock(head); 307 solock(so); 308 309 if (so->so_onq != &head->so_q0) { 310 sounlock(head); 311 sounlock(so); 312 sorele(head); 313 sorele(so); 314 return; 315 } 316 317 sorele(head); 318 sorele(so); 319 } 320 321 soqremque(so, 0); 322 323 if (persocket) 324 sounlock(head); 325 } 326 327 if (persocket) { 328 sounlock(so); 329 refcnt_finalize(&so->so_refcnt, "sofinal"); 330 solock(so); 331 } 332 333 sigio_free(&so->so_sigio); 334 klist_free(&so->so_rcv.sb_klist); 335 klist_free(&so->so_snd.sb_klist); 336 #ifdef SOCKET_SPLICE 337 if (issplicedback(so)) { 338 int freeing = SOSP_FREEING_WRITE; 339 340 if (so->so_sp->ssp_soback == so) 341 freeing |= SOSP_FREEING_READ; 342 sounsplice(so->so_sp->ssp_soback, so, freeing); 343 } 344 if (isspliced(so)) { 345 int freeing = SOSP_FREEING_READ; 346 347 if (so == so->so_sp->ssp_socket) 348 freeing |= SOSP_FREEING_WRITE; 349 sounsplice(so, so->so_sp->ssp_socket, freeing); 350 } 351 #endif /* SOCKET_SPLICE */ 352 353 mtx_enter(&so->so_snd.sb_mtx); 354 sbrelease(so, &so->so_snd); 355 mtx_leave(&so->so_snd.sb_mtx); 356 357 /* 358 * Unlocked dispose and cleanup is safe. Socket is unlinked 359 * from everywhere. Even concurrent sotask() thread will not 360 * call somove(). 361 */ 362 if (so->so_proto->pr_flags & PR_RIGHTS && 363 so->so_proto->pr_domain->dom_dispose) 364 (*so->so_proto->pr_domain->dom_dispose)(so->so_rcv.sb_mb); 365 m_purge(so->so_rcv.sb_mb); 366 367 if (!keep_lock) 368 sounlock(so); 369 370 #ifdef SOCKET_SPLICE 371 if (so->so_sp) { 372 /* Reuse splice idle, sounsplice() has been called before. 
*/ 373 timeout_set_proc(&so->so_sp->ssp_idleto, soreaper, so); 374 timeout_add(&so->so_sp->ssp_idleto, 0); 375 } else 376 #endif /* SOCKET_SPLICE */ 377 { 378 pool_put(&socket_pool, so); 379 } 380 } 381 382 static inline uint64_t 383 solinger_nsec(struct socket *so) 384 { 385 if (so->so_linger == 0) 386 return INFSLP; 387 388 return SEC_TO_NSEC(so->so_linger); 389 } 390 391 /* 392 * Close a socket on last file table reference removal. 393 * Initiate disconnect if connected. 394 * Free socket when disconnect complete. 395 */ 396 int 397 soclose(struct socket *so, int flags) 398 { 399 struct socket *so2; 400 int error = 0; 401 402 solock(so); 403 /* Revoke async IO early. There is a final revocation in sofree(). */ 404 sigio_free(&so->so_sigio); 405 if (so->so_state & SS_ISCONNECTED) { 406 if (so->so_pcb == NULL) 407 goto discard; 408 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 409 error = sodisconnect(so); 410 if (error) 411 goto drop; 412 } 413 if (so->so_options & SO_LINGER) { 414 if ((so->so_state & SS_ISDISCONNECTING) && 415 (flags & MSG_DONTWAIT)) 416 goto drop; 417 while (so->so_state & SS_ISCONNECTED) { 418 error = sosleep_nsec(so, &so->so_timeo, 419 PSOCK | PCATCH, "netcls", 420 solinger_nsec(so)); 421 if (error) 422 break; 423 } 424 } 425 } 426 drop: 427 if (so->so_pcb) { 428 int error2; 429 error2 = pru_detach(so); 430 if (error == 0) 431 error = error2; 432 } 433 if (so->so_options & SO_ACCEPTCONN) { 434 int persocket = solock_persocket(so); 435 436 while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) { 437 if (persocket) 438 solock(so2); 439 (void) soqremque(so2, 0); 440 if (persocket) 441 sounlock(so); 442 soabort(so2); 443 if (persocket) 444 solock(so); 445 } 446 while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) { 447 if (persocket) 448 solock(so2); 449 (void) soqremque(so2, 1); 450 if (persocket) 451 sounlock(so); 452 soabort(so2); 453 if (persocket) 454 solock(so); 455 } 456 } 457 discard: 458 if (so->so_state & SS_NOFDREF) 459 panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type); 460 so->so_state |= SS_NOFDREF; 461 /* sofree() calls sounlock(). */ 462 sofree(so, 0); 463 return (error); 464 } 465 466 void 467 soabort(struct socket *so) 468 { 469 soassertlocked(so); 470 pru_abort(so); 471 } 472 473 int 474 soaccept(struct socket *so, struct mbuf *nam) 475 { 476 int error = 0; 477 478 soassertlocked(so); 479 480 if ((so->so_state & SS_NOFDREF) == 0) 481 panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type); 482 so->so_state &= ~SS_NOFDREF; 483 if ((so->so_state & SS_ISDISCONNECTED) == 0 || 484 (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0) 485 error = pru_accept(so, nam); 486 else 487 error = ECONNABORTED; 488 return (error); 489 } 490 491 int 492 soconnect(struct socket *so, struct mbuf *nam) 493 { 494 int error; 495 496 soassertlocked(so); 497 498 if (so->so_options & SO_ACCEPTCONN) 499 return (EOPNOTSUPP); 500 /* 501 * If protocol is connection-based, can only connect once. 502 * Otherwise, if connected, try to disconnect first. 503 * This allows user to disconnect by connecting to, e.g., 504 * a null address. 
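	 *
	 * Illustrative user space sketch (not part of this file): a datagram
	 * socket s that was connect(2)ed earlier can be disconnected again
	 * by connecting to an AF_UNSPEC ("null") address, which takes the
	 * sodisconnect() path below:
	 *
	 *	struct sockaddr sa;
	 *
	 *	memset(&sa, 0, sizeof(sa));
	 *	sa.sa_family = AF_UNSPEC;
	 *	connect(s, &sa, sizeof(sa));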
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = pru_connect(so, nam);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int persocket, error;

	if ((persocket = solock_persocket(so1)))
		solock_pair(so1, so2);
	else
		solock(so1);

	error = pru_connect2(so1, so2);

	if (persocket)
		sounlock(so2);
	sounlock(so1);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	soassertlocked(so);

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = pru_disconnect(so);
	return (error);
}

int m_getuio(struct mbuf **, int, long, struct uio *);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
    struct mbuf *control, int flags)
{
	long space, clen = 0;
	size_t resid;
	int error;
	int atomic = sosendallatonce(so) || top;
	int dosolock = ((so->so_snd.sb_flags & SB_MTXLOCK) == 0);

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/* MSG_EOR on a SOCK_STREAM socket is invalid. */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		m_freem(top);
		m_freem(control);
		return (EINVAL);
	}
	if (uio && uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgsnd++;
	if (control) {
		/*
		 * In theory clen should be unsigned (since control->m_len is).
		 * However, space must be signed, as it might be less than 0
		 * if we over-committed, and we must use a signed comparison
		 * of space and clen.
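		 *
		 * Rough worked example for the AF_UNIX adjustment below
		 * (assuming sizeof(int) == 4 and sizeof(struct fdpass) == 16,
		 * which need not hold on every platform): an SCM_RIGHTS
		 * message carrying two file descriptors arrives with 8 data
		 * bytes after the aligned header; internalizing replaces each
		 * int with a struct fdpass, so the space check is done with
		 * CMSG_SPACE(8 * (16 / 4)) == CMSG_SPACE(32) instead of the
		 * original control length.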
596 */ 597 clen = control->m_len; 598 /* reserve extra space for AF_UNIX's internalize */ 599 if (so->so_proto->pr_domain->dom_family == AF_UNIX && 600 clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) && 601 mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS) 602 clen = CMSG_SPACE( 603 (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) * 604 (sizeof(struct fdpass) / sizeof(int))); 605 } 606 607 #define snderr(errno) { error = errno; goto release; } 608 609 if (dosolock) 610 solock_shared(so); 611 restart: 612 if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0) 613 goto out; 614 sb_mtx_lock(&so->so_snd); 615 so->so_snd.sb_state |= SS_ISSENDING; 616 do { 617 if (so->so_snd.sb_state & SS_CANTSENDMORE) 618 snderr(EPIPE); 619 if ((error = READ_ONCE(so->so_error))) { 620 so->so_error = 0; 621 snderr(error); 622 } 623 if ((so->so_state & SS_ISCONNECTED) == 0) { 624 if (so->so_proto->pr_flags & PR_CONNREQUIRED) { 625 if (!(resid == 0 && clen != 0)) 626 snderr(ENOTCONN); 627 } else if (addr == NULL) 628 snderr(EDESTADDRREQ); 629 } 630 space = sbspace(so, &so->so_snd); 631 if (flags & MSG_OOB) 632 space += 1024; 633 if (so->so_proto->pr_domain->dom_family == AF_UNIX) { 634 if (atomic && resid > so->so_snd.sb_hiwat) 635 snderr(EMSGSIZE); 636 } else { 637 if (clen > so->so_snd.sb_hiwat || 638 (atomic && resid > so->so_snd.sb_hiwat - clen)) 639 snderr(EMSGSIZE); 640 } 641 if (space < clen || 642 (space - clen < resid && 643 (atomic || space < so->so_snd.sb_lowat))) { 644 if (flags & MSG_DONTWAIT) 645 snderr(EWOULDBLOCK); 646 sbunlock(so, &so->so_snd); 647 648 if (so->so_snd.sb_flags & SB_MTXLOCK) 649 error = sbwait_locked(so, &so->so_snd); 650 else 651 error = sbwait(so, &so->so_snd); 652 653 so->so_snd.sb_state &= ~SS_ISSENDING; 654 sb_mtx_unlock(&so->so_snd); 655 if (error) 656 goto out; 657 goto restart; 658 } 659 space -= clen; 660 do { 661 if (uio == NULL) { 662 /* 663 * Data is prepackaged in "top". 
664 */ 665 resid = 0; 666 if (flags & MSG_EOR) 667 top->m_flags |= M_EOR; 668 } else { 669 sb_mtx_unlock(&so->so_snd); 670 if (dosolock) 671 sounlock_shared(so); 672 error = m_getuio(&top, atomic, space, uio); 673 if (dosolock) 674 solock_shared(so); 675 sb_mtx_lock(&so->so_snd); 676 if (error) 677 goto release; 678 space -= top->m_pkthdr.len; 679 resid = uio->uio_resid; 680 if (flags & MSG_EOR) 681 top->m_flags |= M_EOR; 682 } 683 if (resid == 0) 684 so->so_snd.sb_state &= ~SS_ISSENDING; 685 if (top && so->so_options & SO_ZEROIZE) 686 top->m_flags |= M_ZEROIZE; 687 sb_mtx_unlock(&so->so_snd); 688 if (!dosolock) 689 solock_shared(so); 690 if (flags & MSG_OOB) 691 error = pru_sendoob(so, top, addr, control); 692 else 693 error = pru_send(so, top, addr, control); 694 if (!dosolock) 695 sounlock_shared(so); 696 sb_mtx_lock(&so->so_snd); 697 clen = 0; 698 control = NULL; 699 top = NULL; 700 if (error) 701 goto release; 702 } while (resid && space > 0); 703 } while (resid); 704 705 release: 706 so->so_snd.sb_state &= ~SS_ISSENDING; 707 sb_mtx_unlock(&so->so_snd); 708 sbunlock(so, &so->so_snd); 709 out: 710 if (dosolock) 711 sounlock_shared(so); 712 m_freem(top); 713 m_freem(control); 714 return (error); 715 } 716 717 int 718 m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio) 719 { 720 struct mbuf *m, *top = NULL; 721 struct mbuf **nextp = ⊤ 722 u_long len, mlen; 723 size_t resid = uio->uio_resid; 724 int error; 725 726 do { 727 if (top == NULL) { 728 MGETHDR(m, M_WAIT, MT_DATA); 729 mlen = MHLEN; 730 m->m_pkthdr.len = 0; 731 m->m_pkthdr.ph_ifidx = 0; 732 } else { 733 MGET(m, M_WAIT, MT_DATA); 734 mlen = MLEN; 735 } 736 /* chain mbuf together */ 737 *nextp = m; 738 nextp = &m->m_next; 739 740 resid = ulmin(resid, space); 741 if (resid >= MINCLSIZE) { 742 MCLGETL(m, M_NOWAIT, ulmin(resid, MAXMCLBYTES)); 743 if ((m->m_flags & M_EXT) == 0) 744 MCLGETL(m, M_NOWAIT, MCLBYTES); 745 if ((m->m_flags & M_EXT) == 0) 746 goto nopages; 747 mlen = m->m_ext.ext_size; 748 len = ulmin(mlen, resid); 749 /* 750 * For datagram protocols, leave room 751 * for protocol headers in first mbuf. 752 */ 753 if (atomic && m == top && len < mlen - max_hdr) 754 m->m_data += max_hdr; 755 } else { 756 nopages: 757 len = ulmin(mlen, resid); 758 /* 759 * For datagram protocols, leave room 760 * for protocol headers in first mbuf. 761 */ 762 if (atomic && m == top && len < mlen - max_hdr) 763 m_align(m, len); 764 } 765 766 error = uiomove(mtod(m, caddr_t), len, uio); 767 if (error) { 768 m_freem(top); 769 return (error); 770 } 771 772 /* adjust counters */ 773 resid = uio->uio_resid; 774 space -= len; 775 m->m_len = len; 776 top->m_pkthdr.len += len; 777 778 /* Is there more space and more data? */ 779 } while (space > 0 && resid > 0); 780 781 *mp = top; 782 return 0; 783 } 784 785 /* 786 * Following replacement or removal of the first mbuf on the first 787 * mbuf chain of a socket buffer, push necessary state changes back 788 * into the socket buffer so that other consumers see the values 789 * consistently. 'nextrecord' is the callers locally stored value of 790 * the original value of sb->sb_mb->m_nextpkt which must be restored 791 * when the lead mbuf changes. NOTE: 'nextrecord' may be NULL. 792 */ 793 void 794 sbsync(struct sockbuf *sb, struct mbuf *nextrecord) 795 { 796 797 /* 798 * First, update for the new value of nextrecord. If necessary, 799 * make it the first record. 
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network for the entire time here, we release
 * the solock() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp,
    socklen_t controllen)
{
	struct mbuf *m, **mp;
	struct mbuf *cm;
	u_long len, offset, moff;
	int flags, error, error2, type, uio_error = 0;
	const struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	size_t resid, orig_resid = uio->uio_resid;
	int dosolock = ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0);

	mp = mp0;
	if (paddr)
		*paddr = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock(so);
		error = pru_rcvoob(so, m, flags & MSG_PEEK);
		sounlock(so);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    ulmin(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;

	if (dosolock)
		solock_shared(so);
restart:
	if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0)
		goto out;
	sb_mtx_lock(&so->so_rcv);

	m = so->so_rcv.sb_mb;
#ifdef SOCKET_SPLICE
	if (isspliced(so))
		m = NULL;
#endif /* SOCKET_SPLICE */
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
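	 *
	 * Illustrative user space sketch of that last case (not part of this
	 * file; assumes a blocking socket whose SO_RCVBUF is smaller than
	 * the request):
	 *
	 *	char buf[65536];
	 *	ssize_t n = recv(s, buf, sizeof(buf), MSG_WAITALL);
	 *
	 * Because resid exceeds the high water mark, the copy out is done in
	 * sections and n may legitimately be short if a signal or the
	 * receive timeout fires after the first section has been consumed.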
900 */ 901 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 902 so->so_rcv.sb_cc < uio->uio_resid) && 903 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 904 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && 905 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { 906 #ifdef DIAGNOSTIC 907 if (m == NULL && so->so_rcv.sb_cc) 908 #ifdef SOCKET_SPLICE 909 if (!isspliced(so)) 910 #endif /* SOCKET_SPLICE */ 911 panic("receive 1: so %p, so_type %d, sb_cc %lu", 912 so, so->so_type, so->so_rcv.sb_cc); 913 #endif 914 if ((error2 = READ_ONCE(so->so_error))) { 915 if (m) 916 goto dontblock; 917 error = error2; 918 if ((flags & MSG_PEEK) == 0) 919 so->so_error = 0; 920 goto release; 921 } 922 if (so->so_rcv.sb_state & SS_CANTRCVMORE) { 923 if (m) 924 goto dontblock; 925 else if (so->so_rcv.sb_cc == 0) 926 goto release; 927 } 928 for (; m; m = m->m_next) 929 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 930 m = so->so_rcv.sb_mb; 931 goto dontblock; 932 } 933 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 934 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 935 error = ENOTCONN; 936 goto release; 937 } 938 if (uio->uio_resid == 0 && controlp == NULL) 939 goto release; 940 if (flags & MSG_DONTWAIT) { 941 error = EWOULDBLOCK; 942 goto release; 943 } 944 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); 945 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); 946 947 if (so->so_rcv.sb_flags & SB_MTXLOCK) { 948 sbunlock_locked(so, &so->so_rcv); 949 if (dosolock) 950 sounlock_shared(so); 951 error = sbwait_locked(so, &so->so_rcv); 952 sb_mtx_unlock(&so->so_rcv); 953 if (error) 954 return (error); 955 if (dosolock) 956 solock_shared(so); 957 } else { 958 sb_mtx_unlock(&so->so_rcv); 959 sbunlock(so, &so->so_rcv); 960 error = sbwait(so, &so->so_rcv); 961 if (error) { 962 sounlock_shared(so); 963 return (error); 964 } 965 } 966 goto restart; 967 } 968 dontblock: 969 /* 970 * On entry here, m points to the first record of the socket buffer. 971 * From this point onward, we maintain 'nextrecord' as a cache of the 972 * pointer to the next record in the socket buffer. We must keep the 973 * various socket buffer pointers and local stack versions of the 974 * pointers in sync, pushing out modifications before operations that 975 * may sleep, and re-reading them afterwards. 976 * 977 * Otherwise, we will race with the network stack appending new data 978 * or records onto the socket buffer by using inconsistent/stale 979 * versions of the field, possibly resulting in socket buffer 980 * corruption. 
981 */ 982 if (uio->uio_procp) 983 uio->uio_procp->p_ru.ru_msgrcv++; 984 KASSERT(m == so->so_rcv.sb_mb); 985 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); 986 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); 987 nextrecord = m->m_nextpkt; 988 if (pr->pr_flags & PR_ADDR) { 989 #ifdef DIAGNOSTIC 990 if (m->m_type != MT_SONAME) 991 panic("receive 1a: so %p, so_type %d, m %p, m_type %d", 992 so, so->so_type, m, m->m_type); 993 #endif 994 orig_resid = 0; 995 if (flags & MSG_PEEK) { 996 if (paddr) 997 *paddr = m_copym(m, 0, m->m_len, M_NOWAIT); 998 m = m->m_next; 999 } else { 1000 sbfree(so, &so->so_rcv, m); 1001 if (paddr) { 1002 *paddr = m; 1003 so->so_rcv.sb_mb = m->m_next; 1004 m->m_next = NULL; 1005 m = so->so_rcv.sb_mb; 1006 } else { 1007 so->so_rcv.sb_mb = m_free(m); 1008 m = so->so_rcv.sb_mb; 1009 } 1010 sbsync(&so->so_rcv, nextrecord); 1011 } 1012 } 1013 while (m && m->m_type == MT_CONTROL && error == 0) { 1014 int skip = 0; 1015 if (flags & MSG_PEEK) { 1016 if (mtod(m, struct cmsghdr *)->cmsg_type == 1017 SCM_RIGHTS) { 1018 /* don't leak internalized SCM_RIGHTS msgs */ 1019 skip = 1; 1020 } else if (controlp) 1021 *controlp = m_copym(m, 0, m->m_len, M_NOWAIT); 1022 m = m->m_next; 1023 } else { 1024 sbfree(so, &so->so_rcv, m); 1025 so->so_rcv.sb_mb = m->m_next; 1026 m->m_nextpkt = m->m_next = NULL; 1027 cm = m; 1028 m = so->so_rcv.sb_mb; 1029 sbsync(&so->so_rcv, nextrecord); 1030 if (controlp) { 1031 if (pr->pr_domain->dom_externalize) { 1032 sb_mtx_unlock(&so->so_rcv); 1033 if (dosolock) 1034 sounlock_shared(so); 1035 error = 1036 (*pr->pr_domain->dom_externalize) 1037 (cm, controllen, flags); 1038 if (dosolock) 1039 solock_shared(so); 1040 sb_mtx_lock(&so->so_rcv); 1041 } 1042 *controlp = cm; 1043 } else { 1044 /* 1045 * Dispose of any SCM_RIGHTS message that went 1046 * through the read path rather than recv. 1047 */ 1048 if (pr->pr_domain->dom_dispose) { 1049 sb_mtx_unlock(&so->so_rcv); 1050 pr->pr_domain->dom_dispose(cm); 1051 sb_mtx_lock(&so->so_rcv); 1052 } 1053 m_free(cm); 1054 } 1055 } 1056 if (m != NULL) 1057 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 1058 else 1059 nextrecord = so->so_rcv.sb_mb; 1060 if (controlp && !skip) 1061 controlp = &(*controlp)->m_next; 1062 orig_resid = 0; 1063 } 1064 1065 /* If m is non-NULL, we have some data to read. */ 1066 if (m) { 1067 type = m->m_type; 1068 if (type == MT_OOBDATA) 1069 flags |= MSG_OOB; 1070 if (m->m_flags & M_BCAST) 1071 flags |= MSG_BCAST; 1072 if (m->m_flags & M_MCAST) 1073 flags |= MSG_MCAST; 1074 } 1075 SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); 1076 SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); 1077 1078 moff = 0; 1079 offset = 0; 1080 while (m && uio->uio_resid > 0 && error == 0) { 1081 if (m->m_type == MT_OOBDATA) { 1082 if (type != MT_OOBDATA) 1083 break; 1084 } else if (type == MT_OOBDATA) { 1085 break; 1086 } else if (m->m_type == MT_CONTROL) { 1087 /* 1088 * If there is more than one control message in the 1089 * stream, we do a short read. Next can be received 1090 * or disposed by another system call. 1091 */ 1092 break; 1093 #ifdef DIAGNOSTIC 1094 } else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) { 1095 panic("receive 3: so %p, so_type %d, m %p, m_type %d", 1096 so, so->so_type, m, m->m_type); 1097 #endif 1098 } 1099 so->so_rcv.sb_state &= ~SS_RCVATMARK; 1100 len = uio->uio_resid; 1101 if (so->so_oobmark && len > so->so_oobmark - offset) 1102 len = so->so_oobmark - offset; 1103 if (len > m->m_len - moff) 1104 len = m->m_len - moff; 1105 /* 1106 * If mp is set, just pass back the mbufs. 
1107 * Otherwise copy them out via the uio, then free. 1108 * Sockbuf must be consistent here (points to current mbuf, 1109 * it points to next record) when we drop priority; 1110 * we must note any additions to the sockbuf when we 1111 * block interrupts again. 1112 */ 1113 if (mp == NULL && uio_error == 0) { 1114 SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); 1115 SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); 1116 resid = uio->uio_resid; 1117 sb_mtx_unlock(&so->so_rcv); 1118 if (dosolock) 1119 sounlock_shared(so); 1120 uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1121 if (dosolock) 1122 solock_shared(so); 1123 sb_mtx_lock(&so->so_rcv); 1124 if (uio_error) 1125 uio->uio_resid = resid - len; 1126 } else 1127 uio->uio_resid -= len; 1128 if (len == m->m_len - moff) { 1129 if (m->m_flags & M_EOR) 1130 flags |= MSG_EOR; 1131 if (flags & MSG_PEEK) { 1132 m = m->m_next; 1133 moff = 0; 1134 orig_resid = 0; 1135 } else { 1136 nextrecord = m->m_nextpkt; 1137 sbfree(so, &so->so_rcv, m); 1138 if (mp) { 1139 *mp = m; 1140 mp = &m->m_next; 1141 so->so_rcv.sb_mb = m = m->m_next; 1142 *mp = NULL; 1143 } else { 1144 so->so_rcv.sb_mb = m_free(m); 1145 m = so->so_rcv.sb_mb; 1146 } 1147 /* 1148 * If m != NULL, we also know that 1149 * so->so_rcv.sb_mb != NULL. 1150 */ 1151 KASSERT(so->so_rcv.sb_mb == m); 1152 if (m) { 1153 m->m_nextpkt = nextrecord; 1154 if (nextrecord == NULL) 1155 so->so_rcv.sb_lastrecord = m; 1156 } else { 1157 so->so_rcv.sb_mb = nextrecord; 1158 SB_EMPTY_FIXUP(&so->so_rcv); 1159 } 1160 SBLASTRECORDCHK(&so->so_rcv, "soreceive 3"); 1161 SBLASTMBUFCHK(&so->so_rcv, "soreceive 3"); 1162 } 1163 } else { 1164 if (flags & MSG_PEEK) { 1165 moff += len; 1166 orig_resid = 0; 1167 } else { 1168 if (mp) 1169 *mp = m_copym(m, 0, len, M_WAIT); 1170 m->m_data += len; 1171 m->m_len -= len; 1172 so->so_rcv.sb_cc -= len; 1173 so->so_rcv.sb_datacc -= len; 1174 } 1175 } 1176 if (so->so_oobmark) { 1177 if ((flags & MSG_PEEK) == 0) { 1178 so->so_oobmark -= len; 1179 if (so->so_oobmark == 0) { 1180 so->so_rcv.sb_state |= SS_RCVATMARK; 1181 break; 1182 } 1183 } else { 1184 offset += len; 1185 if (offset == so->so_oobmark) 1186 break; 1187 } 1188 } 1189 if (flags & MSG_EOR) 1190 break; 1191 /* 1192 * If the MSG_WAITALL flag is set (for non-atomic socket), 1193 * we must not quit until "uio->uio_resid == 0" or an error 1194 * termination. If a signal/timeout occurs, return 1195 * with a short count but without error. 1196 * Keep sockbuf locked against other readers. 1197 */ 1198 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 1199 !sosendallatonce(so) && !nextrecord) { 1200 if (so->so_rcv.sb_state & SS_CANTRCVMORE || 1201 so->so_error) 1202 break; 1203 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); 1204 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); 1205 if (dosolock) { 1206 sb_mtx_unlock(&so->so_rcv); 1207 error = sbwait(so, &so->so_rcv); 1208 if (error) { 1209 sbunlock(so, &so->so_rcv); 1210 sounlock_shared(so); 1211 return (0); 1212 } 1213 sb_mtx_lock(&so->so_rcv); 1214 } else { 1215 if (sbwait_locked(so, &so->so_rcv)) { 1216 sb_mtx_unlock(&so->so_rcv); 1217 sbunlock(so, &so->so_rcv); 1218 return (0); 1219 } 1220 } 1221 if ((m = so->so_rcv.sb_mb) != NULL) 1222 nextrecord = m->m_nextpkt; 1223 } 1224 } 1225 1226 if (m && pr->pr_flags & PR_ATOMIC) { 1227 flags |= MSG_TRUNC; 1228 if ((flags & MSG_PEEK) == 0) 1229 (void) sbdroprecord(so, &so->so_rcv); 1230 } 1231 if ((flags & MSG_PEEK) == 0) { 1232 if (m == NULL) { 1233 /* 1234 * First part is an inline SB_EMPTY_FIXUP(). 
Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD) {
			sb_mtx_unlock(&so->so_rcv);
			if (!dosolock)
				solock_shared(so);
			pru_rcvd(so);
			if (!dosolock)
				sounlock_shared(so);
			sb_mtx_lock(&so->so_rcv);
		}
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 &&
	    (so->so_rcv.sb_state & SS_CANTRCVMORE) == 0) {
		sb_mtx_unlock(&so->so_rcv);
		sbunlock(so, &so->so_rcv);
		goto restart;
	}

	if (uio_error)
		error = uio_error;

	if (flagsp)
		*flagsp |= flags;
release:
	sb_mtx_unlock(&so->so_rcv);
	sbunlock(so, &so->so_rcv);
out:
	if (dosolock)
		sounlock_shared(so);
	return (error);
}

int
soshutdown(struct socket *so, int how)
{
	int error = 0;

	switch (how) {
	case SHUT_RD:
		sorflush(so);
		break;
	case SHUT_RDWR:
		sorflush(so);
		/* FALLTHROUGH */
	case SHUT_WR:
		solock(so);
		error = pru_shutdown(so);
		sounlock(so);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

void
sorflush_locked(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct mbuf *m;
	const struct protosw *pr = so->so_proto;
	int error;

	if ((sb->sb_flags & SB_MTXLOCK) == 0)
		soassertlocked(so);

	error = sblock(so, sb, SBL_WAIT | SBL_NOINTR);
	/* with SBL_WAIT and SBL_NOINTR sblock() must not fail */
	KASSERT(error == 0);

	if (sb->sb_flags & SB_MTXLOCK)
		solock(so);
	socantrcvmore(so);
	if (sb->sb_flags & SB_MTXLOCK)
		sounlock(so);

	mtx_enter(&sb->sb_mtx);
	m = sb->sb_mb;
	memset(&sb->sb_startzero, 0,
	    (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero);
	sb->sb_timeo_nsecs = INFSLP;
	mtx_leave(&sb->sb_mtx);
	sbunlock(so, sb);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(m);
	m_purge(m);
}

void
sorflush(struct socket *so)
{
	if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0)
		solock_shared(so);
	sorflush_locked(so);
	if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0)
		sounlock_shared(so);
}

#ifdef SOCKET_SPLICE

#define so_splicelen	so_sp->ssp_len
#define so_splicemax	so_sp->ssp_max
#define so_idletv	so_sp->ssp_idletv
#define so_idleto	so_sp->ssp_idleto
#define so_splicetask	so_sp->ssp_task

int
sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
{
	struct file *fp = NULL;
	struct socket *sosp;
	struct taskq *tq;
	int error = 0;

	if ((so->so_proto->pr_flags & PR_SPLICE) == 0)
		return (EPROTONOSUPPORT);
	if (max && max < 0)
		return (EINVAL);
	if (tv && (tv->tv_sec < 0 || !timerisvalid(tv)))
		return (EINVAL);

	if (sosplice_taskq == NULL) {
		rw_enter_write(&sosplice_lock);
		if (sosplice_taskq == NULL) {
			tq = taskq_create("sosplice", 1, IPL_SOFTNET,
			    TASKQ_MPSAFE);
			if
(tq == NULL) { 1376 rw_exit_write(&sosplice_lock); 1377 return (ENOMEM); 1378 } 1379 /* Ensure the taskq is fully visible to other CPUs. */ 1380 membar_producer(); 1381 sosplice_taskq = tq; 1382 } 1383 rw_exit_write(&sosplice_lock); 1384 } else { 1385 /* Ensure the taskq is fully visible on this CPU. */ 1386 membar_consumer(); 1387 } 1388 1389 if (so->so_rcv.sb_flags & SB_MTXLOCK) { 1390 if ((error = sblock(so, &so->so_rcv, SBL_WAIT)) != 0) 1391 return (error); 1392 solock(so); 1393 } else { 1394 solock(so); 1395 if ((error = sblock(so, &so->so_rcv, SBL_WAIT)) != 0) { 1396 sounlock(so); 1397 return (error); 1398 } 1399 } 1400 1401 if (so->so_options & SO_ACCEPTCONN) { 1402 error = EOPNOTSUPP; 1403 goto out; 1404 } 1405 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1406 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 1407 error = ENOTCONN; 1408 goto out; 1409 } 1410 if (so->so_sp == NULL) 1411 so->so_sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO); 1412 1413 /* If no fd is given, unsplice by removing existing link. */ 1414 if (fd < 0) { 1415 if (so->so_sp->ssp_socket) 1416 sounsplice(so, so->so_sp->ssp_socket, 0); 1417 goto out; 1418 } 1419 1420 /* Find sosp, the drain socket where data will be spliced into. */ 1421 if ((error = getsock(curproc, fd, &fp)) != 0) 1422 goto out; 1423 sosp = fp->f_data; 1424 if (sosp->so_proto->pr_usrreqs->pru_send != 1425 so->so_proto->pr_usrreqs->pru_send) { 1426 error = EPROTONOSUPPORT; 1427 goto out; 1428 } 1429 if (sosp->so_sp == NULL) 1430 sosp->so_sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO); 1431 1432 if ((error = sblock(sosp, &sosp->so_snd, SBL_WAIT)) != 0) { 1433 goto out; 1434 } 1435 1436 if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) { 1437 error = EBUSY; 1438 goto release; 1439 } 1440 if (sosp->so_options & SO_ACCEPTCONN) { 1441 error = EOPNOTSUPP; 1442 goto release; 1443 } 1444 if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) { 1445 error = ENOTCONN; 1446 goto release; 1447 } 1448 1449 /* Splice so and sosp together. */ 1450 mtx_enter(&so->so_rcv.sb_mtx); 1451 so->so_sp->ssp_socket = sosp; 1452 sosp->so_sp->ssp_soback = so; 1453 mtx_leave(&so->so_rcv.sb_mtx); 1454 so->so_splicelen = 0; 1455 so->so_splicemax = max; 1456 if (tv) 1457 so->so_idletv = *tv; 1458 else 1459 timerclear(&so->so_idletv); 1460 timeout_set_proc(&so->so_idleto, soidle, so); 1461 task_set(&so->so_splicetask, sotask, so); 1462 1463 /* 1464 * To prevent softnet interrupt from calling somove() while 1465 * we sleep, the socket buffers are not marked as spliced yet. 
	 */
	if (somove(so, M_WAIT)) {
		mtx_enter(&so->so_rcv.sb_mtx);
		so->so_rcv.sb_flags |= SB_SPLICE;
		mtx_leave(&so->so_rcv.sb_mtx);
		sosp->so_snd.sb_flags |= SB_SPLICE;
	}

release:
	sbunlock(sosp, &sosp->so_snd);
out:
	if (so->so_rcv.sb_flags & SB_MTXLOCK) {
		sounlock(so);
		sbunlock(so, &so->so_rcv);
	} else {
		sbunlock(so, &so->so_rcv);
		sounlock(so);
	}

	if (fp)
		FRELE(fp, curproc);

	return (error);
}

void
sounsplice(struct socket *so, struct socket *sosp, int freeing)
{
	soassertlocked(so);

	task_del(sosplice_taskq, &so->so_splicetask);
	timeout_del(&so->so_idleto);
	sosp->so_snd.sb_flags &= ~SB_SPLICE;

	mtx_enter(&so->so_rcv.sb_mtx);
	so->so_rcv.sb_flags &= ~SB_SPLICE;
	so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
	mtx_leave(&so->so_rcv.sb_mtx);

	/* Do not wakeup a socket that is about to be freed. */
	if ((freeing & SOSP_FREEING_READ) == 0 && soreadable(so))
		sorwakeup(so);
	if ((freeing & SOSP_FREEING_WRITE) == 0 && sowriteable(sosp))
		sowwakeup(sosp);
}

void
soidle(void *arg)
{
	struct socket *so = arg;

	solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		so->so_error = ETIMEDOUT;
		sounsplice(so, so->so_sp->ssp_socket, 0);
	}
	sounlock(so);
}

void
sotask(void *arg)
{
	struct socket *so = arg;

	solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * We may not sleep here as sofree() and unsplice() may be
		 * called from softnet interrupt context.  This would remove
		 * the socket during somove().
		 */
		somove(so, M_DONTWAIT);
	}
	sounlock(so);

	/* Avoid user land starvation. */
	yield();
}

/*
 * The socket splicing task or idle timeout may sleep while grabbing the net
 * lock.  As sofree() can be called anytime, sotask() or soidle() could access
 * the socket memory of a freed socket after wakeup.  So delay the pool_put()
 * until all pending socket splicing tasks or timeouts have finished.  Do this
 * by scheduling it on the same threads.
 */
void
soreaper(void *arg)
{
	struct socket *so = arg;

	/* Reuse the splice task, sounsplice() has been called before. */
	task_set(&so->so_sp->ssp_task, soput, so);
	task_add(sosplice_taskq, &so->so_sp->ssp_task);
}

void
soput(void *arg)
{
	struct socket *so = arg;

	pool_put(&sosplice_pool, so->so_sp);
	pool_put(&socket_pool, so);
}

/*
 * Move data from the receive buffer of a spliced source socket to the send
 * buffer of the drain socket.  Try to move as much as possible in one
 * big chunk.  It is a TCP only implementation.
 * Return value 0 means splicing has finished, 1 means it should continue.
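 *
 * Splicing is requested from user space via setsockopt(2).  A minimal,
 * illustrative sketch (not part of this file; assumes two connected TCP
 * sockets s and d, no transfer limit and no idle timeout):
 *
 *	struct splice sp;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.sp_fd = d;
 *	setsockopt(s, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp));
 *
 * Passing just the drain descriptor as an int, or -1 to unsplice, takes
 * the shorter paths in sosetopt() below.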
1576 */ 1577 int 1578 somove(struct socket *so, int wait) 1579 { 1580 struct socket *sosp = so->so_sp->ssp_socket; 1581 struct mbuf *m, **mp, *nextrecord; 1582 u_long len, off, oobmark; 1583 long space; 1584 int error = 0, maxreached = 0; 1585 unsigned int rcvstate; 1586 1587 soassertlocked(so); 1588 1589 nextpkt: 1590 if (so->so_error) { 1591 error = so->so_error; 1592 goto release; 1593 } 1594 if (sosp->so_snd.sb_state & SS_CANTSENDMORE) { 1595 error = EPIPE; 1596 goto release; 1597 } 1598 if (sosp->so_error && sosp->so_error != ETIMEDOUT && 1599 sosp->so_error != EFBIG && sosp->so_error != ELOOP) { 1600 error = sosp->so_error; 1601 goto release; 1602 } 1603 if ((sosp->so_state & SS_ISCONNECTED) == 0) 1604 goto release; 1605 1606 /* Calculate how many bytes can be copied now. */ 1607 len = so->so_rcv.sb_datacc; 1608 if (so->so_splicemax) { 1609 KASSERT(so->so_splicelen < so->so_splicemax); 1610 if (so->so_splicemax <= so->so_splicelen + len) { 1611 len = so->so_splicemax - so->so_splicelen; 1612 maxreached = 1; 1613 } 1614 } 1615 space = sbspace(sosp, &sosp->so_snd); 1616 if (so->so_oobmark && so->so_oobmark < len && 1617 so->so_oobmark < space + 1024) 1618 space += 1024; 1619 if (space <= 0) { 1620 maxreached = 0; 1621 goto release; 1622 } 1623 if (space < len) { 1624 maxreached = 0; 1625 if (space < sosp->so_snd.sb_lowat) 1626 goto release; 1627 len = space; 1628 } 1629 sosp->so_snd.sb_state |= SS_ISSENDING; 1630 1631 SBLASTRECORDCHK(&so->so_rcv, "somove 1"); 1632 SBLASTMBUFCHK(&so->so_rcv, "somove 1"); 1633 m = so->so_rcv.sb_mb; 1634 if (m == NULL) 1635 goto release; 1636 nextrecord = m->m_nextpkt; 1637 1638 /* Drop address and control information not used with splicing. */ 1639 if (so->so_proto->pr_flags & PR_ADDR) { 1640 #ifdef DIAGNOSTIC 1641 if (m->m_type != MT_SONAME) 1642 panic("somove soname: so %p, so_type %d, m %p, " 1643 "m_type %d", so, so->so_type, m, m->m_type); 1644 #endif 1645 m = m->m_next; 1646 } 1647 while (m && m->m_type == MT_CONTROL) 1648 m = m->m_next; 1649 if (m == NULL) { 1650 sbdroprecord(so, &so->so_rcv); 1651 if (so->so_proto->pr_flags & PR_WANTRCVD) 1652 pru_rcvd(so); 1653 goto nextpkt; 1654 } 1655 1656 /* 1657 * By splicing sockets connected to localhost, userland might create a 1658 * loop. Dissolve splicing with error if loop is detected by counter. 1659 * 1660 * If we deal with looped broadcast/multicast packet we bail out with 1661 * no error to suppress splice termination. 1662 */ 1663 if ((m->m_flags & M_PKTHDR) && 1664 ((m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) || 1665 ((m->m_flags & M_LOOP) && (m->m_flags & (M_BCAST|M_MCAST))))) { 1666 error = ELOOP; 1667 goto release; 1668 } 1669 1670 if (so->so_proto->pr_flags & PR_ATOMIC) { 1671 if ((m->m_flags & M_PKTHDR) == 0) 1672 panic("somove !PKTHDR: so %p, so_type %d, m %p, " 1673 "m_type %d", so, so->so_type, m, m->m_type); 1674 if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) { 1675 error = EMSGSIZE; 1676 goto release; 1677 } 1678 if (len < m->m_pkthdr.len) 1679 goto release; 1680 if (m->m_pkthdr.len < len) { 1681 maxreached = 0; 1682 len = m->m_pkthdr.len; 1683 } 1684 /* 1685 * Throw away the name mbuf after it has been assured 1686 * that the whole first record can be processed. 1687 */ 1688 m = so->so_rcv.sb_mb; 1689 sbfree(so, &so->so_rcv, m); 1690 so->so_rcv.sb_mb = m_free(m); 1691 sbsync(&so->so_rcv, nextrecord); 1692 } 1693 /* 1694 * Throw away the control mbufs after it has been assured 1695 * that the whole first record can be processed. 
1696 */ 1697 m = so->so_rcv.sb_mb; 1698 while (m && m->m_type == MT_CONTROL) { 1699 sbfree(so, &so->so_rcv, m); 1700 so->so_rcv.sb_mb = m_free(m); 1701 m = so->so_rcv.sb_mb; 1702 sbsync(&so->so_rcv, nextrecord); 1703 } 1704 1705 SBLASTRECORDCHK(&so->so_rcv, "somove 2"); 1706 SBLASTMBUFCHK(&so->so_rcv, "somove 2"); 1707 1708 /* Take at most len mbufs out of receive buffer. */ 1709 for (off = 0, mp = &m; off <= len && *mp; 1710 off += (*mp)->m_len, mp = &(*mp)->m_next) { 1711 u_long size = len - off; 1712 1713 #ifdef DIAGNOSTIC 1714 if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER) 1715 panic("somove type: so %p, so_type %d, m %p, " 1716 "m_type %d", so, so->so_type, *mp, (*mp)->m_type); 1717 #endif 1718 if ((*mp)->m_len > size) { 1719 /* 1720 * Move only a partial mbuf at maximum splice length or 1721 * if the drain buffer is too small for this large mbuf. 1722 */ 1723 if (!maxreached && so->so_snd.sb_datacc > 0) { 1724 len -= size; 1725 break; 1726 } 1727 *mp = m_copym(so->so_rcv.sb_mb, 0, size, wait); 1728 if (*mp == NULL) { 1729 len -= size; 1730 break; 1731 } 1732 so->so_rcv.sb_mb->m_data += size; 1733 so->so_rcv.sb_mb->m_len -= size; 1734 so->so_rcv.sb_cc -= size; 1735 so->so_rcv.sb_datacc -= size; 1736 } else { 1737 *mp = so->so_rcv.sb_mb; 1738 sbfree(so, &so->so_rcv, *mp); 1739 so->so_rcv.sb_mb = (*mp)->m_next; 1740 sbsync(&so->so_rcv, nextrecord); 1741 } 1742 } 1743 *mp = NULL; 1744 1745 SBLASTRECORDCHK(&so->so_rcv, "somove 3"); 1746 SBLASTMBUFCHK(&so->so_rcv, "somove 3"); 1747 SBCHECK(so, &so->so_rcv); 1748 if (m == NULL) 1749 goto release; 1750 m->m_nextpkt = NULL; 1751 if (m->m_flags & M_PKTHDR) { 1752 m_resethdr(m); 1753 m->m_pkthdr.len = len; 1754 } 1755 1756 /* Send window update to source peer as receive buffer has changed. */ 1757 if (so->so_proto->pr_flags & PR_WANTRCVD) 1758 pru_rcvd(so); 1759 1760 /* Receive buffer did shrink by len bytes, adjust oob. */ 1761 mtx_enter(&so->so_rcv.sb_mtx); 1762 rcvstate = so->so_rcv.sb_state; 1763 so->so_rcv.sb_state &= ~SS_RCVATMARK; 1764 oobmark = so->so_oobmark; 1765 so->so_oobmark = oobmark > len ? oobmark - len : 0; 1766 if (oobmark) { 1767 if (oobmark == len) 1768 so->so_rcv.sb_state |= SS_RCVATMARK; 1769 if (oobmark >= len) 1770 oobmark = 0; 1771 } 1772 mtx_leave(&so->so_rcv.sb_mtx); 1773 1774 /* 1775 * Handle oob data. If any malloc fails, ignore error. 1776 * TCP urgent data is not very reliable anyway. 1777 */ 1778 while (((rcvstate & SS_RCVATMARK) || oobmark) && 1779 (so->so_options & SO_OOBINLINE)) { 1780 struct mbuf *o = NULL; 1781 1782 if (rcvstate & SS_RCVATMARK) { 1783 o = m_get(wait, MT_DATA); 1784 rcvstate &= ~SS_RCVATMARK; 1785 } else if (oobmark) { 1786 o = m_split(m, oobmark, wait); 1787 if (o) { 1788 error = pru_send(sosp, m, NULL, NULL); 1789 if (error) { 1790 if (sosp->so_snd.sb_state & 1791 SS_CANTSENDMORE) 1792 error = EPIPE; 1793 m_freem(o); 1794 goto release; 1795 } 1796 len -= oobmark; 1797 so->so_splicelen += oobmark; 1798 m = o; 1799 o = m_get(wait, MT_DATA); 1800 } 1801 oobmark = 0; 1802 } 1803 if (o) { 1804 o->m_len = 1; 1805 *mtod(o, caddr_t) = *mtod(m, caddr_t); 1806 error = pru_sendoob(sosp, o, NULL, NULL); 1807 if (error) { 1808 if (sosp->so_snd.sb_state & SS_CANTSENDMORE) 1809 error = EPIPE; 1810 m_freem(m); 1811 goto release; 1812 } 1813 len -= 1; 1814 so->so_splicelen += 1; 1815 if (oobmark) { 1816 oobmark -= 1; 1817 if (oobmark == 0) 1818 rcvstate |= SS_RCVATMARK; 1819 } 1820 m_adj(m, 1); 1821 } 1822 } 1823 1824 /* Append all remaining data to drain socket. 
*/ 1825 if (so->so_rcv.sb_cc == 0 || maxreached) 1826 sosp->so_snd.sb_state &= ~SS_ISSENDING; 1827 error = pru_send(sosp, m, NULL, NULL); 1828 if (error) { 1829 if (sosp->so_snd.sb_state & SS_CANTSENDMORE) 1830 error = EPIPE; 1831 goto release; 1832 } 1833 so->so_splicelen += len; 1834 1835 /* Move several packets if possible. */ 1836 if (!maxreached && nextrecord) 1837 goto nextpkt; 1838 1839 release: 1840 sosp->so_snd.sb_state &= ~SS_ISSENDING; 1841 if (!error && maxreached && so->so_splicemax == so->so_splicelen) 1842 error = EFBIG; 1843 if (error) 1844 so->so_error = error; 1845 if (((so->so_rcv.sb_state & SS_CANTRCVMORE) && 1846 so->so_rcv.sb_cc == 0) || 1847 (sosp->so_snd.sb_state & SS_CANTSENDMORE) || 1848 maxreached || error) { 1849 sounsplice(so, sosp, 0); 1850 return (0); 1851 } 1852 if (timerisset(&so->so_idletv)) 1853 timeout_add_tv(&so->so_idleto, &so->so_idletv); 1854 return (1); 1855 } 1856 1857 #endif /* SOCKET_SPLICE */ 1858 1859 void 1860 sorwakeup(struct socket *so) 1861 { 1862 if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0) 1863 soassertlocked_readonly(so); 1864 1865 #ifdef SOCKET_SPLICE 1866 if (so->so_rcv.sb_flags & SB_SPLICE) { 1867 /* 1868 * TCP has a sendbuffer that can handle multiple packets 1869 * at once. So queue the stream a bit to accumulate data. 1870 * The sosplice thread will call somove() later and send 1871 * the packets calling tcp_output() only once. 1872 * In the UDP case, send out the packets immediately. 1873 * Using a thread would make things slower. 1874 */ 1875 if (so->so_proto->pr_flags & PR_WANTRCVD) 1876 task_add(sosplice_taskq, &so->so_splicetask); 1877 else 1878 somove(so, M_DONTWAIT); 1879 } 1880 if (isspliced(so)) 1881 return; 1882 #endif 1883 sowakeup(so, &so->so_rcv); 1884 if (so->so_upcall) 1885 (*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT); 1886 } 1887 1888 void 1889 sowwakeup(struct socket *so) 1890 { 1891 if ((so->so_snd.sb_flags & SB_MTXLOCK) == 0) 1892 soassertlocked_readonly(so); 1893 1894 #ifdef SOCKET_SPLICE 1895 if (so->so_snd.sb_flags & SB_SPLICE) 1896 task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask); 1897 if (issplicedback(so)) 1898 return; 1899 #endif 1900 sowakeup(so, &so->so_snd); 1901 } 1902 1903 int 1904 sosetopt(struct socket *so, int level, int optname, struct mbuf *m) 1905 { 1906 int error = 0; 1907 1908 if (level != SOL_SOCKET) { 1909 if (so->so_proto->pr_ctloutput) { 1910 solock(so); 1911 error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, 1912 level, optname, m); 1913 sounlock(so); 1914 return (error); 1915 } 1916 error = ENOPROTOOPT; 1917 } else { 1918 switch (optname) { 1919 1920 case SO_LINGER: 1921 if (m == NULL || m->m_len != sizeof (struct linger) || 1922 mtod(m, struct linger *)->l_linger < 0 || 1923 mtod(m, struct linger *)->l_linger > SHRT_MAX) 1924 return (EINVAL); 1925 1926 solock(so); 1927 so->so_linger = mtod(m, struct linger *)->l_linger; 1928 if (*mtod(m, int *)) 1929 so->so_options |= optname; 1930 else 1931 so->so_options &= ~optname; 1932 sounlock(so); 1933 1934 break; 1935 case SO_BINDANY: 1936 if ((error = suser(curproc)) != 0) /* XXX */ 1937 return (error); 1938 /* FALLTHROUGH */ 1939 1940 case SO_DEBUG: 1941 case SO_KEEPALIVE: 1942 case SO_USELOOPBACK: 1943 case SO_BROADCAST: 1944 case SO_REUSEADDR: 1945 case SO_REUSEPORT: 1946 case SO_OOBINLINE: 1947 case SO_TIMESTAMP: 1948 case SO_ZEROIZE: 1949 if (m == NULL || m->m_len < sizeof (int)) 1950 return (EINVAL); 1951 1952 solock(so); 1953 if (*mtod(m, int *)) 1954 so->so_options |= optname; 1955 else 1956 so->so_options &= 
~optname; 1957 sounlock(so); 1958 1959 break; 1960 case SO_DONTROUTE: 1961 if (m == NULL || m->m_len < sizeof (int)) 1962 return (EINVAL); 1963 if (*mtod(m, int *)) 1964 error = EOPNOTSUPP; 1965 break; 1966 1967 case SO_SNDBUF: 1968 case SO_RCVBUF: 1969 case SO_SNDLOWAT: 1970 case SO_RCVLOWAT: 1971 { 1972 struct sockbuf *sb = (optname == SO_SNDBUF || 1973 optname == SO_SNDLOWAT ? 1974 &so->so_snd : &so->so_rcv); 1975 u_long cnt; 1976 1977 if (m == NULL || m->m_len < sizeof (int)) 1978 return (EINVAL); 1979 cnt = *mtod(m, int *); 1980 if ((long)cnt <= 0) 1981 cnt = 1; 1982 1983 if (((sb->sb_flags & SB_MTXLOCK) == 0)) 1984 solock(so); 1985 mtx_enter(&sb->sb_mtx); 1986 1987 switch (optname) { 1988 case SO_SNDBUF: 1989 case SO_RCVBUF: 1990 if (sb->sb_state & 1991 (SS_CANTSENDMORE | SS_CANTRCVMORE)) { 1992 error = EINVAL; 1993 break; 1994 } 1995 if (sbcheckreserve(cnt, sb->sb_wat) || 1996 sbreserve(so, sb, cnt)) { 1997 error = ENOBUFS; 1998 break; 1999 } 2000 sb->sb_wat = cnt; 2001 break; 2002 case SO_SNDLOWAT: 2003 case SO_RCVLOWAT: 2004 sb->sb_lowat = (cnt > sb->sb_hiwat) ? 2005 sb->sb_hiwat : cnt; 2006 break; 2007 } 2008 2009 mtx_leave(&sb->sb_mtx); 2010 if (((sb->sb_flags & SB_MTXLOCK) == 0)) 2011 sounlock(so); 2012 2013 break; 2014 } 2015 2016 case SO_SNDTIMEO: 2017 case SO_RCVTIMEO: 2018 { 2019 struct sockbuf *sb = (optname == SO_SNDTIMEO ? 2020 &so->so_snd : &so->so_rcv); 2021 struct timeval tv; 2022 uint64_t nsecs; 2023 2024 if (m == NULL || m->m_len < sizeof (tv)) 2025 return (EINVAL); 2026 memcpy(&tv, mtod(m, struct timeval *), sizeof tv); 2027 if (!timerisvalid(&tv)) 2028 return (EINVAL); 2029 nsecs = TIMEVAL_TO_NSEC(&tv); 2030 if (nsecs == UINT64_MAX) 2031 return (EDOM); 2032 if (nsecs == 0) 2033 nsecs = INFSLP; 2034 2035 mtx_enter(&sb->sb_mtx); 2036 sb->sb_timeo_nsecs = nsecs; 2037 mtx_leave(&sb->sb_mtx); 2038 break; 2039 } 2040 2041 case SO_RTABLE: 2042 if (so->so_proto->pr_domain && 2043 so->so_proto->pr_domain->dom_protosw && 2044 so->so_proto->pr_ctloutput) { 2045 const struct domain *dom = 2046 so->so_proto->pr_domain; 2047 2048 level = dom->dom_protosw->pr_protocol; 2049 solock(so); 2050 error = (*so->so_proto->pr_ctloutput) 2051 (PRCO_SETOPT, so, level, optname, m); 2052 sounlock(so); 2053 } else 2054 error = ENOPROTOOPT; 2055 break; 2056 #ifdef SOCKET_SPLICE 2057 case SO_SPLICE: 2058 if (m == NULL) { 2059 error = sosplice(so, -1, 0, NULL); 2060 } else if (m->m_len < sizeof(int)) { 2061 error = EINVAL; 2062 } else if (m->m_len < sizeof(struct splice)) { 2063 error = sosplice(so, *mtod(m, int *), 0, NULL); 2064 } else { 2065 error = sosplice(so, 2066 mtod(m, struct splice *)->sp_fd, 2067 mtod(m, struct splice *)->sp_max, 2068 &mtod(m, struct splice *)->sp_idle); 2069 } 2070 break; 2071 #endif /* SOCKET_SPLICE */ 2072 2073 default: 2074 error = ENOPROTOOPT; 2075 break; 2076 } 2077 } 2078 2079 return (error); 2080 } 2081 2082 int 2083 sogetopt(struct socket *so, int level, int optname, struct mbuf *m) 2084 { 2085 int error = 0; 2086 2087 if (level != SOL_SOCKET) { 2088 if (so->so_proto->pr_ctloutput) { 2089 m->m_len = 0; 2090 2091 solock(so); 2092 error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so, 2093 level, optname, m); 2094 sounlock(so); 2095 return (error); 2096 } else 2097 return (ENOPROTOOPT); 2098 } else { 2099 m->m_len = sizeof (int); 2100 2101 switch (optname) { 2102 2103 case SO_LINGER: 2104 m->m_len = sizeof (struct linger); 2105 solock_shared(so); 2106 mtod(m, struct linger *)->l_onoff = 2107 so->so_options & SO_LINGER; 2108 mtod(m, struct linger *)->l_linger = 

int
sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			m->m_len = 0;

			solock(so);
			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
			    level, optname, m);
			sounlock(so);
			return (error);
		} else
			return (ENOPROTOOPT);
	} else {
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			solock_shared(so);
			mtod(m, struct linger *)->l_onoff =
			    so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			sounlock_shared(so);
			break;

		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_ACCEPTCONN:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_DONTROUTE:
			*mtod(m, int *) = 0;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			solock(so);
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			sounlock(so);

			break;

		case SO_DOMAIN:
			*mtod(m, int *) = so->so_proto->pr_domain->dom_family;
			break;

		case SO_PROTOCOL:
			*mtod(m, int *) = so->so_proto->pr_protocol;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct sockbuf *sb = (optname == SO_SNDTIMEO ?
			    &so->so_snd : &so->so_rcv);
			struct timeval tv;
			uint64_t nsecs;

			mtx_enter(&sb->sb_mtx);
			nsecs = sb->sb_timeo_nsecs;
			mtx_leave(&sb->sb_mtx);

			m->m_len = sizeof(struct timeval);
			memset(&tv, 0, sizeof(tv));
			if (nsecs != INFSLP)
				NSEC_TO_TIMEVAL(nsecs, &tv);
			memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				const struct domain *dom =
				    so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				solock(so);
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_GETOPT, so, level, optname, m);
				sounlock(so);
				if (error)
					return (error);
				break;
			}
			return (ENOPROTOOPT);

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
		    {
			off_t len;

			m->m_len = sizeof(off_t);
			solock_shared(so);
			len = so->so_sp ? so->so_sp->ssp_len : 0;
			sounlock_shared(so);
			memcpy(mtod(m, off_t *), &len, sizeof(off_t));
			break;
		    }
#endif /* SOCKET_SPLICE */

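		/*
		 * SO_PEERCRED is specific to unix(4) sockets.  A sketch of
		 * the userland side, assuming fd is a connected AF_UNIX
		 * socket and using struct sockpeercred from <sys/socket.h>:
		 *
		 *	struct sockpeercred cr;
		 *	socklen_t len = sizeof(cr);
		 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED,
		 *	    &cr, &len) == 0)
		 *		printf("peer uid %d pid %d\n",
		 *		    (int)cr.uid, (int)cr.pid);
		 *
		 * ENOTCONN below is returned when no peer credentials have
		 * been recorded for this socket (UNP_FEIDS is not set).
		 */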
		case SO_PEERCRED:
			if (so->so_proto->pr_protocol == AF_UNIX) {
				struct unpcb *unp = sotounpcb(so);

				solock(so);
				if (unp->unp_flags & UNP_FEIDS) {
					m->m_len = sizeof(unp->unp_connid);
					memcpy(mtod(m, caddr_t),
					    &(unp->unp_connid), m->m_len);
					sounlock(so);
					break;
				}
				sounlock(so);

				return (ENOTCONN);
			}
			return (EOPNOTSUPP);

		default:
			return (ENOPROTOOPT);
		}
		return (0);
	}
}

void
sohasoutofband(struct socket *so)
{
	pgsigio(&so->so_sigio, SIGURG, 0);
	knote(&so->so_rcv.sb_klist, 0);
}

void
sofilt_lock(struct socket *so, struct sockbuf *sb)
{
	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		NET_LOCK_SHARED();
		break;
	default:
		rw_enter_write(&so->so_lock);
		break;
	}

	mtx_enter(&sb->sb_mtx);
}

void
sofilt_unlock(struct socket *so, struct sockbuf *sb)
{
	mtx_leave(&sb->sb_mtx);

	switch (so->so_proto->pr_domain->dom_family) {
	case PF_INET:
	case PF_INET6:
		NET_UNLOCK_SHARED();
		break;
	default:
		rw_exit_write(&so->so_lock);
		break;
	}
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		sb = &so->so_rcv;
		break;
	default:
		return (EINVAL);
	}

	klist_insert(&sb->sb_klist, kn);

	return (0);
}

void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	klist_remove(&so->so_rcv.sb_klist, kn);
}

int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv = 0;

	MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);
	if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0)
		soassertlocked_readonly(so);

	if (so->so_options & SO_ACCEPTCONN) {
		if (so->so_rcv.sb_flags & SB_MTXLOCK)
			soassertlocked_readonly(so);

		kn->kn_data = so->so_qlen;
		rv = (kn->kn_data != 0);

		if (kn->kn_flags & (__EV_POLL | __EV_SELECT)) {
			if (so->so_state & SS_ISDISCONNECTED) {
				kn->kn_flags |= __EV_HUP;
				rv = 1;
			} else {
				rv = soreadable(so);
			}
		}

		return rv;
	}

	kn->kn_data = so->so_rcv.sb_cc;
#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (so->so_rcv.sb_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (so->so_state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {
		rv = 1;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
	}

	return rv;
}
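
/*
 * The knote filters above and below implement kqueue(2) events for
 * sockets.  An illustrative userland sketch, assuming kq comes from
 * kqueue(2) and fd is a socket (<sys/event.h>):
 *
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * With NOTE_LOWAT the event only fires once at least 128 bytes
 * (kn_sdata) are buffered; otherwise the receive low-water mark
 * so_rcv.sb_lowat applies, as filt_soread() shows.
 */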

void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	klist_remove(&so->so_snd.sb_klist, kn);
}

int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	MUTEX_ASSERT_LOCKED(&so->so_snd.sb_mtx);
	if ((so->so_snd.sb_flags & SB_MTXLOCK) == 0)
		soassertlocked_readonly(so);

	kn->kn_data = sbspace(so, &so->so_snd);
	if (so->so_snd.sb_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (so->so_state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {
		rv = 1;
	} else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		rv = 0;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	}

	return (rv);
}

int
filt_soexcept(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv = 0;

	MUTEX_ASSERT_LOCKED(&so->so_rcv.sb_mtx);
	if ((so->so_rcv.sb_flags & SB_MTXLOCK) == 0)
		soassertlocked_readonly(so);

#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (kn->kn_sfflags & NOTE_OOB) {
		if (so->so_oobmark || (so->so_rcv.sb_state & SS_RCVATMARK)) {
			kn->kn_fflags |= NOTE_OOB;
			kn->kn_data -= so->so_oobmark;
			rv = 1;
		}
	}

	if (kn->kn_flags & __EV_POLL) {
		if (so->so_state & SS_ISDISCONNECTED) {
			kn->kn_flags |= __EV_HUP;
			rv = 1;
		}
	}

	return rv;
}

int
filt_sowmodify(struct kevent *kev, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	sofilt_lock(so, &so->so_snd);
	rv = knote_modify(kev, kn);
	sofilt_unlock(so, &so->so_snd);

	return (rv);
}

int
filt_sowprocess(struct knote *kn, struct kevent *kev)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	sofilt_lock(so, &so->so_snd);
	rv = knote_process(kn, kev);
	sofilt_unlock(so, &so->so_snd);

	return (rv);
}

int
filt_sormodify(struct kevent *kev, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	sofilt_lock(so, &so->so_rcv);
	rv = knote_modify(kev, kn);
	sofilt_unlock(so, &so->so_rcv);

	return (rv);
}

int
filt_sorprocess(struct knote *kn, struct kevent *kev)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	sofilt_lock(so, &so->so_rcv);
	rv = knote_process(kn, kev);
	sofilt_unlock(so, &so->so_rcv);

	return (rv);
}
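
/*
 * The helpers below are compiled only with the kernel debugger and dump
 * a struct socket together with its send and receive buffers; they are
 * intended to be invoked from ddb(4), typically through its
 * "show socket" command.
 */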
#ifdef DDB
void	sobuf_print(struct sockbuf *,
	    int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))));

void
sobuf_print(struct sockbuf *sb,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	(*pr)("\tsb_cc: %lu\n", sb->sb_cc);
	(*pr)("\tsb_datacc: %lu\n", sb->sb_datacc);
	(*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat);
	(*pr)("\tsb_wat: %lu\n", sb->sb_wat);
	(*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt);
	(*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax);
	(*pr)("\tsb_lowat: %ld\n", sb->sb_lowat);
	(*pr)("\tsb_mb: %p\n", sb->sb_mb);
	(*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail);
	(*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord);
	(*pr)("\tsb_sel: ...\n");
	(*pr)("\tsb_flags: %04x\n", sb->sb_flags);
	(*pr)("\tsb_state: %04x\n", sb->sb_state);
	(*pr)("\tsb_timeo_nsecs: %llu\n", sb->sb_timeo_nsecs);
}

void
so_print(void *v,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct socket *so = v;

	(*pr)("socket %p\n", so);
	(*pr)("so_type: %i\n", so->so_type);
	(*pr)("so_options: 0x%04x\n", so->so_options); /* %b */
	(*pr)("so_linger: %i\n", so->so_linger);
	(*pr)("so_state: 0x%04x\n", so->so_state);
	(*pr)("so_pcb: %p\n", so->so_pcb);
	(*pr)("so_proto: %p\n", so->so_proto);
	(*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio);

	(*pr)("so_head: %p\n", so->so_head);
	(*pr)("so_onq: %p\n", so->so_onq);
	(*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0));
	(*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q));
	(*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe));
	(*pr)("so_q0len: %i\n", so->so_q0len);
	(*pr)("so_qlen: %i\n", so->so_qlen);
	(*pr)("so_qlimit: %i\n", so->so_qlimit);
	(*pr)("so_timeo: %i\n", so->so_timeo);
	(*pr)("so_obmark: %lu\n", so->so_oobmark);

	(*pr)("so_sp: %p\n", so->so_sp);
	if (so->so_sp != NULL) {
		(*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket);
		(*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback);
		(*pr)("\tssp_len: %lld\n",
		    (unsigned long long)so->so_sp->ssp_len);
		(*pr)("\tssp_max: %lld\n",
		    (unsigned long long)so->so_sp->ssp_max);
		(*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec,
		    so->so_sp->ssp_idletv.tv_usec);
		(*pr)("\tssp_idleto: %spending (@%i)\n",
		    timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ",
		    so->so_sp->ssp_idleto.to_time);
	}

	(*pr)("so_rcv:\n");
	sobuf_print(&so->so_rcv, pr);
	(*pr)("so_snd:\n");
	sobuf_print(&so->so_snd, pr);

	(*pr)("so_upcall: %p so_upcallarg: %p\n",
	    so->so_upcall, so->so_upcallarg);

	(*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid);
	(*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid);
	(*pr)("so_cpid: %d\n", so->so_cpid);
}
#endif