/*	$OpenBSD: uipc_socket.c,v 1.279 2022/07/01 09:56:17 mvs Exp $	*/
/*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/unpcb.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <net/if.h>
#include <sys/pool.h>
#include <sys/atomic.h>
#include <sys/rwlock.h>
#include <sys/time.h>
#include <sys/refcnt.h>

#ifdef DDB
#include <machine/db_machdep.h>
#endif

void	sbsync(struct sockbuf *, struct mbuf *);

int	sosplice(struct socket *, int, off_t, struct timeval *);
void	sounsplice(struct socket *, struct socket *, int);
void	soidle(void *);
void	sotask(void *);
void	soreaper(void *);
void	soput(void *);
int	somove(struct socket *, int);
void	sorflush(struct socket *);

void	filt_sordetach(struct knote *kn);
int	filt_soread(struct knote *kn, long hint);
void	filt_sowdetach(struct knote *kn);
int	filt_sowrite(struct knote *kn, long hint);
int	filt_soexcept(struct knote *kn, long hint);
int	filt_solisten(struct knote *kn, long hint);
int	filt_somodify(struct kevent *kev, struct knote *kn);
int	filt_soprocess(struct knote *kn, struct kevent *kev);

const struct filterops solisten_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_solisten,
	.f_modify	= filt_somodify,
	.f_process	= filt_soprocess,
};

const struct filterops soread_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_soread,
	.f_modify	= filt_somodify,
	.f_process	= filt_soprocess,
};

const struct filterops sowrite_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_sowdetach,
	.f_event	= filt_sowrite,
	.f_modify	= filt_somodify,
	.f_process	= filt_soprocess,
};

const struct filterops soexcept_filtops = {
	.f_flags	= FILTEROP_ISFD | FILTEROP_MPSAFE,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_soexcept,
	.f_modify	= filt_somodify,
	.f_process	= filt_soprocess,
};

#ifndef SOMINCONN
#define SOMINCONN 80
#endif /* SOMINCONN */

int	somaxconn = SOMAXCONN;
int	sominconn = SOMINCONN;

struct pool socket_pool;
#ifdef SOCKET_SPLICE
struct pool sosplice_pool;
struct taskq *sosplice_taskq;
struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk");
#endif

void
soinit(void)
{
	pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0,
	    "sockpl", NULL);
#ifdef SOCKET_SPLICE
	pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0,
	    "sosppl", NULL);
#endif
}

struct socket *
soalloc(int prflags)
{
	struct socket *so;

	so = pool_get(&socket_pool, prflags);
	if (so == NULL)
		return (NULL);
	rw_init_flags(&so->so_lock, "solock", RWL_DUPOK);
	refcnt_init(&so->so_refcnt);

	return (so);
}
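
/*
 * Example call path (illustrative, not from the original source):
 * socket(2) enters sys_socket(), which allocates the file and calls
 * socreate() below; e.g. socket(AF_INET, SOCK_STREAM, 0) becomes
 * socreate(AF_INET, &so, SOCK_STREAM, 0) and resolves its protosw
 * via pffindtype(), as no explicit protocol was given.
 */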

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
int
socreate(int dom, struct socket **aso, int type, int proto)
{
	struct proc *p = curproc;		/* XXX */
	const struct protosw *prp;
	struct socket *so;
	int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL || prp->pr_attach == NULL)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(PR_WAITOK | PR_ZERO);
	klist_init(&so->so_rcv.sb_sel.si_note, &socket_klistops, so);
	klist_init(&so->so_snd.sb_sel.si_note, &socket_klistops, so);
	sigio_init(&so->so_sigio);
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
	so->so_type = type;
	if (suser(p) == 0)
		so->so_state = SS_PRIV;
	so->so_ruid = p->p_ucred->cr_ruid;
	so->so_euid = p->p_ucred->cr_uid;
	so->so_rgid = p->p_ucred->cr_rgid;
	so->so_egid = p->p_ucred->cr_gid;
	so->so_cpid = p->p_p->ps_pid;
	so->so_proto = prp;
	so->so_snd.sb_timeo_nsecs = INFSLP;
	so->so_rcv.sb_timeo_nsecs = INFSLP;

	solock(so);
	error = (*prp->pr_attach)(so, proto);
	if (error) {
		so->so_state |= SS_NOFDREF;
		/* sofree() calls sounlock(). */
		sofree(so, 0);
		return (error);
	}
	sounlock(so);
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct mbuf *nam, struct proc *p)
{
	int error;

	soassertlocked(so);

	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
	return (error);
}

int
solisten(struct socket *so, int backlog)
{
	int error;

	soassertlocked(so);

	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING))
		return (EINVAL);
#ifdef SOCKET_SPLICE
	if (isspliced(so) || issplicedback(so))
		return (EOPNOTSUPP);
#endif /* SOCKET_SPLICE */
	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
	    curproc);
	if (error)
		return (error);
	if (TAILQ_FIRST(&so->so_q) == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	if (backlog < sominconn)
		backlog = sominconn;
	so->so_qlimit = backlog;
	return (0);
}

#define SOSP_FREEING_READ	1
#define SOSP_FREEING_WRITE	2
void
sofree(struct socket *so, int keep_lock)
{
	int persocket = solock_persocket(so);

	soassertlocked(so);

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		if (!keep_lock)
			sounlock(so);
		return;
	}
	if (so->so_head) {
		struct socket *head = so->so_head;

		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (so->so_onq == &head->so_q) {
			if (!keep_lock)
				sounlock(so);
			return;
		}

		if (persocket) {
			/*
			 * Concurrent close of `head' could
			 * abort `so' due to re-lock.
			 */
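			/*
			 * Take a reference on both sockets and lock
			 * them in head-then-so order.  If `so' left
			 * so_q0 while the locks were dropped, the
			 * dequeue below is abandoned.
			 */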
			soref(so);
			soref(head);
			sounlock(so);
			solock(head);
			solock(so);

			if (so->so_onq != &head->so_q0) {
				sounlock(head);
				sounlock(so);
				sorele(head);
				sorele(so);
				return;
			}

			sorele(head);
			sorele(so);
		}

		soqremque(so, 0);

		if (persocket)
			sounlock(head);
	}

	if (persocket) {
		sounlock(so);
		refcnt_finalize(&so->so_refcnt, "sofinal");
		solock(so);
	}

	sigio_free(&so->so_sigio);
	klist_free(&so->so_rcv.sb_sel.si_note);
	klist_free(&so->so_snd.sb_sel.si_note);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		if (issplicedback(so)) {
			int freeing = SOSP_FREEING_WRITE;

			if (so->so_sp->ssp_soback == so)
				freeing |= SOSP_FREEING_READ;
			sounsplice(so->so_sp->ssp_soback, so, freeing);
		}
		if (isspliced(so)) {
			int freeing = SOSP_FREEING_READ;

			if (so == so->so_sp->ssp_socket)
				freeing |= SOSP_FREEING_WRITE;
			sounsplice(so, so->so_sp->ssp_socket, freeing);
		}
	}
#endif /* SOCKET_SPLICE */
	sbrelease(so, &so->so_snd);
	sorflush(so);
	if (!keep_lock)
		sounlock(so);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		/* Reuse splice idle, sounsplice() has been called before. */
		timeout_set_proc(&so->so_sp->ssp_idleto, soreaper, so);
		timeout_add(&so->so_sp->ssp_idleto, 0);
	} else
#endif /* SOCKET_SPLICE */
	{
		pool_put(&socket_pool, so);
	}
}

static inline uint64_t
solinger_nsec(struct socket *so)
{
	if (so->so_linger == 0)
		return INFSLP;

	return SEC_TO_NSEC(so->so_linger);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int flags)
{
	struct socket *so2;
	int error = 0;

	solock(so);
	/* Revoke async IO early. There is a final revocation in sofree(). */
	sigio_free(&so->so_sigio);
	if (so->so_state & SS_ISCONNECTED) {
		if (so->so_pcb == NULL)
			goto discard;
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (flags & MSG_DONTWAIT))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sosleep_nsec(so, &so->so_timeo,
				    PSOCK | PCATCH, "netcls",
				    solinger_nsec(so));
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;
		KASSERT(so->so_proto->pr_detach);
		error2 = (*so->so_proto->pr_detach)(so);
		if (error == 0)
			error = error2;
	}
	if (so->so_options & SO_ACCEPTCONN) {
		int persocket = solock_persocket(so);

		if (persocket) {
			/* Wait for concurrent sonewconn() threads. */
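			/*
			 * Illustrative note: sonewconn() can release
			 * the listener's lock while setting up a new
			 * connection; so_newconn counts such threads
			 * and SS_NEWCONN_WAIT asks for a wakeup once
			 * they have finished, so the queues below can
			 * be flushed safely.
			 */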
			while (so->so_newconn > 0) {
				so->so_state |= SS_NEWCONN_WAIT;
				sosleep_nsec(so, &so->so_newconn, PSOCK,
				    "netlck", INFSLP);
			}
		}

		while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) {
			if (persocket)
				solock(so2);
			(void) soqremque(so2, 0);
			if (persocket)
				sounlock(so);
			(void) soabort(so2);
			if (persocket)
				solock(so);
		}
		while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) {
			if (persocket)
				solock(so2);
			(void) soqremque(so2, 1);
			if (persocket)
				sounlock(so);
			(void) soabort(so2);
			if (persocket)
				solock(so);
		}
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state |= SS_NOFDREF;
	/* sofree() calls sounlock(). */
	sofree(so, 0);
	return (error);
}

int
soabort(struct socket *so)
{
	soassertlocked(so);

	return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL,
	    curproc);
}

int
soaccept(struct socket *so, struct mbuf *nam)
{
	int error = 0;

	soassertlocked(so);

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL,
		    nam, NULL, curproc);
	else
		error = ECONNABORTED;
	return (error);
}

int
soconnect(struct socket *so, struct mbuf *nam)
{
	int error;

	soassertlocked(so);

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    NULL, nam, NULL, curproc);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int persocket, error;

	if ((persocket = solock_persocket(so1)))
		solock_pair(so1, so2);
	else
		solock(so1);

	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL,
	    (struct mbuf *)so2, NULL, curproc);

	if (persocket)
		sounlock(so2);
	sounlock(so1);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	soassertlocked(so);

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL,
	    NULL, curproc);
	return (error);
}
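
/*
 * Note on the helpers below (added for exposition): m_getuio()
 * gathers user data into an mbuf chain for sosend(), and SBLOCKWAIT()
 * maps MSG_DONTWAIT onto the sblock() wait flag, so non-blocking
 * sends fail fast instead of sleeping on a locked socket buffer.
 */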
int m_getuio(struct mbuf **, int, long, struct uio *);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
    struct mbuf *control, int flags)
{
	long space, clen = 0;
	size_t resid;
	int error;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/* MSG_EOR on a SOCK_STREAM socket is invalid. */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		m_freem(top);
		m_freem(control);
		return (EINVAL);
	}
	if (uio && uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgsnd++;
	if (control) {
		/*
		 * In theory clen should be unsigned (since control->m_len is).
		 * However, space must be signed, as it might be less than 0
		 * if we over-committed, and we must use a signed comparison
		 * of space and clen.
		 */
		clen = control->m_len;
		/* reserve extra space for AF_UNIX's internalize */
		if (so->so_proto->pr_domain->dom_family == AF_UNIX &&
		    clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) &&
		    mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS)
			clen = CMSG_SPACE(
			    (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) *
			    (sizeof(struct fdpass) / sizeof(int)));
	}

#define	snderr(errno)	{ error = errno; goto release; }

	solock(so);
restart:
	if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	so->so_state |= SS_ISSENDING;
	do {
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			snderr(error);
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (!(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == NULL)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(so, &so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if (so->so_proto->pr_domain->dom_family == AF_UNIX) {
			if (atomic && resid > so->so_snd.sb_hiwat)
				snderr(EMSGSIZE);
		} else {
			if (clen > so->so_snd.sb_hiwat ||
			    (atomic && resid > so->so_snd.sb_hiwat - clen))
				snderr(EMSGSIZE);
		}
		if (space < clen ||
		    (space - clen < resid &&
		    (atomic || space < so->so_snd.sb_lowat))) {
			if (flags & MSG_DONTWAIT)
				snderr(EWOULDBLOCK);
			sbunlock(so, &so->so_snd);
			error = sbwait(so, &so->so_snd);
			so->so_state &= ~SS_ISSENDING;
			if (error)
				goto out;
			goto restart;
		}
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				sounlock(so);
				error = m_getuio(&top, atomic, space, uio);
				solock(so);
				if (error)
					goto release;
				space -= top->m_pkthdr.len;
				resid = uio->uio_resid;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			}
			if (resid == 0)
				so->so_state &= ~SS_ISSENDING;
			if (top && so->so_options & SO_ZEROIZE)
				top->m_flags |= M_ZEROIZE;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control, curproc);
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	so->so_state &= ~SS_ISSENDING;
	sbunlock(so, &so->so_snd);
out:
	sounlock(so);
	m_freem(top);
	m_freem(control);
	return (error);
}

int
m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio)
{
	struct mbuf *m, *top = NULL;
	struct mbuf **nextp = &top;
	u_long len, mlen;
	size_t resid = uio->uio_resid;
	int error;

	do {
		if (top == NULL) {
			MGETHDR(m, M_WAIT, MT_DATA);
			mlen = MHLEN;
			m->m_pkthdr.len = 0;
			m->m_pkthdr.ph_ifidx = 0;
		} else {
			MGET(m, M_WAIT, MT_DATA);
			mlen = MLEN;
		}
		/* chain mbuf together */
		*nextp = m;
		nextp = &m->m_next;

		resid = ulmin(resid, space);
		if (resid >= MINCLSIZE) {
			MCLGETL(m, M_NOWAIT, ulmin(resid, MAXMCLBYTES));
			if ((m->m_flags & M_EXT) == 0)
				MCLGETL(m, M_NOWAIT, MCLBYTES);
			if ((m->m_flags & M_EXT) == 0)
				goto nopages;
			mlen = m->m_ext.ext_size;
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m->m_data += max_hdr;
		} else {
nopages:
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m_align(m, len);
		}

		error = uiomove(mtod(m, caddr_t), len, uio);
		if (error) {
			m_freem(top);
			return (error);
		}

		/* adjust counters */
		resid = uio->uio_resid;
		space -= len;
		m->m_len = len;
		top->m_pkthdr.len += len;

		/* Is there more space and more data? */
	} while (space > 0 && resid > 0);

	*mp = top;
	return 0;
}

/*
 * Following replacement or removal of the first mbuf on the first
 * mbuf chain of a socket buffer, push necessary state changes back
 * into the socket buffer so that other consumers see the values
 * consistently.  'nextrecord' is the caller's locally stored value of
 * the original value of sb->sb_mb->m_nextpkt which must be restored
 * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
 */
void
sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
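
/*
 * Typical caller pattern for sbsync() (illustrative): cache
 * nextrecord = sb->sb_mb->m_nextpkt, unlink or replace the leading
 * mbufs with sbfree()/m_free(), then call sbsync() to bring sb_mb,
 * sb_mbtail and sb_lastrecord back to a consistent state.
 * soreceive() and somove() below follow this pattern.
 */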

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network for the entire time here, we release
 * the solock() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp,
    socklen_t controllen)
{
	struct mbuf *m, **mp;
	struct mbuf *cm;
	u_long len, offset, moff;
	int flags, error, type, uio_error = 0;
	const struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	size_t resid, orig_resid = uio->uio_resid;

	mp = mp0;
	if (paddr)
		*paddr = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		solock(so);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, curproc);
		sounlock(so);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    ulmin(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;

	solock(so);
restart:
	if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(so);
		return (error);
	}

	m = so->so_rcv.sb_mb;
#ifdef SOCKET_SPLICE
	if (isspliced(so))
		m = NULL;
#endif /* SOCKET_SPLICE */
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
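	/*
	 * Restated (illustrative, simplified): proceed without
	 * blocking when some data is queued and either enough is
	 * buffered to satisfy the request, the low water mark is
	 * reached without MSG_WAITALL, more records follow, or the
	 * protocol is atomic; otherwise fall into the checks below
	 * and possibly sbwait().
	 */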
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
#ifdef SOCKET_SPLICE
		    if (!isspliced(so))
#endif /* SOCKET_SPLICE */
			panic("receive 1: so %p, so_type %d, sb_cc %lu",
			    so, so->so_type, so->so_rcv.sb_cc);
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else if (so->so_rcv.sb_cc == 0)
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0 && controlp == NULL)
			goto release;
		if (flags & MSG_DONTWAIT) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(so, &so->so_rcv);
		error = sbwait(so, &so->so_rcv);
		if (error) {
			sounlock(so);
			return (error);
		}
		goto restart;
	}
dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before operations that
	 * may sleep, and re-reading them afterwards.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 */
	if (uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(so, &so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		int skip = 0;
		if (flags & MSG_PEEK) {
			if (mtod(m, struct cmsghdr *)->cmsg_type ==
			    SCM_RIGHTS) {
				/* don't leak internalized SCM_RIGHTS msgs */
				skip = 1;
			} else if (controlp)
				*controlp = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(so, &so->so_rcv, m);
			so->so_rcv.sb_mb = m->m_next;
			m->m_nextpkt = m->m_next = NULL;
			cm = m;
			m = so->so_rcv.sb_mb;
			sbsync(&so->so_rcv, nextrecord);
			if (controlp) {
				if (pr->pr_domain->dom_externalize) {
					sounlock(so);
					error =
					    (*pr->pr_domain->dom_externalize)
					    (cm, controllen, flags);
					solock(so);
				}
				*controlp = cm;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (pr->pr_domain->dom_dispose)
					pr->pr_domain->dom_dispose(cm);
				m_free(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		if (controlp && !skip)
			controlp = &(*controlp)->m_next;
		orig_resid = 0;
	}

	/* If m is non-NULL, we have some data to read. */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
		if (m->m_flags & M_BCAST)
			flags |= MSG_BCAST;
		if (m->m_flags & M_MCAST)
			flags |= MSG_MCAST;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else if (m->m_type == MT_CONTROL) {
			/*
			 * If there is more than one control message in the
			 * stream, we do a short read.  The next one can be
			 * received or disposed of by another system call.
			 */
			break;
#ifdef DIAGNOSTIC
		} else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) {
			panic("receive 3: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		}
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
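		/*
		 * A uiomove() failure below is latched in uio_error
		 * rather than acted on immediately: the loop keeps
		 * dequeueing so the socket buffer stays consistent,
		 * and the error is reported once the record has been
		 * consumed.
		 */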
		if (mp == NULL && uio_error == 0) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			resid = uio->uio_resid;
			sounlock(so);
			uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			solock(so);
			if (uio_error)
				uio->uio_resid = resid - len;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
				orig_resid = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(so, &so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
				orig_resid = 0;
			} else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
				so->so_rcv.sb_datacc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			error = sbwait(so, &so->so_rcv);
			if (error) {
				sbunlock(so, &so->so_rcv);
				sounlock(so);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(so, &so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, curproc);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(so, &so->so_rcv);
		goto restart;
	}

	if (uio_error)
		error = uio_error;

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(so, &so->so_rcv);
	sounlock(so);
	return (error);
}

int
soshutdown(struct socket *so, int how)
{
	const struct protosw *pr = so->so_proto;
	int error = 0;

	solock(so);
	switch (how) {
	case SHUT_RD:
		sorflush(so);
		break;
	case SHUT_RDWR:
		sorflush(so);
		/* FALLTHROUGH */
	case SHUT_WR:
		error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL,
		    curproc);
		break;
	default:
		error = EINVAL;
		break;
	}
	sounlock(so);

	return (error);
}

void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct mbuf *m;
	const struct protosw *pr = so->so_proto;
	int error;

	sb->sb_flags |= SB_NOINTR;
	error = sblock(so, sb, M_WAITOK);
	/* with SB_NOINTR and M_WAITOK sblock() must not fail */
	KASSERT(error == 0);
	socantrcvmore(so);
	m = sb->sb_mb;
	memset(&sb->sb_startzero, 0,
	    (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero);
	sb->sb_timeo_nsecs = INFSLP;
	sbunlock(so, sb);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(m);
	m_purge(m);
}

#ifdef SOCKET_SPLICE

#define so_splicelen	so_sp->ssp_len
#define so_splicemax	so_sp->ssp_max
#define so_idletv	so_sp->ssp_idletv
#define so_idleto	so_sp->ssp_idleto
#define so_splicetask	so_sp->ssp_task

int
sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
{
	struct file *fp;
	struct socket *sosp;
	struct sosplice *sp;
	struct taskq *tq;
	int error = 0;

	soassertlocked(so);

	if (sosplice_taskq == NULL) {
		rw_enter_write(&sosplice_lock);
		if (sosplice_taskq == NULL) {
			tq = taskq_create("sosplice", 1, IPL_SOFTNET,
			    TASKQ_MPSAFE);
			/* Ensure the taskq is fully visible to other CPUs. */
			membar_producer();
			sosplice_taskq = tq;
		}
		rw_exit_write(&sosplice_lock);
	}
	if (sosplice_taskq == NULL)
		return (ENOMEM);

	if ((so->so_proto->pr_flags & PR_SPLICE) == 0)
		return (EPROTONOSUPPORT);
	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (ENOTCONN);
	if (so->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (so->so_sp == NULL)
			so->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* If no fd is given, unsplice by removing existing link. */
	if (fd < 0) {
		/* Lock receive buffer. */
		if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
			return (error);
		}
		if (so->so_sp->ssp_socket)
			sounsplice(so, so->so_sp->ssp_socket, 0);
		sbunlock(so, &so->so_rcv);
		return (0);
	}

	if (max && max < 0)
		return (EINVAL);

	if (tv && (tv->tv_sec < 0 || !timerisvalid(tv)))
		return (EINVAL);

	/* Find sosp, the drain socket into which data will be spliced. */
	if ((error = getsock(curproc, fd, &fp)) != 0)
		return (error);
	sosp = fp->f_data;
	if (sosp->so_proto->pr_usrreq != so->so_proto->pr_usrreq) {
		error = EPROTONOSUPPORT;
		goto frele;
	}
	if (sosp->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (sosp->so_sp == NULL)
			sosp->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* Lock both receive and send buffer. */
	if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
		goto frele;
	}
	if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) {
		sbunlock(so, &so->so_rcv);
		goto frele;
	}

	if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) {
		error = EBUSY;
		goto release;
	}
	if (sosp->so_options & SO_ACCEPTCONN) {
		error = EOPNOTSUPP;
		goto release;
	}
	if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) {
		error = ENOTCONN;
		goto release;
	}

	/* Splice so and sosp together. */
	so->so_sp->ssp_socket = sosp;
	sosp->so_sp->ssp_soback = so;
	so->so_splicelen = 0;
	so->so_splicemax = max;
	if (tv)
		so->so_idletv = *tv;
	else
		timerclear(&so->so_idletv);
	timeout_set_proc(&so->so_idleto, soidle, so);
	task_set(&so->so_splicetask, sotask, so);

	/*
	 * To prevent softnet interrupt from calling somove() while
	 * we sleep, the socket buffers are not marked as spliced yet.
	 */
	if (somove(so, M_WAIT)) {
		so->so_rcv.sb_flags |= SB_SPLICE;
		sosp->so_snd.sb_flags |= SB_SPLICE;
	}

release:
	sbunlock(sosp, &sosp->so_snd);
	sbunlock(so, &so->so_rcv);
frele:
	/*
	 * FRELE() must not be called with the socket lock held.  It is safe to
	 * release the lock here as long as no other operation happens on the
	 * socket when sosplice() returns.  The dance could be avoided by
	 * grabbing the socket lock inside this function.
	 */
	sounlock(so);
	FRELE(fp, curproc);
	solock(so);
	return (error);
}

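/*
 * Userland sketch (illustrative, not part of this file): splicing is
 * requested via setsockopt(2) on the source socket, e.g.
 *
 *	struct splice sp = { .sp_fd = drain_fd };
 *	if (setsockopt(source_fd, SOL_SOCKET, SO_SPLICE, &sp,
 *	    sizeof(sp)) == -1)
 *		err(1, "SO_SPLICE");
 *
 * sp_max and sp_idle may be filled in to bound the spliced bytes and
 * the idle time; passing an fd of -1 unsplices, and getsockopt(2)
 * with SO_SPLICE reads back the number of bytes moved so far.
 */
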
void
sounsplice(struct socket *so, struct socket *sosp, int freeing)
{
	soassertlocked(so);

	task_del(sosplice_taskq, &so->so_splicetask);
	timeout_del(&so->so_idleto);
	sosp->so_snd.sb_flags &= ~SB_SPLICE;
	so->so_rcv.sb_flags &= ~SB_SPLICE;
	so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
	/* Do not wakeup a socket that is about to be freed. */
	if ((freeing & SOSP_FREEING_READ) == 0 && soreadable(so))
		sorwakeup(so);
	if ((freeing & SOSP_FREEING_WRITE) == 0 && sowriteable(sosp))
		sowwakeup(sosp);
}

void
soidle(void *arg)
{
	struct socket *so = arg;

	solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		so->so_error = ETIMEDOUT;
		sounsplice(so, so->so_sp->ssp_socket, 0);
	}
	sounlock(so);
}

void
sotask(void *arg)
{
	struct socket *so = arg;

	solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * We may not sleep here as sofree() and unsplice() may be
		 * called from softnet interrupt context.  This would remove
		 * the socket during somove().
		 */
		somove(so, M_DONTWAIT);
	}
	sounlock(so);

	/* Avoid user land starvation. */
	yield();
}

/*
 * The socket splicing task or idle timeout may sleep while grabbing the net
 * lock.  As sofree() can be called anytime, sotask() or soidle() could access
 * the socket memory of a freed socket after wakeup.  So delay the pool_put()
 * until all pending socket splicing tasks or timeouts have finished.  Do this
 * by scheduling it on the same threads.
 */
void
soreaper(void *arg)
{
	struct socket *so = arg;

	/* Reuse splice task, sounsplice() has been called before. */
	task_set(&so->so_sp->ssp_task, soput, so);
	task_add(sosplice_taskq, &so->so_sp->ssp_task);
}

void
soput(void *arg)
{
	struct socket *so = arg;

	pool_put(&sosplice_pool, so->so_sp);
	pool_put(&socket_pool, so);
}

/*
 * Move data from the receive buffer of the spliced source socket to
 * the send buffer of the drain socket.  Try to move as much as
 * possible in one big chunk.  It is a TCP only implementation.
 * Return 0 if splicing has finished, 1 if it should continue.
 */
int
somove(struct socket *so, int wait)
{
	struct socket *sosp = so->so_sp->ssp_socket;
	struct mbuf *m, **mp, *nextrecord;
	u_long len, off, oobmark;
	long space;
	int error = 0, maxreached = 0;
	unsigned int state;

	soassertlocked(so);

nextpkt:
	if (so->so_error) {
		error = so->so_error;
		goto release;
	}
	if (sosp->so_state & SS_CANTSENDMORE) {
		error = EPIPE;
		goto release;
	}
	if (sosp->so_error && sosp->so_error != ETIMEDOUT &&
	    sosp->so_error != EFBIG && sosp->so_error != ELOOP) {
		error = sosp->so_error;
		goto release;
	}
	if ((sosp->so_state & SS_ISCONNECTED) == 0)
		goto release;

	/* Calculate how many bytes can be copied now. */
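	/*
	 * Worked example (illustrative): with so_splicemax = 100,
	 * so_splicelen = 90 and 50 bytes in the receive buffer, len
	 * is clamped from 50 to 10 below and maxreached is set, so
	 * the splicing dissolves after this transfer.
	 */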
	len = so->so_rcv.sb_datacc;
	if (so->so_splicemax) {
		KASSERT(so->so_splicelen < so->so_splicemax);
		if (so->so_splicemax <= so->so_splicelen + len) {
			len = so->so_splicemax - so->so_splicelen;
			maxreached = 1;
		}
	}
	space = sbspace(sosp, &sosp->so_snd);
	if (so->so_oobmark && so->so_oobmark < len &&
	    so->so_oobmark < space + 1024)
		space += 1024;
	if (space <= 0) {
		maxreached = 0;
		goto release;
	}
	if (space < len) {
		maxreached = 0;
		if (space < sosp->so_snd.sb_lowat)
			goto release;
		len = space;
	}
	sosp->so_state |= SS_ISSENDING;

	SBLASTRECORDCHK(&so->so_rcv, "somove 1");
	SBLASTMBUFCHK(&so->so_rcv, "somove 1");
	m = so->so_rcv.sb_mb;
	if (m == NULL)
		goto release;
	nextrecord = m->m_nextpkt;

	/* Drop address and control information not used with splicing. */
	if (so->so_proto->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("somove soname: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
#endif
		m = m->m_next;
	}
	while (m && m->m_type == MT_CONTROL)
		m = m->m_next;
	if (m == NULL) {
		sbdroprecord(so, &so->so_rcv);
		if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
			(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
			    NULL, NULL, NULL);
		goto nextpkt;
	}

	/*
	 * By splicing sockets connected to localhost, userland might create a
	 * loop.  Dissolve the splicing with an error if a loop is detected
	 * by the counter.
	 *
	 * If we deal with a looped broadcast/multicast packet we bail out
	 * with no error to suppress splice termination.
	 */
	if ((m->m_flags & M_PKTHDR) &&
	    ((m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) ||
	    ((m->m_flags & M_LOOP) && (m->m_flags & (M_BCAST|M_MCAST))))) {
		error = ELOOP;
		goto release;
	}

	if (so->so_proto->pr_flags & PR_ATOMIC) {
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("somove !PKTHDR: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
		if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) {
			error = EMSGSIZE;
			goto release;
		}
		if (len < m->m_pkthdr.len)
			goto release;
		if (m->m_pkthdr.len < len) {
			maxreached = 0;
			len = m->m_pkthdr.len;
		}
		/*
		 * Throw away the name mbuf after it has been assured
		 * that the whole first record can be processed.
		 */
		m = so->so_rcv.sb_mb;
		sbfree(so, &so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		sbsync(&so->so_rcv, nextrecord);
	}
	/*
	 * Throw away the control mbufs after it has been assured
	 * that the whole first record can be processed.
	 */
	m = so->so_rcv.sb_mb;
	while (m && m->m_type == MT_CONTROL) {
		sbfree(so, &so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		m = so->so_rcv.sb_mb;
		sbsync(&so->so_rcv, nextrecord);
	}

	SBLASTRECORDCHK(&so->so_rcv, "somove 2");
	SBLASTMBUFCHK(&so->so_rcv, "somove 2");

	/* Take at most len bytes out of receive buffer. */
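	/*
	 * Illustrative note: the loop below unlinks whole mbufs from
	 * so_rcv onto the local chain rooted at `m'; only the mbuf
	 * that would exceed len is split, by taking an m_copym() copy
	 * and trimming the original in place.
	 */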
	for (off = 0, mp = &m; off <= len && *mp;
	    off += (*mp)->m_len, mp = &(*mp)->m_next) {
		u_long size = len - off;

#ifdef DIAGNOSTIC
		if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER)
			panic("somove type: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, *mp, (*mp)->m_type);
#endif
		if ((*mp)->m_len > size) {
			/*
			 * Move only a partial mbuf at maximum splice length or
			 * if the drain buffer is too small for this large mbuf.
			 */
			if (!maxreached && so->so_snd.sb_datacc > 0) {
				len -= size;
				break;
			}
			*mp = m_copym(so->so_rcv.sb_mb, 0, size, wait);
			if (*mp == NULL) {
				len -= size;
				break;
			}
			so->so_rcv.sb_mb->m_data += size;
			so->so_rcv.sb_mb->m_len -= size;
			so->so_rcv.sb_cc -= size;
			so->so_rcv.sb_datacc -= size;
		} else {
			*mp = so->so_rcv.sb_mb;
			sbfree(so, &so->so_rcv, *mp);
			so->so_rcv.sb_mb = (*mp)->m_next;
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	*mp = NULL;

	SBLASTRECORDCHK(&so->so_rcv, "somove 3");
	SBLASTMBUFCHK(&so->so_rcv, "somove 3");
	SBCHECK(so, &so->so_rcv);
	if (m == NULL)
		goto release;
	m->m_nextpkt = NULL;
	if (m->m_flags & M_PKTHDR) {
		m_resethdr(m);
		m->m_pkthdr.len = len;
	}

	/* Send window update to source peer as receive buffer has changed. */
	if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
		(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
		    NULL, NULL, NULL);

	/* Receive buffer did shrink by len bytes, adjust oob. */
	state = so->so_state;
	so->so_state &= ~SS_RCVATMARK;
	oobmark = so->so_oobmark;
	so->so_oobmark = oobmark > len ? oobmark - len : 0;
	if (oobmark) {
		if (oobmark == len)
			so->so_state |= SS_RCVATMARK;
		if (oobmark >= len)
			oobmark = 0;
	}

	/*
	 * Handle oob data.  If any malloc fails, ignore error.
	 * TCP urgent data is not very reliable anyway.
	 */
	while (((state & SS_RCVATMARK) || oobmark) &&
	    (so->so_options & SO_OOBINLINE)) {
		struct mbuf *o = NULL;

		if (state & SS_RCVATMARK) {
			o = m_get(wait, MT_DATA);
			state &= ~SS_RCVATMARK;
		} else if (oobmark) {
			o = m_split(m, oobmark, wait);
			if (o) {
				error = (*sosp->so_proto->pr_usrreq)(sosp,
				    PRU_SEND, m, NULL, NULL, NULL);
				if (error) {
					if (sosp->so_state & SS_CANTSENDMORE)
						error = EPIPE;
					m_freem(o);
					goto release;
				}
				len -= oobmark;
				so->so_splicelen += oobmark;
				m = o;
				o = m_get(wait, MT_DATA);
			}
			oobmark = 0;
		}
		if (o) {
			o->m_len = 1;
			*mtod(o, caddr_t) = *mtod(m, caddr_t);
			error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SENDOOB,
			    o, NULL, NULL, NULL);
			if (error) {
				if (sosp->so_state & SS_CANTSENDMORE)
					error = EPIPE;
				m_freem(m);
				goto release;
			}
			len -= 1;
			so->so_splicelen += 1;
			if (oobmark) {
				oobmark -= 1;
				if (oobmark == 0)
					state |= SS_RCVATMARK;
			}
			m_adj(m, 1);
		}
	}

	/* Append all remaining data to drain socket. */
	if (so->so_rcv.sb_cc == 0 || maxreached)
		sosp->so_state &= ~SS_ISSENDING;
	error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SEND, m, NULL, NULL,
	    NULL);
	if (error) {
		if (sosp->so_state & SS_CANTSENDMORE)
			error = EPIPE;
		goto release;
	}
	so->so_splicelen += len;

	/* Move several packets if possible. */
	if (!maxreached && nextrecord)
		goto nextpkt;

release:
	sosp->so_state &= ~SS_ISSENDING;
	if (!error && maxreached && so->so_splicemax == so->so_splicelen)
		error = EFBIG;
	if (error)
		so->so_error = error;
	if (((so->so_state & SS_CANTRCVMORE) && so->so_rcv.sb_cc == 0) ||
	    (sosp->so_state & SS_CANTSENDMORE) || maxreached || error) {
		sounsplice(so, sosp, 0);
		return (0);
	}
	if (timerisset(&so->so_idletv))
		timeout_add_tv(&so->so_idleto, &so->so_idletv);
	return (1);
}

#endif /* SOCKET_SPLICE */

void
sorwakeup(struct socket *so)
{
	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * TCP has a sendbuffer that can handle multiple packets
		 * at once.  So queue the stream a bit to accumulate data.
		 * The sosplice thread will call somove() later and send
		 * the packets calling tcp_output() only once.
		 * In the UDP case, send out the packets immediately.
		 * Using a thread would make things slower.
		 */
		if (so->so_proto->pr_flags & PR_WANTRCVD)
			task_add(sosplice_taskq, &so->so_splicetask);
		else
			somove(so, M_DONTWAIT);
	}
	if (isspliced(so))
		return;
#endif
	sowakeup(so, &so->so_rcv);
	if (so->so_upcall)
		(*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT);
}

void
sowwakeup(struct socket *so)
{
	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (so->so_snd.sb_flags & SB_SPLICE)
		task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask);
	if (issplicedback(so))
		return;
#endif
	sowakeup(so, &so->so_snd);
}

int
sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
			return (error);
		}
		error = ENOPROTOOPT;
	} else {
		switch (optname) {
		case SO_BINDANY:
			if ((error = suser(curproc)) != 0)	/* XXX */
				return (error);
			break;
		}

		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger) ||
			    mtod(m, struct linger *)->l_linger < 0 ||
			    mtod(m, struct linger *)->l_linger > SHRT_MAX)
				return (EINVAL);
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* FALLTHROUGH */

		case SO_BINDANY:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_DONTROUTE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				error = EOPNOTSUPP;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		    {
			u_long cnt;

			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			cnt = *mtod(m, int *);
			if ((long)cnt <= 0)
				cnt = 1;
			switch (optname) {

			case SO_SNDBUF:
				if (so->so_state & SS_CANTSENDMORE)
					return (EINVAL);
				if (sbcheckreserve(cnt, so->so_snd.sb_wat) ||
				    sbreserve(so, &so->so_snd, cnt))
					return (ENOBUFS);
				so->so_snd.sb_wat = cnt;
				break;

			case SO_RCVBUF:
				if (so->so_state & SS_CANTRCVMORE)
					return (EINVAL);
				if (sbcheckreserve(cnt, so->so_rcv.sb_wat) ||
				    sbreserve(so, &so->so_rcv, cnt))
					return (ENOBUFS);
				so->so_rcv.sb_wat = cnt;
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (cnt > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : cnt;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (cnt > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : cnt;
				break;
			}
			break;
		    }

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			uint64_t nsecs;

			if (m == NULL || m->m_len < sizeof (tv))
				return (EINVAL);
			memcpy(&tv, mtod(m, struct timeval *), sizeof tv);
			if (!timerisvalid(&tv))
				return (EINVAL);
			nsecs = TIMEVAL_TO_NSEC(&tv);
			if (nsecs == UINT64_MAX)
				return (EDOM);
			if (nsecs == 0)
				nsecs = INFSLP;
			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo_nsecs = nsecs;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo_nsecs = nsecs;
				break;
			}
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				const struct domain *dom =
				    so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_SETOPT, so, level, optname, m);
				return (error);
			}
			error = ENOPROTOOPT;
			break;

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
			if (m == NULL) {
				error = sosplice(so, -1, 0, NULL);
			} else if (m->m_len < sizeof(int)) {
				return (EINVAL);
			} else if (m->m_len < sizeof(struct splice)) {
				error = sosplice(so, *mtod(m, int *), 0, NULL);
			} else {
				error = sosplice(so,
				    mtod(m, struct splice *)->sp_fd,
				    mtod(m, struct splice *)->sp_max,
				    &mtod(m, struct splice *)->sp_idle);
			}
			break;
#endif /* SOCKET_SPLICE */

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto->pr_ctloutput) {
			(*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
		}
	}

	return (error);
}

int
sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			m->m_len = 0;

			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
			    level, optname, m);
			if (error)
				return (error);
			return (0);
		} else
			return (ENOPROTOOPT);
	} else {
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
			    so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_DONTROUTE:
			*mtod(m, int *) = 0;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_DOMAIN:
			*mtod(m, int *) = so->so_proto->pr_domain->dom_family;
			break;

		case SO_PROTOCOL:
			*mtod(m, int *) = so->so_proto->pr_protocol;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			uint64_t nsecs = (optname == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo_nsecs :
			    so->so_rcv.sb_timeo_nsecs);

			m->m_len = sizeof(struct timeval);
			memset(&tv, 0, sizeof(tv));
			if (nsecs != INFSLP)
				NSEC_TO_TIMEVAL(nsecs, &tv);
			memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				const struct domain *dom =
				    so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_GETOPT, so, level, optname, m);
				if (error)
					return (error);
				break;
			}
			return (ENOPROTOOPT);

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
		    {
			off_t len;

			m->m_len = sizeof(off_t);
			len = so->so_sp ? so->so_sp->ssp_len : 0;
			memcpy(mtod(m, off_t *), &len, sizeof(off_t));
			break;
		    }
#endif /* SOCKET_SPLICE */

		case SO_PEERCRED:
			if (so->so_proto->pr_protocol == AF_UNIX) {
				struct unpcb *unp = sotounpcb(so);

				if (unp->unp_flags & UNP_FEIDS) {
					m->m_len = sizeof(unp->unp_connid);
					memcpy(mtod(m, caddr_t),
					    &(unp->unp_connid), m->m_len);
					break;
				}
				return (ENOTCONN);
			}
			return (EOPNOTSUPP);

		default:
			return (ENOPROTOOPT);
		}
		return (0);
	}
}

void
sohasoutofband(struct socket *so)
{
	pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	solock(so);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		sb = &so->so_rcv;
		break;
	default:
		sounlock(so);
		return (EINVAL);
	}

	klist_insert_locked(&sb->sb_sel.si_note, kn);
	sounlock(so);

	return (0);
}

void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	klist_remove(&so->so_rcv.sb_sel.si_note, kn);
}

int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv = 0;

	soassertlocked(so);

	kn->kn_data = so->so_rcv.sb_cc;
#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;

void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	klist_remove(&so->so_snd.sb_sel.si_note, kn);
}

int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	soassertlocked(so);

	kn->kn_data = sbspace(so, &so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (so->so_state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		rv = 0;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	}

	return (rv);
}

int
filt_soexcept(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv = 0;

	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (kn->kn_sfflags & NOTE_OOB) {
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
			kn->kn_fflags |= NOTE_OOB;
			kn->kn_data -= so->so_oobmark;
			rv = 1;
		}
	}

	if (kn->kn_flags & __EV_POLL) {
		if (so->so_state & SS_ISDISCONNECTED) {
			kn->kn_flags |= __EV_HUP;
			rv = 1;
		}
	}

	return rv;
}

int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int active;

	soassertlocked(so);

	kn->kn_data = so->so_qlen;
	active = (kn->kn_data != 0);

	if (kn->kn_flags & (__EV_POLL | __EV_SELECT)) {
		if (so->so_state & SS_ISDISCONNECTED) {
			kn->kn_flags |= __EV_HUP;
			active = 1;
		} else {
			active = soreadable(so);
		}
	}

	return (active);
}

int
filt_somodify(struct kevent *kev, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	solock(so);
	rv = knote_modify(kev, kn);
	sounlock(so);

	return (rv);
}

int
filt_soprocess(struct knote *kn, struct kevent *kev)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	solock(so);
	rv = knote_process(kn, kev);
	sounlock(so);

	return (rv);
}

void
klist_soassertlk(void *arg)
{
	struct socket *so = arg;

	soassertlocked(so);
}

int
klist_solock(void *arg)
{
	struct socket *so = arg;

	solock(so);
	return (1);
}

void
klist_sounlock(void *arg, int ls)
{
	struct socket *so = arg;

	sounlock(so);
}

const struct klistops socket_klistops = {
	.klo_assertlk	= klist_soassertlk,
	.klo_lock	= klist_solock,
	.klo_unlock	= klist_sounlock,
};
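
/*
 * The klistops above let the generic kqueue code take and release
 * the socket lock without knowing what kind of object backs the
 * klist.  A minimal sketch of how a socket's note lists would be
 * bound to these ops (the actual call site lives with socket
 * allocation and may differ):
 *
 *	klist_init(&so->so_rcv.sb_sel.si_note, &socket_klistops, so);
 *	klist_init(&so->so_snd.sb_sel.si_note, &socket_klistops, so);
 */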

#ifdef DDB
void
sobuf_print(struct sockbuf *,
    int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))));

void
sobuf_print(struct sockbuf *sb,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	(*pr)("\tsb_cc: %lu\n", sb->sb_cc);
	(*pr)("\tsb_datacc: %lu\n", sb->sb_datacc);
	(*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat);
	(*pr)("\tsb_wat: %lu\n", sb->sb_wat);
	(*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt);
	(*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax);
	(*pr)("\tsb_lowat: %ld\n", sb->sb_lowat);
	(*pr)("\tsb_mb: %p\n", sb->sb_mb);
	(*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail);
	(*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord);
	(*pr)("\tsb_sel: ...\n");
	(*pr)("\tsb_flags: %i\n", sb->sb_flags);
	(*pr)("\tsb_timeo_nsecs: %llu\n", sb->sb_timeo_nsecs);
}

void
so_print(void *v,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct socket *so = v;

	(*pr)("socket %p\n", so);
	(*pr)("so_type: %i\n", so->so_type);
	(*pr)("so_options: 0x%04x\n", so->so_options); /* %b */
	(*pr)("so_linger: %i\n", so->so_linger);
	(*pr)("so_state: 0x%04x\n", so->so_state);
	(*pr)("so_pcb: %p\n", so->so_pcb);
	(*pr)("so_proto: %p\n", so->so_proto);
	(*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio);

	(*pr)("so_head: %p\n", so->so_head);
	(*pr)("so_onq: %p\n", so->so_onq);
	(*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0));
	(*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q));
	(*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe));
	(*pr)("so_q0len: %i\n", so->so_q0len);
	(*pr)("so_qlen: %i\n", so->so_qlen);
	(*pr)("so_qlimit: %i\n", so->so_qlimit);
	(*pr)("so_timeo: %i\n", so->so_timeo);
	(*pr)("so_oobmark: %lu\n", so->so_oobmark);

	(*pr)("so_sp: %p\n", so->so_sp);
	if (so->so_sp != NULL) {
		(*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket);
		(*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback);
		(*pr)("\tssp_len: %lld\n",
		    (long long)so->so_sp->ssp_len);
		(*pr)("\tssp_max: %lld\n",
		    (long long)so->so_sp->ssp_max);
		(*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec,
		    so->so_sp->ssp_idletv.tv_usec);
		(*pr)("\tssp_idleto: %spending (@%i)\n",
		    timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ",
		    so->so_sp->ssp_idleto.to_time);
	}

	(*pr)("so_rcv:\n");
	sobuf_print(&so->so_rcv, pr);
	(*pr)("so_snd:\n");
	sobuf_print(&so->so_snd, pr);

	(*pr)("so_upcall: %p so_upcallarg: %p\n",
	    so->so_upcall, so->so_upcallarg);

	(*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid);
	(*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid);
	(*pr)("so_cpid: %d\n", so->so_cpid);
}
#endif
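
/*
 * The printers above back ddb(4)'s "show socket" command, e.g. at the
 * kernel debugger prompt (hypothetical address):
 *
 *	ddb> show socket 0xffffff0047a5e000
 *
 * which dumps one line per field through so_print() and both sockbufs
 * through sobuf_print().
 */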