1 /* $OpenBSD: uipc_socket.c,v 1.304 2023/06/30 11:52:11 mvs Exp $ */ 2 /* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1988, 1990, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/proc.h> 38 #include <sys/file.h> 39 #include <sys/filedesc.h> 40 #include <sys/malloc.h> 41 #include <sys/mbuf.h> 42 #include <sys/domain.h> 43 #include <sys/event.h> 44 #include <sys/protosw.h> 45 #include <sys/socket.h> 46 #include <sys/unpcb.h> 47 #include <sys/socketvar.h> 48 #include <sys/signalvar.h> 49 #include <sys/pool.h> 50 #include <sys/atomic.h> 51 #include <sys/rwlock.h> 52 #include <sys/time.h> 53 #include <sys/refcnt.h> 54 55 #ifdef DDB 56 #include <machine/db_machdep.h> 57 #endif 58 59 void sbsync(struct sockbuf *, struct mbuf *); 60 61 int sosplice(struct socket *, int, off_t, struct timeval *); 62 void sounsplice(struct socket *, struct socket *, int); 63 void soidle(void *); 64 void sotask(void *); 65 void soreaper(void *); 66 void soput(void *); 67 int somove(struct socket *, int); 68 void sorflush(struct socket *); 69 70 void filt_sordetach(struct knote *kn); 71 int filt_soread(struct knote *kn, long hint); 72 void filt_sowdetach(struct knote *kn); 73 int filt_sowrite(struct knote *kn, long hint); 74 int filt_soexcept(struct knote *kn, long hint); 75 int filt_solisten(struct knote *kn, long hint); 76 int filt_somodify(struct kevent *kev, struct knote *kn); 77 int filt_soprocess(struct knote *kn, struct kevent *kev); 78 79 const struct filterops solisten_filtops = { 80 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 81 .f_attach = NULL, 82 .f_detach = filt_sordetach, 83 .f_event = filt_solisten, 84 .f_modify = filt_somodify, 85 .f_process = filt_soprocess, 86 }; 87 88 const struct filterops soread_filtops = { 89 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 90 .f_attach = NULL, 91 .f_detach = filt_sordetach, 92 .f_event = filt_soread, 93 .f_modify = filt_somodify, 94 .f_process = filt_soprocess, 95 }; 96 97 const struct filterops sowrite_filtops = { 98 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 99 .f_attach = NULL, 100 .f_detach = filt_sowdetach, 101 .f_event = filt_sowrite, 102 .f_modify = filt_somodify, 103 .f_process = filt_soprocess, 104 }; 105 106 const struct filterops soexcept_filtops = { 107 .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE, 108 .f_attach = NULL, 109 .f_detach = filt_sordetach, 110 .f_event = filt_soexcept, 111 .f_modify = filt_somodify, 112 .f_process = filt_soprocess, 113 }; 114 115 void klist_soassertlk(void *); 116 int klist_solock(void *); 117 void klist_sounlock(void *, int); 118 119 const struct klistops socket_klistops = { 120 .klo_assertlk = klist_soassertlk, 121 .klo_lock = klist_solock, 122 .klo_unlock = klist_sounlock, 123 }; 124 125 #ifndef SOMINCONN 126 #define SOMINCONN 80 127 #endif /* SOMINCONN */ 128 129 int somaxconn = SOMAXCONN; 130 int sominconn = SOMINCONN; 131 132 struct pool socket_pool; 133 #ifdef SOCKET_SPLICE 134 struct pool sosplice_pool; 135 struct taskq *sosplice_taskq; 136 struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk"); 137 #endif 138 139 void 140 soinit(void) 141 { 142 pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0, 143 "sockpl", NULL); 144 #ifdef SOCKET_SPLICE 145 pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0, 146 "sosppl", NULL); 147 #endif 148 } 149 150 struct socket * 151 soalloc(int wait) 152 { 153 struct socket *so; 154 155 so = pool_get(&socket_pool, (wait == M_WAIT ? PR_WAITOK : PR_NOWAIT) | 156 PR_ZERO); 157 if (so == NULL) 158 return (NULL); 159 rw_init_flags(&so->so_lock, "solock", RWL_DUPOK); 160 refcnt_init(&so->so_refcnt); 161 klist_init(&so->so_rcv.sb_klist, &socket_klistops, so); 162 klist_init(&so->so_snd.sb_klist, &socket_klistops, so); 163 sigio_init(&so->so_sigio); 164 TAILQ_INIT(&so->so_q0); 165 TAILQ_INIT(&so->so_q); 166 167 return (so); 168 } 169 170 /* 171 * Socket operation routines. 172 * These routines are called by the routines in 173 * sys_socket.c or from a system process, and 174 * implement the semantics of socket operations by 175 * switching out to the protocol specific routines. 176 */ 177 int 178 socreate(int dom, struct socket **aso, int type, int proto) 179 { 180 struct proc *p = curproc; /* XXX */ 181 const struct protosw *prp; 182 struct socket *so; 183 int error; 184 185 if (proto) 186 prp = pffindproto(dom, proto, type); 187 else 188 prp = pffindtype(dom, type); 189 if (prp == NULL || prp->pr_usrreqs == NULL) 190 return (EPROTONOSUPPORT); 191 if (prp->pr_type != type) 192 return (EPROTOTYPE); 193 so = soalloc(M_WAIT); 194 so->so_type = type; 195 if (suser(p) == 0) 196 so->so_state = SS_PRIV; 197 so->so_ruid = p->p_ucred->cr_ruid; 198 so->so_euid = p->p_ucred->cr_uid; 199 so->so_rgid = p->p_ucred->cr_rgid; 200 so->so_egid = p->p_ucred->cr_gid; 201 so->so_cpid = p->p_p->ps_pid; 202 so->so_proto = prp; 203 so->so_snd.sb_timeo_nsecs = INFSLP; 204 so->so_rcv.sb_timeo_nsecs = INFSLP; 205 206 solock(so); 207 error = pru_attach(so, proto, M_WAIT); 208 if (error) { 209 so->so_state |= SS_NOFDREF; 210 /* sofree() calls sounlock(). */ 211 sofree(so, 0); 212 return (error); 213 } 214 sounlock(so); 215 *aso = so; 216 return (0); 217 } 218 219 int 220 sobind(struct socket *so, struct mbuf *nam, struct proc *p) 221 { 222 soassertlocked(so); 223 return pru_bind(so, nam, p); 224 } 225 226 int 227 solisten(struct socket *so, int backlog) 228 { 229 int error; 230 231 soassertlocked(so); 232 233 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) 234 return (EINVAL); 235 #ifdef SOCKET_SPLICE 236 if (isspliced(so) || issplicedback(so)) 237 return (EOPNOTSUPP); 238 #endif /* SOCKET_SPLICE */ 239 error = pru_listen(so); 240 if (error) 241 return (error); 242 if (TAILQ_FIRST(&so->so_q) == NULL) 243 so->so_options |= SO_ACCEPTCONN; 244 if (backlog < 0 || backlog > somaxconn) 245 backlog = somaxconn; 246 if (backlog < sominconn) 247 backlog = sominconn; 248 so->so_qlimit = backlog; 249 return (0); 250 } 251 252 #define SOSP_FREEING_READ 1 253 #define SOSP_FREEING_WRITE 2 254 void 255 sofree(struct socket *so, int keep_lock) 256 { 257 int persocket = solock_persocket(so); 258 259 soassertlocked(so); 260 261 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) { 262 if (!keep_lock) 263 sounlock(so); 264 return; 265 } 266 if (so->so_head) { 267 struct socket *head = so->so_head; 268 269 /* 270 * We must not decommission a socket that's on the accept(2) 271 * queue. If we do, then accept(2) may hang after select(2) 272 * indicated that the listening socket was ready. 273 */ 274 if (so->so_onq == &head->so_q) { 275 if (!keep_lock) 276 sounlock(so); 277 return; 278 } 279 280 if (persocket) { 281 /* 282 * Concurrent close of `head' could 283 * abort `so' due to re-lock. 284 */ 285 soref(so); 286 soref(head); 287 sounlock(so); 288 solock(head); 289 solock(so); 290 291 if (so->so_onq != &head->so_q0) { 292 sounlock(head); 293 sounlock(so); 294 sorele(head); 295 sorele(so); 296 return; 297 } 298 299 sorele(head); 300 sorele(so); 301 } 302 303 soqremque(so, 0); 304 305 if (persocket) 306 sounlock(head); 307 } 308 309 if (persocket) { 310 sounlock(so); 311 refcnt_finalize(&so->so_refcnt, "sofinal"); 312 solock(so); 313 } 314 315 sigio_free(&so->so_sigio); 316 klist_free(&so->so_rcv.sb_klist); 317 klist_free(&so->so_snd.sb_klist); 318 #ifdef SOCKET_SPLICE 319 if (issplicedback(so)) { 320 int freeing = SOSP_FREEING_WRITE; 321 322 if (so->so_sp->ssp_soback == so) 323 freeing |= SOSP_FREEING_READ; 324 sounsplice(so->so_sp->ssp_soback, so, freeing); 325 } 326 if (isspliced(so)) { 327 int freeing = SOSP_FREEING_READ; 328 329 if (so == so->so_sp->ssp_socket) 330 freeing |= SOSP_FREEING_WRITE; 331 sounsplice(so, so->so_sp->ssp_socket, freeing); 332 } 333 #endif /* SOCKET_SPLICE */ 334 sbrelease(so, &so->so_snd); 335 sorflush(so); 336 if (!keep_lock) 337 sounlock(so); 338 #ifdef SOCKET_SPLICE 339 if (so->so_sp) { 340 /* Reuse splice idle, sounsplice() has been called before. */ 341 timeout_set_proc(&so->so_sp->ssp_idleto, soreaper, so); 342 timeout_add(&so->so_sp->ssp_idleto, 0); 343 } else 344 #endif /* SOCKET_SPLICE */ 345 { 346 pool_put(&socket_pool, so); 347 } 348 } 349 350 static inline uint64_t 351 solinger_nsec(struct socket *so) 352 { 353 if (so->so_linger == 0) 354 return INFSLP; 355 356 return SEC_TO_NSEC(so->so_linger); 357 } 358 359 /* 360 * Close a socket on last file table reference removal. 361 * Initiate disconnect if connected. 362 * Free socket when disconnect complete. 363 */ 364 int 365 soclose(struct socket *so, int flags) 366 { 367 struct socket *so2; 368 int error = 0; 369 370 solock(so); 371 /* Revoke async IO early. There is a final revocation in sofree(). */ 372 sigio_free(&so->so_sigio); 373 if (so->so_state & SS_ISCONNECTED) { 374 if (so->so_pcb == NULL) 375 goto discard; 376 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 377 error = sodisconnect(so); 378 if (error) 379 goto drop; 380 } 381 if (so->so_options & SO_LINGER) { 382 if ((so->so_state & SS_ISDISCONNECTING) && 383 (flags & MSG_DONTWAIT)) 384 goto drop; 385 while (so->so_state & SS_ISCONNECTED) { 386 error = sosleep_nsec(so, &so->so_timeo, 387 PSOCK | PCATCH, "netcls", 388 solinger_nsec(so)); 389 if (error) 390 break; 391 } 392 } 393 } 394 drop: 395 if (so->so_pcb) { 396 int error2; 397 error2 = pru_detach(so); 398 if (error == 0) 399 error = error2; 400 } 401 if (so->so_options & SO_ACCEPTCONN) { 402 int persocket = solock_persocket(so); 403 404 if (persocket) { 405 /* Wait concurrent sonewconn() threads. */ 406 while (so->so_newconn > 0) { 407 so->so_state |= SS_NEWCONN_WAIT; 408 sosleep_nsec(so, &so->so_newconn, PSOCK, 409 "newcon", INFSLP); 410 } 411 } 412 413 while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) { 414 if (persocket) 415 solock(so2); 416 (void) soqremque(so2, 0); 417 if (persocket) 418 sounlock(so); 419 soabort(so2); 420 if (persocket) 421 solock(so); 422 } 423 while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) { 424 if (persocket) 425 solock(so2); 426 (void) soqremque(so2, 1); 427 if (persocket) 428 sounlock(so); 429 soabort(so2); 430 if (persocket) 431 solock(so); 432 } 433 } 434 discard: 435 if (so->so_state & SS_NOFDREF) 436 panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type); 437 so->so_state |= SS_NOFDREF; 438 /* sofree() calls sounlock(). */ 439 sofree(so, 0); 440 return (error); 441 } 442 443 void 444 soabort(struct socket *so) 445 { 446 soassertlocked(so); 447 pru_abort(so); 448 } 449 450 int 451 soaccept(struct socket *so, struct mbuf *nam) 452 { 453 int error = 0; 454 455 soassertlocked(so); 456 457 if ((so->so_state & SS_NOFDREF) == 0) 458 panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type); 459 so->so_state &= ~SS_NOFDREF; 460 if ((so->so_state & SS_ISDISCONNECTED) == 0 || 461 (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0) 462 error = pru_accept(so, nam); 463 else 464 error = ECONNABORTED; 465 return (error); 466 } 467 468 int 469 soconnect(struct socket *so, struct mbuf *nam) 470 { 471 int error; 472 473 soassertlocked(so); 474 475 if (so->so_options & SO_ACCEPTCONN) 476 return (EOPNOTSUPP); 477 /* 478 * If protocol is connection-based, can only connect once. 479 * Otherwise, if connected, try to disconnect first. 480 * This allows user to disconnect by connecting to, e.g., 481 * a null address. 482 */ 483 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 484 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 485 (error = sodisconnect(so)))) 486 error = EISCONN; 487 else 488 error = pru_connect(so, nam); 489 return (error); 490 } 491 492 int 493 soconnect2(struct socket *so1, struct socket *so2) 494 { 495 int persocket, error; 496 497 if ((persocket = solock_persocket(so1))) 498 solock_pair(so1, so2); 499 else 500 solock(so1); 501 502 error = pru_connect2(so1, so2); 503 504 if (persocket) 505 sounlock(so2); 506 sounlock(so1); 507 return (error); 508 } 509 510 int 511 sodisconnect(struct socket *so) 512 { 513 int error; 514 515 soassertlocked(so); 516 517 if ((so->so_state & SS_ISCONNECTED) == 0) 518 return (ENOTCONN); 519 if (so->so_state & SS_ISDISCONNECTING) 520 return (EALREADY); 521 error = pru_disconnect(so); 522 return (error); 523 } 524 525 int m_getuio(struct mbuf **, int, long, struct uio *); 526 527 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 528 /* 529 * Send on a socket. 530 * If send must go all at once and message is larger than 531 * send buffering, then hard error. 532 * Lock against other senders. 533 * If must go all at once and not enough room now, then 534 * inform user that this would block and do nothing. 535 * Otherwise, if nonblocking, send as much as possible. 536 * The data to be sent is described by "uio" if nonzero, 537 * otherwise by the mbuf chain "top" (which must be null 538 * if uio is not). Data provided in mbuf chain must be small 539 * enough to send all at once. 540 * 541 * Returns nonzero on error, timeout or signal; callers 542 * must check for short counts if EINTR/ERESTART are returned. 543 * Data and control buffers are freed on return. 544 */ 545 int 546 sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top, 547 struct mbuf *control, int flags) 548 { 549 long space, clen = 0; 550 size_t resid; 551 int error; 552 int atomic = sosendallatonce(so) || top; 553 554 if (uio) 555 resid = uio->uio_resid; 556 else 557 resid = top->m_pkthdr.len; 558 /* MSG_EOR on a SOCK_STREAM socket is invalid. */ 559 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { 560 m_freem(top); 561 m_freem(control); 562 return (EINVAL); 563 } 564 if (uio && uio->uio_procp) 565 uio->uio_procp->p_ru.ru_msgsnd++; 566 if (control) { 567 /* 568 * In theory clen should be unsigned (since control->m_len is). 569 * However, space must be signed, as it might be less than 0 570 * if we over-committed, and we must use a signed comparison 571 * of space and clen. 572 */ 573 clen = control->m_len; 574 /* reserve extra space for AF_UNIX's internalize */ 575 if (so->so_proto->pr_domain->dom_family == AF_UNIX && 576 clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) && 577 mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS) 578 clen = CMSG_SPACE( 579 (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) * 580 (sizeof(struct fdpass) / sizeof(int))); 581 } 582 583 #define snderr(errno) { error = errno; goto release; } 584 585 solock(so); 586 restart: 587 if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0) 588 goto out; 589 so->so_snd.sb_state |= SS_ISSENDING; 590 do { 591 if (so->so_snd.sb_state & SS_CANTSENDMORE) 592 snderr(EPIPE); 593 if (so->so_error) { 594 error = so->so_error; 595 so->so_error = 0; 596 snderr(error); 597 } 598 if ((so->so_state & SS_ISCONNECTED) == 0) { 599 if (so->so_proto->pr_flags & PR_CONNREQUIRED) { 600 if (!(resid == 0 && clen != 0)) 601 snderr(ENOTCONN); 602 } else if (addr == NULL) 603 snderr(EDESTADDRREQ); 604 } 605 space = sbspace(so, &so->so_snd); 606 if (flags & MSG_OOB) 607 space += 1024; 608 if (so->so_proto->pr_domain->dom_family == AF_UNIX) { 609 if (atomic && resid > so->so_snd.sb_hiwat) 610 snderr(EMSGSIZE); 611 } else { 612 if (clen > so->so_snd.sb_hiwat || 613 (atomic && resid > so->so_snd.sb_hiwat - clen)) 614 snderr(EMSGSIZE); 615 } 616 if (space < clen || 617 (space - clen < resid && 618 (atomic || space < so->so_snd.sb_lowat))) { 619 if (flags & MSG_DONTWAIT) 620 snderr(EWOULDBLOCK); 621 sbunlock(so, &so->so_snd); 622 error = sbwait(so, &so->so_snd); 623 so->so_snd.sb_state &= ~SS_ISSENDING; 624 if (error) 625 goto out; 626 goto restart; 627 } 628 space -= clen; 629 do { 630 if (uio == NULL) { 631 /* 632 * Data is prepackaged in "top". 633 */ 634 resid = 0; 635 if (flags & MSG_EOR) 636 top->m_flags |= M_EOR; 637 } else { 638 sounlock(so); 639 error = m_getuio(&top, atomic, space, uio); 640 solock(so); 641 if (error) 642 goto release; 643 space -= top->m_pkthdr.len; 644 resid = uio->uio_resid; 645 if (flags & MSG_EOR) 646 top->m_flags |= M_EOR; 647 } 648 if (resid == 0) 649 so->so_snd.sb_state &= ~SS_ISSENDING; 650 if (top && so->so_options & SO_ZEROIZE) 651 top->m_flags |= M_ZEROIZE; 652 if (flags & MSG_OOB) 653 error = pru_sendoob(so, top, addr, control); 654 else 655 error = pru_send(so, top, addr, control); 656 clen = 0; 657 control = NULL; 658 top = NULL; 659 if (error) 660 goto release; 661 } while (resid && space > 0); 662 } while (resid); 663 664 release: 665 so->so_snd.sb_state &= ~SS_ISSENDING; 666 sbunlock(so, &so->so_snd); 667 out: 668 sounlock(so); 669 m_freem(top); 670 m_freem(control); 671 return (error); 672 } 673 674 int 675 m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio) 676 { 677 struct mbuf *m, *top = NULL; 678 struct mbuf **nextp = ⊤ 679 u_long len, mlen; 680 size_t resid = uio->uio_resid; 681 int error; 682 683 do { 684 if (top == NULL) { 685 MGETHDR(m, M_WAIT, MT_DATA); 686 mlen = MHLEN; 687 m->m_pkthdr.len = 0; 688 m->m_pkthdr.ph_ifidx = 0; 689 } else { 690 MGET(m, M_WAIT, MT_DATA); 691 mlen = MLEN; 692 } 693 /* chain mbuf together */ 694 *nextp = m; 695 nextp = &m->m_next; 696 697 resid = ulmin(resid, space); 698 if (resid >= MINCLSIZE) { 699 MCLGETL(m, M_NOWAIT, ulmin(resid, MAXMCLBYTES)); 700 if ((m->m_flags & M_EXT) == 0) 701 MCLGETL(m, M_NOWAIT, MCLBYTES); 702 if ((m->m_flags & M_EXT) == 0) 703 goto nopages; 704 mlen = m->m_ext.ext_size; 705 len = ulmin(mlen, resid); 706 /* 707 * For datagram protocols, leave room 708 * for protocol headers in first mbuf. 709 */ 710 if (atomic && m == top && len < mlen - max_hdr) 711 m->m_data += max_hdr; 712 } else { 713 nopages: 714 len = ulmin(mlen, resid); 715 /* 716 * For datagram protocols, leave room 717 * for protocol headers in first mbuf. 718 */ 719 if (atomic && m == top && len < mlen - max_hdr) 720 m_align(m, len); 721 } 722 723 error = uiomove(mtod(m, caddr_t), len, uio); 724 if (error) { 725 m_freem(top); 726 return (error); 727 } 728 729 /* adjust counters */ 730 resid = uio->uio_resid; 731 space -= len; 732 m->m_len = len; 733 top->m_pkthdr.len += len; 734 735 /* Is there more space and more data? */ 736 } while (space > 0 && resid > 0); 737 738 *mp = top; 739 return 0; 740 } 741 742 /* 743 * Following replacement or removal of the first mbuf on the first 744 * mbuf chain of a socket buffer, push necessary state changes back 745 * into the socket buffer so that other consumers see the values 746 * consistently. 'nextrecord' is the callers locally stored value of 747 * the original value of sb->sb_mb->m_nextpkt which must be restored 748 * when the lead mbuf changes. NOTE: 'nextrecord' may be NULL. 749 */ 750 void 751 sbsync(struct sockbuf *sb, struct mbuf *nextrecord) 752 { 753 754 /* 755 * First, update for the new value of nextrecord. If necessary, 756 * make it the first record. 757 */ 758 if (sb->sb_mb != NULL) 759 sb->sb_mb->m_nextpkt = nextrecord; 760 else 761 sb->sb_mb = nextrecord; 762 763 /* 764 * Now update any dependent socket buffer fields to reflect 765 * the new state. This is an inline of SB_EMPTY_FIXUP, with 766 * the addition of a second clause that takes care of the 767 * case where sb_mb has been updated, but remains the last 768 * record. 769 */ 770 if (sb->sb_mb == NULL) { 771 sb->sb_mbtail = NULL; 772 sb->sb_lastrecord = NULL; 773 } else if (sb->sb_mb->m_nextpkt == NULL) 774 sb->sb_lastrecord = sb->sb_mb; 775 } 776 777 /* 778 * Implement receive operations on a socket. 779 * We depend on the way that records are added to the sockbuf 780 * by sbappend*. In particular, each record (mbufs linked through m_next) 781 * must begin with an address if the protocol so specifies, 782 * followed by an optional mbuf or mbufs containing ancillary data, 783 * and then zero or more mbufs of data. 784 * In order to avoid blocking network for the entire time here, we release 785 * the solock() while doing the actual copy to user space. 786 * Although the sockbuf is locked, new data may still be appended, 787 * and thus we must maintain consistency of the sockbuf during that time. 788 * 789 * The caller may receive the data as a single mbuf chain by supplying 790 * an mbuf **mp0 for use in returning the chain. The uio is then used 791 * only for the count in uio_resid. 792 */ 793 int 794 soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio, 795 struct mbuf **mp0, struct mbuf **controlp, int *flagsp, 796 socklen_t controllen) 797 { 798 struct mbuf *m, **mp; 799 struct mbuf *cm; 800 u_long len, offset, moff; 801 int flags, error, type, uio_error = 0; 802 const struct protosw *pr = so->so_proto; 803 struct mbuf *nextrecord; 804 size_t resid, orig_resid = uio->uio_resid; 805 806 mp = mp0; 807 if (paddr) 808 *paddr = NULL; 809 if (controlp) 810 *controlp = NULL; 811 if (flagsp) 812 flags = *flagsp &~ MSG_EOR; 813 else 814 flags = 0; 815 if (flags & MSG_OOB) { 816 m = m_get(M_WAIT, MT_DATA); 817 solock(so); 818 error = pru_rcvoob(so, m, flags & MSG_PEEK); 819 sounlock(so); 820 if (error) 821 goto bad; 822 do { 823 error = uiomove(mtod(m, caddr_t), 824 ulmin(uio->uio_resid, m->m_len), uio); 825 m = m_free(m); 826 } while (uio->uio_resid && error == 0 && m); 827 bad: 828 m_freem(m); 829 return (error); 830 } 831 if (mp) 832 *mp = NULL; 833 834 solock_shared(so); 835 restart: 836 if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) { 837 sounlock_shared(so); 838 return (error); 839 } 840 841 m = so->so_rcv.sb_mb; 842 #ifdef SOCKET_SPLICE 843 if (isspliced(so)) 844 m = NULL; 845 #endif /* SOCKET_SPLICE */ 846 /* 847 * If we have less data than requested, block awaiting more 848 * (subject to any timeout) if: 849 * 1. the current count is less than the low water mark, 850 * 2. MSG_WAITALL is set, and it is possible to do the entire 851 * receive operation at once if we block (resid <= hiwat), or 852 * 3. MSG_DONTWAIT is not set. 853 * If MSG_WAITALL is set but resid is larger than the receive buffer, 854 * we have to do the receive in sections, and thus risk returning 855 * a short count if a timeout or signal occurs after we start. 856 */ 857 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 858 so->so_rcv.sb_cc < uio->uio_resid) && 859 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 860 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && 861 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { 862 #ifdef DIAGNOSTIC 863 if (m == NULL && so->so_rcv.sb_cc) 864 #ifdef SOCKET_SPLICE 865 if (!isspliced(so)) 866 #endif /* SOCKET_SPLICE */ 867 panic("receive 1: so %p, so_type %d, sb_cc %lu", 868 so, so->so_type, so->so_rcv.sb_cc); 869 #endif 870 if (so->so_error) { 871 if (m) 872 goto dontblock; 873 error = so->so_error; 874 if ((flags & MSG_PEEK) == 0) 875 so->so_error = 0; 876 goto release; 877 } 878 if (so->so_rcv.sb_state & SS_CANTRCVMORE) { 879 if (m) 880 goto dontblock; 881 else if (so->so_rcv.sb_cc == 0) 882 goto release; 883 } 884 for (; m; m = m->m_next) 885 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 886 m = so->so_rcv.sb_mb; 887 goto dontblock; 888 } 889 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 890 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 891 error = ENOTCONN; 892 goto release; 893 } 894 if (uio->uio_resid == 0 && controlp == NULL) 895 goto release; 896 if (flags & MSG_DONTWAIT) { 897 error = EWOULDBLOCK; 898 goto release; 899 } 900 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); 901 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); 902 sbunlock(so, &so->so_rcv); 903 error = sbwait(so, &so->so_rcv); 904 if (error) { 905 sounlock_shared(so); 906 return (error); 907 } 908 goto restart; 909 } 910 dontblock: 911 /* 912 * On entry here, m points to the first record of the socket buffer. 913 * From this point onward, we maintain 'nextrecord' as a cache of the 914 * pointer to the next record in the socket buffer. We must keep the 915 * various socket buffer pointers and local stack versions of the 916 * pointers in sync, pushing out modifications before operations that 917 * may sleep, and re-reading them afterwards. 918 * 919 * Otherwise, we will race with the network stack appending new data 920 * or records onto the socket buffer by using inconsistent/stale 921 * versions of the field, possibly resulting in socket buffer 922 * corruption. 923 */ 924 if (uio->uio_procp) 925 uio->uio_procp->p_ru.ru_msgrcv++; 926 KASSERT(m == so->so_rcv.sb_mb); 927 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); 928 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); 929 nextrecord = m->m_nextpkt; 930 if (pr->pr_flags & PR_ADDR) { 931 #ifdef DIAGNOSTIC 932 if (m->m_type != MT_SONAME) 933 panic("receive 1a: so %p, so_type %d, m %p, m_type %d", 934 so, so->so_type, m, m->m_type); 935 #endif 936 orig_resid = 0; 937 if (flags & MSG_PEEK) { 938 if (paddr) 939 *paddr = m_copym(m, 0, m->m_len, M_NOWAIT); 940 m = m->m_next; 941 } else { 942 sbfree(so, &so->so_rcv, m); 943 if (paddr) { 944 *paddr = m; 945 so->so_rcv.sb_mb = m->m_next; 946 m->m_next = NULL; 947 m = so->so_rcv.sb_mb; 948 } else { 949 so->so_rcv.sb_mb = m_free(m); 950 m = so->so_rcv.sb_mb; 951 } 952 sbsync(&so->so_rcv, nextrecord); 953 } 954 } 955 while (m && m->m_type == MT_CONTROL && error == 0) { 956 int skip = 0; 957 if (flags & MSG_PEEK) { 958 if (mtod(m, struct cmsghdr *)->cmsg_type == 959 SCM_RIGHTS) { 960 /* don't leak internalized SCM_RIGHTS msgs */ 961 skip = 1; 962 } else if (controlp) 963 *controlp = m_copym(m, 0, m->m_len, M_NOWAIT); 964 m = m->m_next; 965 } else { 966 sbfree(so, &so->so_rcv, m); 967 so->so_rcv.sb_mb = m->m_next; 968 m->m_nextpkt = m->m_next = NULL; 969 cm = m; 970 m = so->so_rcv.sb_mb; 971 sbsync(&so->so_rcv, nextrecord); 972 if (controlp) { 973 if (pr->pr_domain->dom_externalize) { 974 sounlock_shared(so); 975 error = 976 (*pr->pr_domain->dom_externalize) 977 (cm, controllen, flags); 978 solock_shared(so); 979 } 980 *controlp = cm; 981 } else { 982 /* 983 * Dispose of any SCM_RIGHTS message that went 984 * through the read path rather than recv. 985 */ 986 if (pr->pr_domain->dom_dispose) 987 pr->pr_domain->dom_dispose(cm); 988 m_free(cm); 989 } 990 } 991 if (m != NULL) 992 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 993 else 994 nextrecord = so->so_rcv.sb_mb; 995 if (controlp && !skip) 996 controlp = &(*controlp)->m_next; 997 orig_resid = 0; 998 } 999 1000 /* If m is non-NULL, we have some data to read. */ 1001 if (m) { 1002 type = m->m_type; 1003 if (type == MT_OOBDATA) 1004 flags |= MSG_OOB; 1005 if (m->m_flags & M_BCAST) 1006 flags |= MSG_BCAST; 1007 if (m->m_flags & M_MCAST) 1008 flags |= MSG_MCAST; 1009 } 1010 SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); 1011 SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); 1012 1013 moff = 0; 1014 offset = 0; 1015 while (m && uio->uio_resid > 0 && error == 0) { 1016 if (m->m_type == MT_OOBDATA) { 1017 if (type != MT_OOBDATA) 1018 break; 1019 } else if (type == MT_OOBDATA) { 1020 break; 1021 } else if (m->m_type == MT_CONTROL) { 1022 /* 1023 * If there is more than one control message in the 1024 * stream, we do a short read. Next can be received 1025 * or disposed by another system call. 1026 */ 1027 break; 1028 #ifdef DIAGNOSTIC 1029 } else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) { 1030 panic("receive 3: so %p, so_type %d, m %p, m_type %d", 1031 so, so->so_type, m, m->m_type); 1032 #endif 1033 } 1034 so->so_rcv.sb_state &= ~SS_RCVATMARK; 1035 len = uio->uio_resid; 1036 if (so->so_oobmark && len > so->so_oobmark - offset) 1037 len = so->so_oobmark - offset; 1038 if (len > m->m_len - moff) 1039 len = m->m_len - moff; 1040 /* 1041 * If mp is set, just pass back the mbufs. 1042 * Otherwise copy them out via the uio, then free. 1043 * Sockbuf must be consistent here (points to current mbuf, 1044 * it points to next record) when we drop priority; 1045 * we must note any additions to the sockbuf when we 1046 * block interrupts again. 1047 */ 1048 if (mp == NULL && uio_error == 0) { 1049 SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); 1050 SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); 1051 resid = uio->uio_resid; 1052 sounlock_shared(so); 1053 uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio); 1054 solock_shared(so); 1055 if (uio_error) 1056 uio->uio_resid = resid - len; 1057 } else 1058 uio->uio_resid -= len; 1059 if (len == m->m_len - moff) { 1060 if (m->m_flags & M_EOR) 1061 flags |= MSG_EOR; 1062 if (flags & MSG_PEEK) { 1063 m = m->m_next; 1064 moff = 0; 1065 orig_resid = 0; 1066 } else { 1067 nextrecord = m->m_nextpkt; 1068 sbfree(so, &so->so_rcv, m); 1069 if (mp) { 1070 *mp = m; 1071 mp = &m->m_next; 1072 so->so_rcv.sb_mb = m = m->m_next; 1073 *mp = NULL; 1074 } else { 1075 so->so_rcv.sb_mb = m_free(m); 1076 m = so->so_rcv.sb_mb; 1077 } 1078 /* 1079 * If m != NULL, we also know that 1080 * so->so_rcv.sb_mb != NULL. 1081 */ 1082 KASSERT(so->so_rcv.sb_mb == m); 1083 if (m) { 1084 m->m_nextpkt = nextrecord; 1085 if (nextrecord == NULL) 1086 so->so_rcv.sb_lastrecord = m; 1087 } else { 1088 so->so_rcv.sb_mb = nextrecord; 1089 SB_EMPTY_FIXUP(&so->so_rcv); 1090 } 1091 SBLASTRECORDCHK(&so->so_rcv, "soreceive 3"); 1092 SBLASTMBUFCHK(&so->so_rcv, "soreceive 3"); 1093 } 1094 } else { 1095 if (flags & MSG_PEEK) { 1096 moff += len; 1097 orig_resid = 0; 1098 } else { 1099 if (mp) 1100 *mp = m_copym(m, 0, len, M_WAIT); 1101 m->m_data += len; 1102 m->m_len -= len; 1103 so->so_rcv.sb_cc -= len; 1104 so->so_rcv.sb_datacc -= len; 1105 } 1106 } 1107 if (so->so_oobmark) { 1108 if ((flags & MSG_PEEK) == 0) { 1109 so->so_oobmark -= len; 1110 if (so->so_oobmark == 0) { 1111 so->so_rcv.sb_state |= SS_RCVATMARK; 1112 break; 1113 } 1114 } else { 1115 offset += len; 1116 if (offset == so->so_oobmark) 1117 break; 1118 } 1119 } 1120 if (flags & MSG_EOR) 1121 break; 1122 /* 1123 * If the MSG_WAITALL flag is set (for non-atomic socket), 1124 * we must not quit until "uio->uio_resid == 0" or an error 1125 * termination. If a signal/timeout occurs, return 1126 * with a short count but without error. 1127 * Keep sockbuf locked against other readers. 1128 */ 1129 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 1130 !sosendallatonce(so) && !nextrecord) { 1131 if (so->so_rcv.sb_state & SS_CANTRCVMORE || 1132 so->so_error) 1133 break; 1134 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); 1135 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); 1136 error = sbwait(so, &so->so_rcv); 1137 if (error) { 1138 sbunlock(so, &so->so_rcv); 1139 sounlock_shared(so); 1140 return (0); 1141 } 1142 if ((m = so->so_rcv.sb_mb) != NULL) 1143 nextrecord = m->m_nextpkt; 1144 } 1145 } 1146 1147 if (m && pr->pr_flags & PR_ATOMIC) { 1148 flags |= MSG_TRUNC; 1149 if ((flags & MSG_PEEK) == 0) 1150 (void) sbdroprecord(so, &so->so_rcv); 1151 } 1152 if ((flags & MSG_PEEK) == 0) { 1153 if (m == NULL) { 1154 /* 1155 * First part is an inline SB_EMPTY_FIXUP(). Second 1156 * part makes sure sb_lastrecord is up-to-date if 1157 * there is still data in the socket buffer. 1158 */ 1159 so->so_rcv.sb_mb = nextrecord; 1160 if (so->so_rcv.sb_mb == NULL) { 1161 so->so_rcv.sb_mbtail = NULL; 1162 so->so_rcv.sb_lastrecord = NULL; 1163 } else if (nextrecord->m_nextpkt == NULL) 1164 so->so_rcv.sb_lastrecord = nextrecord; 1165 } 1166 SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); 1167 SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); 1168 if (pr->pr_flags & PR_WANTRCVD) 1169 pru_rcvd(so); 1170 } 1171 if (orig_resid == uio->uio_resid && orig_resid && 1172 (flags & MSG_EOR) == 0 && 1173 (so->so_rcv.sb_state & SS_CANTRCVMORE) == 0) { 1174 sbunlock(so, &so->so_rcv); 1175 goto restart; 1176 } 1177 1178 if (uio_error) 1179 error = uio_error; 1180 1181 if (flagsp) 1182 *flagsp |= flags; 1183 release: 1184 sbunlock(so, &so->so_rcv); 1185 sounlock_shared(so); 1186 return (error); 1187 } 1188 1189 int 1190 soshutdown(struct socket *so, int how) 1191 { 1192 int error = 0; 1193 1194 solock(so); 1195 switch (how) { 1196 case SHUT_RD: 1197 sorflush(so); 1198 break; 1199 case SHUT_RDWR: 1200 sorflush(so); 1201 /* FALLTHROUGH */ 1202 case SHUT_WR: 1203 error = pru_shutdown(so); 1204 break; 1205 default: 1206 error = EINVAL; 1207 break; 1208 } 1209 sounlock(so); 1210 1211 return (error); 1212 } 1213 1214 void 1215 sorflush(struct socket *so) 1216 { 1217 struct sockbuf *sb = &so->so_rcv; 1218 struct mbuf *m; 1219 const struct protosw *pr = so->so_proto; 1220 int error; 1221 1222 sb->sb_flags |= SB_NOINTR; 1223 error = sblock(so, sb, M_WAITOK); 1224 /* with SB_NOINTR and M_WAITOK sblock() must not fail */ 1225 KASSERT(error == 0); 1226 socantrcvmore(so); 1227 m = sb->sb_mb; 1228 memset(&sb->sb_startzero, 0, 1229 (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero); 1230 sb->sb_timeo_nsecs = INFSLP; 1231 sbunlock(so, sb); 1232 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) 1233 (*pr->pr_domain->dom_dispose)(m); 1234 m_purge(m); 1235 } 1236 1237 #ifdef SOCKET_SPLICE 1238 1239 #define so_splicelen so_sp->ssp_len 1240 #define so_splicemax so_sp->ssp_max 1241 #define so_idletv so_sp->ssp_idletv 1242 #define so_idleto so_sp->ssp_idleto 1243 #define so_splicetask so_sp->ssp_task 1244 1245 int 1246 sosplice(struct socket *so, int fd, off_t max, struct timeval *tv) 1247 { 1248 struct file *fp; 1249 struct socket *sosp; 1250 struct sosplice *sp; 1251 struct taskq *tq; 1252 int error = 0; 1253 1254 soassertlocked(so); 1255 1256 if (sosplice_taskq == NULL) { 1257 rw_enter_write(&sosplice_lock); 1258 if (sosplice_taskq == NULL) { 1259 tq = taskq_create("sosplice", 1, IPL_SOFTNET, 1260 TASKQ_MPSAFE); 1261 if (tq == NULL) { 1262 rw_exit_write(&sosplice_lock); 1263 return (ENOMEM); 1264 } 1265 /* Ensure the taskq is fully visible to other CPUs. */ 1266 membar_producer(); 1267 sosplice_taskq = tq; 1268 } 1269 rw_exit_write(&sosplice_lock); 1270 } else { 1271 /* Ensure the taskq is fully visible on this CPU. */ 1272 membar_consumer(); 1273 } 1274 1275 if ((so->so_proto->pr_flags & PR_SPLICE) == 0) 1276 return (EPROTONOSUPPORT); 1277 if (so->so_options & SO_ACCEPTCONN) 1278 return (EOPNOTSUPP); 1279 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1280 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 1281 return (ENOTCONN); 1282 if (so->so_sp == NULL) { 1283 sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO); 1284 if (so->so_sp == NULL) 1285 so->so_sp = sp; 1286 else 1287 pool_put(&sosplice_pool, sp); 1288 } 1289 1290 /* If no fd is given, unsplice by removing existing link. */ 1291 if (fd < 0) { 1292 /* Lock receive buffer. */ 1293 if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) { 1294 return (error); 1295 } 1296 if (so->so_sp->ssp_socket) 1297 sounsplice(so, so->so_sp->ssp_socket, 0); 1298 sbunlock(so, &so->so_rcv); 1299 return (0); 1300 } 1301 1302 if (max && max < 0) 1303 return (EINVAL); 1304 1305 if (tv && (tv->tv_sec < 0 || !timerisvalid(tv))) 1306 return (EINVAL); 1307 1308 /* Find sosp, the drain socket where data will be spliced into. */ 1309 if ((error = getsock(curproc, fd, &fp)) != 0) 1310 return (error); 1311 sosp = fp->f_data; 1312 if (sosp->so_proto->pr_usrreqs->pru_send != 1313 so->so_proto->pr_usrreqs->pru_send) { 1314 error = EPROTONOSUPPORT; 1315 goto frele; 1316 } 1317 if (sosp->so_sp == NULL) { 1318 sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO); 1319 if (sosp->so_sp == NULL) 1320 sosp->so_sp = sp; 1321 else 1322 pool_put(&sosplice_pool, sp); 1323 } 1324 1325 /* Lock both receive and send buffer. */ 1326 if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) { 1327 goto frele; 1328 } 1329 if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) { 1330 sbunlock(so, &so->so_rcv); 1331 goto frele; 1332 } 1333 1334 if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) { 1335 error = EBUSY; 1336 goto release; 1337 } 1338 if (sosp->so_options & SO_ACCEPTCONN) { 1339 error = EOPNOTSUPP; 1340 goto release; 1341 } 1342 if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) { 1343 error = ENOTCONN; 1344 goto release; 1345 } 1346 1347 /* Splice so and sosp together. */ 1348 so->so_sp->ssp_socket = sosp; 1349 sosp->so_sp->ssp_soback = so; 1350 so->so_splicelen = 0; 1351 so->so_splicemax = max; 1352 if (tv) 1353 so->so_idletv = *tv; 1354 else 1355 timerclear(&so->so_idletv); 1356 timeout_set_proc(&so->so_idleto, soidle, so); 1357 task_set(&so->so_splicetask, sotask, so); 1358 1359 /* 1360 * To prevent softnet interrupt from calling somove() while 1361 * we sleep, the socket buffers are not marked as spliced yet. 1362 */ 1363 if (somove(so, M_WAIT)) { 1364 so->so_rcv.sb_flags |= SB_SPLICE; 1365 sosp->so_snd.sb_flags |= SB_SPLICE; 1366 } 1367 1368 release: 1369 sbunlock(sosp, &sosp->so_snd); 1370 sbunlock(so, &so->so_rcv); 1371 frele: 1372 /* 1373 * FRELE() must not be called with the socket lock held. It is safe to 1374 * release the lock here as long as no other operation happen on the 1375 * socket when sosplice() returns. The dance could be avoided by 1376 * grabbing the socket lock inside this function. 1377 */ 1378 sounlock(so); 1379 FRELE(fp, curproc); 1380 solock(so); 1381 return (error); 1382 } 1383 1384 void 1385 sounsplice(struct socket *so, struct socket *sosp, int freeing) 1386 { 1387 soassertlocked(so); 1388 1389 task_del(sosplice_taskq, &so->so_splicetask); 1390 timeout_del(&so->so_idleto); 1391 sosp->so_snd.sb_flags &= ~SB_SPLICE; 1392 so->so_rcv.sb_flags &= ~SB_SPLICE; 1393 so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL; 1394 /* Do not wakeup a socket that is about to be freed. */ 1395 if ((freeing & SOSP_FREEING_READ) == 0 && soreadable(so)) 1396 sorwakeup(so); 1397 if ((freeing & SOSP_FREEING_WRITE) == 0 && sowriteable(sosp)) 1398 sowwakeup(sosp); 1399 } 1400 1401 void 1402 soidle(void *arg) 1403 { 1404 struct socket *so = arg; 1405 1406 solock(so); 1407 if (so->so_rcv.sb_flags & SB_SPLICE) { 1408 so->so_error = ETIMEDOUT; 1409 sounsplice(so, so->so_sp->ssp_socket, 0); 1410 } 1411 sounlock(so); 1412 } 1413 1414 void 1415 sotask(void *arg) 1416 { 1417 struct socket *so = arg; 1418 1419 solock(so); 1420 if (so->so_rcv.sb_flags & SB_SPLICE) { 1421 /* 1422 * We may not sleep here as sofree() and unsplice() may be 1423 * called from softnet interrupt context. This would remove 1424 * the socket during somove(). 1425 */ 1426 somove(so, M_DONTWAIT); 1427 } 1428 sounlock(so); 1429 1430 /* Avoid user land starvation. */ 1431 yield(); 1432 } 1433 1434 /* 1435 * The socket splicing task or idle timeout may sleep while grabbing the net 1436 * lock. As sofree() can be called anytime, sotask() or soidle() could access 1437 * the socket memory of a freed socket after wakeup. So delay the pool_put() 1438 * after all pending socket splicing tasks or timeouts have finished. Do this 1439 * by scheduling it on the same threads. 1440 */ 1441 void 1442 soreaper(void *arg) 1443 { 1444 struct socket *so = arg; 1445 1446 /* Reuse splice task, sounsplice() has been called before. */ 1447 task_set(&so->so_sp->ssp_task, soput, so); 1448 task_add(sosplice_taskq, &so->so_sp->ssp_task); 1449 } 1450 1451 void 1452 soput(void *arg) 1453 { 1454 struct socket *so = arg; 1455 1456 pool_put(&sosplice_pool, so->so_sp); 1457 pool_put(&socket_pool, so); 1458 } 1459 1460 /* 1461 * Move data from receive buffer of spliced source socket to send 1462 * buffer of drain socket. Try to move as much as possible in one 1463 * big chunk. It is a TCP only implementation. 1464 * Return value 0 means splicing has been finished, 1 continue. 1465 */ 1466 int 1467 somove(struct socket *so, int wait) 1468 { 1469 struct socket *sosp = so->so_sp->ssp_socket; 1470 struct mbuf *m, **mp, *nextrecord; 1471 u_long len, off, oobmark; 1472 long space; 1473 int error = 0, maxreached = 0; 1474 unsigned int rcvstate; 1475 1476 soassertlocked(so); 1477 1478 nextpkt: 1479 if (so->so_error) { 1480 error = so->so_error; 1481 goto release; 1482 } 1483 if (sosp->so_snd.sb_state & SS_CANTSENDMORE) { 1484 error = EPIPE; 1485 goto release; 1486 } 1487 if (sosp->so_error && sosp->so_error != ETIMEDOUT && 1488 sosp->so_error != EFBIG && sosp->so_error != ELOOP) { 1489 error = sosp->so_error; 1490 goto release; 1491 } 1492 if ((sosp->so_state & SS_ISCONNECTED) == 0) 1493 goto release; 1494 1495 /* Calculate how many bytes can be copied now. */ 1496 len = so->so_rcv.sb_datacc; 1497 if (so->so_splicemax) { 1498 KASSERT(so->so_splicelen < so->so_splicemax); 1499 if (so->so_splicemax <= so->so_splicelen + len) { 1500 len = so->so_splicemax - so->so_splicelen; 1501 maxreached = 1; 1502 } 1503 } 1504 space = sbspace(sosp, &sosp->so_snd); 1505 if (so->so_oobmark && so->so_oobmark < len && 1506 so->so_oobmark < space + 1024) 1507 space += 1024; 1508 if (space <= 0) { 1509 maxreached = 0; 1510 goto release; 1511 } 1512 if (space < len) { 1513 maxreached = 0; 1514 if (space < sosp->so_snd.sb_lowat) 1515 goto release; 1516 len = space; 1517 } 1518 sosp->so_snd.sb_state |= SS_ISSENDING; 1519 1520 SBLASTRECORDCHK(&so->so_rcv, "somove 1"); 1521 SBLASTMBUFCHK(&so->so_rcv, "somove 1"); 1522 m = so->so_rcv.sb_mb; 1523 if (m == NULL) 1524 goto release; 1525 nextrecord = m->m_nextpkt; 1526 1527 /* Drop address and control information not used with splicing. */ 1528 if (so->so_proto->pr_flags & PR_ADDR) { 1529 #ifdef DIAGNOSTIC 1530 if (m->m_type != MT_SONAME) 1531 panic("somove soname: so %p, so_type %d, m %p, " 1532 "m_type %d", so, so->so_type, m, m->m_type); 1533 #endif 1534 m = m->m_next; 1535 } 1536 while (m && m->m_type == MT_CONTROL) 1537 m = m->m_next; 1538 if (m == NULL) { 1539 sbdroprecord(so, &so->so_rcv); 1540 if (so->so_proto->pr_flags & PR_WANTRCVD) 1541 pru_rcvd(so); 1542 goto nextpkt; 1543 } 1544 1545 /* 1546 * By splicing sockets connected to localhost, userland might create a 1547 * loop. Dissolve splicing with error if loop is detected by counter. 1548 * 1549 * If we deal with looped broadcast/multicast packet we bail out with 1550 * no error to suppress splice termination. 1551 */ 1552 if ((m->m_flags & M_PKTHDR) && 1553 ((m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) || 1554 ((m->m_flags & M_LOOP) && (m->m_flags & (M_BCAST|M_MCAST))))) { 1555 error = ELOOP; 1556 goto release; 1557 } 1558 1559 if (so->so_proto->pr_flags & PR_ATOMIC) { 1560 if ((m->m_flags & M_PKTHDR) == 0) 1561 panic("somove !PKTHDR: so %p, so_type %d, m %p, " 1562 "m_type %d", so, so->so_type, m, m->m_type); 1563 if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) { 1564 error = EMSGSIZE; 1565 goto release; 1566 } 1567 if (len < m->m_pkthdr.len) 1568 goto release; 1569 if (m->m_pkthdr.len < len) { 1570 maxreached = 0; 1571 len = m->m_pkthdr.len; 1572 } 1573 /* 1574 * Throw away the name mbuf after it has been assured 1575 * that the whole first record can be processed. 1576 */ 1577 m = so->so_rcv.sb_mb; 1578 sbfree(so, &so->so_rcv, m); 1579 so->so_rcv.sb_mb = m_free(m); 1580 sbsync(&so->so_rcv, nextrecord); 1581 } 1582 /* 1583 * Throw away the control mbufs after it has been assured 1584 * that the whole first record can be processed. 1585 */ 1586 m = so->so_rcv.sb_mb; 1587 while (m && m->m_type == MT_CONTROL) { 1588 sbfree(so, &so->so_rcv, m); 1589 so->so_rcv.sb_mb = m_free(m); 1590 m = so->so_rcv.sb_mb; 1591 sbsync(&so->so_rcv, nextrecord); 1592 } 1593 1594 SBLASTRECORDCHK(&so->so_rcv, "somove 2"); 1595 SBLASTMBUFCHK(&so->so_rcv, "somove 2"); 1596 1597 /* Take at most len mbufs out of receive buffer. */ 1598 for (off = 0, mp = &m; off <= len && *mp; 1599 off += (*mp)->m_len, mp = &(*mp)->m_next) { 1600 u_long size = len - off; 1601 1602 #ifdef DIAGNOSTIC 1603 if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER) 1604 panic("somove type: so %p, so_type %d, m %p, " 1605 "m_type %d", so, so->so_type, *mp, (*mp)->m_type); 1606 #endif 1607 if ((*mp)->m_len > size) { 1608 /* 1609 * Move only a partial mbuf at maximum splice length or 1610 * if the drain buffer is too small for this large mbuf. 1611 */ 1612 if (!maxreached && so->so_snd.sb_datacc > 0) { 1613 len -= size; 1614 break; 1615 } 1616 *mp = m_copym(so->so_rcv.sb_mb, 0, size, wait); 1617 if (*mp == NULL) { 1618 len -= size; 1619 break; 1620 } 1621 so->so_rcv.sb_mb->m_data += size; 1622 so->so_rcv.sb_mb->m_len -= size; 1623 so->so_rcv.sb_cc -= size; 1624 so->so_rcv.sb_datacc -= size; 1625 } else { 1626 *mp = so->so_rcv.sb_mb; 1627 sbfree(so, &so->so_rcv, *mp); 1628 so->so_rcv.sb_mb = (*mp)->m_next; 1629 sbsync(&so->so_rcv, nextrecord); 1630 } 1631 } 1632 *mp = NULL; 1633 1634 SBLASTRECORDCHK(&so->so_rcv, "somove 3"); 1635 SBLASTMBUFCHK(&so->so_rcv, "somove 3"); 1636 SBCHECK(so, &so->so_rcv); 1637 if (m == NULL) 1638 goto release; 1639 m->m_nextpkt = NULL; 1640 if (m->m_flags & M_PKTHDR) { 1641 m_resethdr(m); 1642 m->m_pkthdr.len = len; 1643 } 1644 1645 /* Send window update to source peer as receive buffer has changed. */ 1646 if (so->so_proto->pr_flags & PR_WANTRCVD) 1647 pru_rcvd(so); 1648 1649 /* Receive buffer did shrink by len bytes, adjust oob. */ 1650 rcvstate = so->so_rcv.sb_state; 1651 so->so_rcv.sb_state &= ~SS_RCVATMARK; 1652 oobmark = so->so_oobmark; 1653 so->so_oobmark = oobmark > len ? oobmark - len : 0; 1654 if (oobmark) { 1655 if (oobmark == len) 1656 so->so_rcv.sb_state |= SS_RCVATMARK; 1657 if (oobmark >= len) 1658 oobmark = 0; 1659 } 1660 1661 /* 1662 * Handle oob data. If any malloc fails, ignore error. 1663 * TCP urgent data is not very reliable anyway. 1664 */ 1665 while (((rcvstate & SS_RCVATMARK) || oobmark) && 1666 (so->so_options & SO_OOBINLINE)) { 1667 struct mbuf *o = NULL; 1668 1669 if (rcvstate & SS_RCVATMARK) { 1670 o = m_get(wait, MT_DATA); 1671 rcvstate &= ~SS_RCVATMARK; 1672 } else if (oobmark) { 1673 o = m_split(m, oobmark, wait); 1674 if (o) { 1675 error = pru_send(sosp, m, NULL, NULL); 1676 if (error) { 1677 if (sosp->so_snd.sb_state & 1678 SS_CANTSENDMORE) 1679 error = EPIPE; 1680 m_freem(o); 1681 goto release; 1682 } 1683 len -= oobmark; 1684 so->so_splicelen += oobmark; 1685 m = o; 1686 o = m_get(wait, MT_DATA); 1687 } 1688 oobmark = 0; 1689 } 1690 if (o) { 1691 o->m_len = 1; 1692 *mtod(o, caddr_t) = *mtod(m, caddr_t); 1693 error = pru_sendoob(sosp, o, NULL, NULL); 1694 if (error) { 1695 if (sosp->so_snd.sb_state & SS_CANTSENDMORE) 1696 error = EPIPE; 1697 m_freem(m); 1698 goto release; 1699 } 1700 len -= 1; 1701 so->so_splicelen += 1; 1702 if (oobmark) { 1703 oobmark -= 1; 1704 if (oobmark == 0) 1705 rcvstate |= SS_RCVATMARK; 1706 } 1707 m_adj(m, 1); 1708 } 1709 } 1710 1711 /* Append all remaining data to drain socket. */ 1712 if (so->so_rcv.sb_cc == 0 || maxreached) 1713 sosp->so_snd.sb_state &= ~SS_ISSENDING; 1714 error = pru_send(sosp, m, NULL, NULL); 1715 if (error) { 1716 if (sosp->so_snd.sb_state & SS_CANTSENDMORE) 1717 error = EPIPE; 1718 goto release; 1719 } 1720 so->so_splicelen += len; 1721 1722 /* Move several packets if possible. */ 1723 if (!maxreached && nextrecord) 1724 goto nextpkt; 1725 1726 release: 1727 sosp->so_snd.sb_state &= ~SS_ISSENDING; 1728 if (!error && maxreached && so->so_splicemax == so->so_splicelen) 1729 error = EFBIG; 1730 if (error) 1731 so->so_error = error; 1732 if (((so->so_rcv.sb_state & SS_CANTRCVMORE) && 1733 so->so_rcv.sb_cc == 0) || 1734 (sosp->so_snd.sb_state & SS_CANTSENDMORE) || 1735 maxreached || error) { 1736 sounsplice(so, sosp, 0); 1737 return (0); 1738 } 1739 if (timerisset(&so->so_idletv)) 1740 timeout_add_tv(&so->so_idleto, &so->so_idletv); 1741 return (1); 1742 } 1743 1744 #endif /* SOCKET_SPLICE */ 1745 1746 void 1747 sorwakeup(struct socket *so) 1748 { 1749 soassertlocked(so); 1750 1751 #ifdef SOCKET_SPLICE 1752 if (so->so_rcv.sb_flags & SB_SPLICE) { 1753 /* 1754 * TCP has a sendbuffer that can handle multiple packets 1755 * at once. So queue the stream a bit to accumulate data. 1756 * The sosplice thread will call somove() later and send 1757 * the packets calling tcp_output() only once. 1758 * In the UDP case, send out the packets immediately. 1759 * Using a thread would make things slower. 1760 */ 1761 if (so->so_proto->pr_flags & PR_WANTRCVD) 1762 task_add(sosplice_taskq, &so->so_splicetask); 1763 else 1764 somove(so, M_DONTWAIT); 1765 } 1766 if (isspliced(so)) 1767 return; 1768 #endif 1769 sowakeup(so, &so->so_rcv); 1770 if (so->so_upcall) 1771 (*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT); 1772 } 1773 1774 void 1775 sowwakeup(struct socket *so) 1776 { 1777 soassertlocked(so); 1778 1779 #ifdef SOCKET_SPLICE 1780 if (so->so_snd.sb_flags & SB_SPLICE) 1781 task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask); 1782 if (issplicedback(so)) 1783 return; 1784 #endif 1785 sowakeup(so, &so->so_snd); 1786 } 1787 1788 int 1789 sosetopt(struct socket *so, int level, int optname, struct mbuf *m) 1790 { 1791 int error = 0; 1792 1793 soassertlocked(so); 1794 1795 if (level != SOL_SOCKET) { 1796 if (so->so_proto->pr_ctloutput) { 1797 error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, 1798 level, optname, m); 1799 return (error); 1800 } 1801 error = ENOPROTOOPT; 1802 } else { 1803 switch (optname) { 1804 case SO_BINDANY: 1805 if ((error = suser(curproc)) != 0) /* XXX */ 1806 return (error); 1807 break; 1808 } 1809 1810 switch (optname) { 1811 1812 case SO_LINGER: 1813 if (m == NULL || m->m_len != sizeof (struct linger) || 1814 mtod(m, struct linger *)->l_linger < 0 || 1815 mtod(m, struct linger *)->l_linger > SHRT_MAX) 1816 return (EINVAL); 1817 so->so_linger = mtod(m, struct linger *)->l_linger; 1818 /* FALLTHROUGH */ 1819 1820 case SO_BINDANY: 1821 case SO_DEBUG: 1822 case SO_KEEPALIVE: 1823 case SO_USELOOPBACK: 1824 case SO_BROADCAST: 1825 case SO_REUSEADDR: 1826 case SO_REUSEPORT: 1827 case SO_OOBINLINE: 1828 case SO_TIMESTAMP: 1829 case SO_ZEROIZE: 1830 if (m == NULL || m->m_len < sizeof (int)) 1831 return (EINVAL); 1832 if (*mtod(m, int *)) 1833 so->so_options |= optname; 1834 else 1835 so->so_options &= ~optname; 1836 break; 1837 1838 case SO_DONTROUTE: 1839 if (m == NULL || m->m_len < sizeof (int)) 1840 return (EINVAL); 1841 if (*mtod(m, int *)) 1842 error = EOPNOTSUPP; 1843 break; 1844 1845 case SO_SNDBUF: 1846 case SO_RCVBUF: 1847 case SO_SNDLOWAT: 1848 case SO_RCVLOWAT: 1849 { 1850 u_long cnt; 1851 1852 if (m == NULL || m->m_len < sizeof (int)) 1853 return (EINVAL); 1854 cnt = *mtod(m, int *); 1855 if ((long)cnt <= 0) 1856 cnt = 1; 1857 switch (optname) { 1858 1859 case SO_SNDBUF: 1860 if (so->so_snd.sb_state & SS_CANTSENDMORE) 1861 return (EINVAL); 1862 if (sbcheckreserve(cnt, so->so_snd.sb_wat) || 1863 sbreserve(so, &so->so_snd, cnt)) 1864 return (ENOBUFS); 1865 so->so_snd.sb_wat = cnt; 1866 break; 1867 1868 case SO_RCVBUF: 1869 if (so->so_rcv.sb_state & SS_CANTRCVMORE) 1870 return (EINVAL); 1871 if (sbcheckreserve(cnt, so->so_rcv.sb_wat) || 1872 sbreserve(so, &so->so_rcv, cnt)) 1873 return (ENOBUFS); 1874 so->so_rcv.sb_wat = cnt; 1875 break; 1876 1877 case SO_SNDLOWAT: 1878 so->so_snd.sb_lowat = 1879 (cnt > so->so_snd.sb_hiwat) ? 1880 so->so_snd.sb_hiwat : cnt; 1881 break; 1882 case SO_RCVLOWAT: 1883 so->so_rcv.sb_lowat = 1884 (cnt > so->so_rcv.sb_hiwat) ? 1885 so->so_rcv.sb_hiwat : cnt; 1886 break; 1887 } 1888 break; 1889 } 1890 1891 case SO_SNDTIMEO: 1892 case SO_RCVTIMEO: 1893 { 1894 struct timeval tv; 1895 uint64_t nsecs; 1896 1897 if (m == NULL || m->m_len < sizeof (tv)) 1898 return (EINVAL); 1899 memcpy(&tv, mtod(m, struct timeval *), sizeof tv); 1900 if (!timerisvalid(&tv)) 1901 return (EINVAL); 1902 nsecs = TIMEVAL_TO_NSEC(&tv); 1903 if (nsecs == UINT64_MAX) 1904 return (EDOM); 1905 if (nsecs == 0) 1906 nsecs = INFSLP; 1907 switch (optname) { 1908 1909 case SO_SNDTIMEO: 1910 so->so_snd.sb_timeo_nsecs = nsecs; 1911 break; 1912 case SO_RCVTIMEO: 1913 so->so_rcv.sb_timeo_nsecs = nsecs; 1914 break; 1915 } 1916 break; 1917 } 1918 1919 case SO_RTABLE: 1920 if (so->so_proto->pr_domain && 1921 so->so_proto->pr_domain->dom_protosw && 1922 so->so_proto->pr_ctloutput) { 1923 const struct domain *dom = 1924 so->so_proto->pr_domain; 1925 1926 level = dom->dom_protosw->pr_protocol; 1927 error = (*so->so_proto->pr_ctloutput) 1928 (PRCO_SETOPT, so, level, optname, m); 1929 return (error); 1930 } 1931 error = ENOPROTOOPT; 1932 break; 1933 1934 #ifdef SOCKET_SPLICE 1935 case SO_SPLICE: 1936 if (m == NULL) { 1937 error = sosplice(so, -1, 0, NULL); 1938 } else if (m->m_len < sizeof(int)) { 1939 return (EINVAL); 1940 } else if (m->m_len < sizeof(struct splice)) { 1941 error = sosplice(so, *mtod(m, int *), 0, NULL); 1942 } else { 1943 error = sosplice(so, 1944 mtod(m, struct splice *)->sp_fd, 1945 mtod(m, struct splice *)->sp_max, 1946 &mtod(m, struct splice *)->sp_idle); 1947 } 1948 break; 1949 #endif /* SOCKET_SPLICE */ 1950 1951 default: 1952 error = ENOPROTOOPT; 1953 break; 1954 } 1955 if (error == 0 && so->so_proto->pr_ctloutput) { 1956 (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, 1957 level, optname, m); 1958 } 1959 } 1960 1961 return (error); 1962 } 1963 1964 int 1965 sogetopt(struct socket *so, int level, int optname, struct mbuf *m) 1966 { 1967 int error = 0; 1968 1969 if (level != SOL_SOCKET) { 1970 if (so->so_proto->pr_ctloutput) { 1971 m->m_len = 0; 1972 1973 solock(so); 1974 error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so, 1975 level, optname, m); 1976 sounlock(so); 1977 return (error); 1978 } else 1979 return (ENOPROTOOPT); 1980 } else { 1981 m->m_len = sizeof (int); 1982 1983 switch (optname) { 1984 1985 case SO_LINGER: 1986 m->m_len = sizeof (struct linger); 1987 solock_shared(so); 1988 mtod(m, struct linger *)->l_onoff = 1989 so->so_options & SO_LINGER; 1990 mtod(m, struct linger *)->l_linger = so->so_linger; 1991 sounlock_shared(so); 1992 break; 1993 1994 case SO_BINDANY: 1995 case SO_USELOOPBACK: 1996 case SO_DEBUG: 1997 case SO_KEEPALIVE: 1998 case SO_REUSEADDR: 1999 case SO_REUSEPORT: 2000 case SO_BROADCAST: 2001 case SO_OOBINLINE: 2002 case SO_TIMESTAMP: 2003 case SO_ZEROIZE: 2004 *mtod(m, int *) = so->so_options & optname; 2005 break; 2006 2007 case SO_DONTROUTE: 2008 *mtod(m, int *) = 0; 2009 break; 2010 2011 case SO_TYPE: 2012 *mtod(m, int *) = so->so_type; 2013 break; 2014 2015 case SO_ERROR: 2016 solock(so); 2017 *mtod(m, int *) = so->so_error; 2018 so->so_error = 0; 2019 sounlock(so); 2020 2021 break; 2022 2023 case SO_DOMAIN: 2024 *mtod(m, int *) = so->so_proto->pr_domain->dom_family; 2025 break; 2026 2027 case SO_PROTOCOL: 2028 *mtod(m, int *) = so->so_proto->pr_protocol; 2029 break; 2030 2031 case SO_SNDBUF: 2032 *mtod(m, int *) = so->so_snd.sb_hiwat; 2033 break; 2034 2035 case SO_RCVBUF: 2036 *mtod(m, int *) = so->so_rcv.sb_hiwat; 2037 break; 2038 2039 case SO_SNDLOWAT: 2040 *mtod(m, int *) = so->so_snd.sb_lowat; 2041 break; 2042 2043 case SO_RCVLOWAT: 2044 *mtod(m, int *) = so->so_rcv.sb_lowat; 2045 break; 2046 2047 case SO_SNDTIMEO: 2048 case SO_RCVTIMEO: 2049 { 2050 struct sockbuf *sb = (optname == SO_SNDTIMEO ? 2051 &so->so_snd : &so->so_rcv); 2052 struct timeval tv; 2053 uint64_t nsecs; 2054 2055 solock_shared(so); 2056 nsecs = sb->sb_timeo_nsecs; 2057 sounlock_shared(so); 2058 2059 m->m_len = sizeof(struct timeval); 2060 memset(&tv, 0, sizeof(tv)); 2061 if (nsecs != INFSLP) 2062 NSEC_TO_TIMEVAL(nsecs, &tv); 2063 memcpy(mtod(m, struct timeval *), &tv, sizeof tv); 2064 break; 2065 } 2066 2067 case SO_RTABLE: 2068 if (so->so_proto->pr_domain && 2069 so->so_proto->pr_domain->dom_protosw && 2070 so->so_proto->pr_ctloutput) { 2071 const struct domain *dom = 2072 so->so_proto->pr_domain; 2073 2074 level = dom->dom_protosw->pr_protocol; 2075 solock(so); 2076 error = (*so->so_proto->pr_ctloutput) 2077 (PRCO_GETOPT, so, level, optname, m); 2078 sounlock(so); 2079 if (error) 2080 return (error); 2081 break; 2082 } 2083 return (ENOPROTOOPT); 2084 2085 #ifdef SOCKET_SPLICE 2086 case SO_SPLICE: 2087 { 2088 off_t len; 2089 2090 m->m_len = sizeof(off_t); 2091 solock_shared(so); 2092 len = so->so_sp ? so->so_sp->ssp_len : 0; 2093 sounlock_shared(so); 2094 memcpy(mtod(m, off_t *), &len, sizeof(off_t)); 2095 break; 2096 } 2097 #endif /* SOCKET_SPLICE */ 2098 2099 case SO_PEERCRED: 2100 if (so->so_proto->pr_protocol == AF_UNIX) { 2101 struct unpcb *unp = sotounpcb(so); 2102 2103 solock(so); 2104 if (unp->unp_flags & UNP_FEIDS) { 2105 m->m_len = sizeof(unp->unp_connid); 2106 memcpy(mtod(m, caddr_t), 2107 &(unp->unp_connid), m->m_len); 2108 sounlock(so); 2109 break; 2110 } 2111 sounlock(so); 2112 2113 return (ENOTCONN); 2114 } 2115 return (EOPNOTSUPP); 2116 2117 default: 2118 return (ENOPROTOOPT); 2119 } 2120 return (0); 2121 } 2122 } 2123 2124 void 2125 sohasoutofband(struct socket *so) 2126 { 2127 pgsigio(&so->so_sigio, SIGURG, 0); 2128 knote_locked(&so->so_rcv.sb_klist, 0); 2129 } 2130 2131 int 2132 soo_kqfilter(struct file *fp, struct knote *kn) 2133 { 2134 struct socket *so = kn->kn_fp->f_data; 2135 struct sockbuf *sb; 2136 2137 solock(so); 2138 switch (kn->kn_filter) { 2139 case EVFILT_READ: 2140 if (so->so_options & SO_ACCEPTCONN) 2141 kn->kn_fop = &solisten_filtops; 2142 else 2143 kn->kn_fop = &soread_filtops; 2144 sb = &so->so_rcv; 2145 break; 2146 case EVFILT_WRITE: 2147 kn->kn_fop = &sowrite_filtops; 2148 sb = &so->so_snd; 2149 break; 2150 case EVFILT_EXCEPT: 2151 kn->kn_fop = &soexcept_filtops; 2152 sb = &so->so_rcv; 2153 break; 2154 default: 2155 sounlock(so); 2156 return (EINVAL); 2157 } 2158 2159 klist_insert_locked(&sb->sb_klist, kn); 2160 sounlock(so); 2161 2162 return (0); 2163 } 2164 2165 void 2166 filt_sordetach(struct knote *kn) 2167 { 2168 struct socket *so = kn->kn_fp->f_data; 2169 2170 klist_remove(&so->so_rcv.sb_klist, kn); 2171 } 2172 2173 int 2174 filt_soread(struct knote *kn, long hint) 2175 { 2176 struct socket *so = kn->kn_fp->f_data; 2177 int rv = 0; 2178 2179 soassertlocked(so); 2180 2181 kn->kn_data = so->so_rcv.sb_cc; 2182 #ifdef SOCKET_SPLICE 2183 if (isspliced(so)) { 2184 rv = 0; 2185 } else 2186 #endif /* SOCKET_SPLICE */ 2187 if (so->so_rcv.sb_state & SS_CANTRCVMORE) { 2188 kn->kn_flags |= EV_EOF; 2189 if (kn->kn_flags & __EV_POLL) { 2190 if (so->so_state & SS_ISDISCONNECTED) 2191 kn->kn_flags |= __EV_HUP; 2192 } 2193 kn->kn_fflags = so->so_error; 2194 rv = 1; 2195 } else if (so->so_error) { /* temporary udp error */ 2196 rv = 1; 2197 } else if (kn->kn_sfflags & NOTE_LOWAT) { 2198 rv = (kn->kn_data >= kn->kn_sdata); 2199 } else { 2200 rv = (kn->kn_data >= so->so_rcv.sb_lowat); 2201 } 2202 2203 return rv; 2204 } 2205 2206 void 2207 filt_sowdetach(struct knote *kn) 2208 { 2209 struct socket *so = kn->kn_fp->f_data; 2210 2211 klist_remove(&so->so_snd.sb_klist, kn); 2212 } 2213 2214 int 2215 filt_sowrite(struct knote *kn, long hint) 2216 { 2217 struct socket *so = kn->kn_fp->f_data; 2218 int rv; 2219 2220 soassertlocked(so); 2221 2222 kn->kn_data = sbspace(so, &so->so_snd); 2223 if (so->so_snd.sb_state & SS_CANTSENDMORE) { 2224 kn->kn_flags |= EV_EOF; 2225 if (kn->kn_flags & __EV_POLL) { 2226 if (so->so_state & SS_ISDISCONNECTED) 2227 kn->kn_flags |= __EV_HUP; 2228 } 2229 kn->kn_fflags = so->so_error; 2230 rv = 1; 2231 } else if (so->so_error) { /* temporary udp error */ 2232 rv = 1; 2233 } else if (((so->so_state & SS_ISCONNECTED) == 0) && 2234 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 2235 rv = 0; 2236 } else if (kn->kn_sfflags & NOTE_LOWAT) { 2237 rv = (kn->kn_data >= kn->kn_sdata); 2238 } else { 2239 rv = (kn->kn_data >= so->so_snd.sb_lowat); 2240 } 2241 2242 return (rv); 2243 } 2244 2245 int 2246 filt_soexcept(struct knote *kn, long hint) 2247 { 2248 struct socket *so = kn->kn_fp->f_data; 2249 int rv = 0; 2250 2251 soassertlocked(so); 2252 2253 #ifdef SOCKET_SPLICE 2254 if (isspliced(so)) { 2255 rv = 0; 2256 } else 2257 #endif /* SOCKET_SPLICE */ 2258 if (kn->kn_sfflags & NOTE_OOB) { 2259 if (so->so_oobmark || (so->so_rcv.sb_state & SS_RCVATMARK)) { 2260 kn->kn_fflags |= NOTE_OOB; 2261 kn->kn_data -= so->so_oobmark; 2262 rv = 1; 2263 } 2264 } 2265 2266 if (kn->kn_flags & __EV_POLL) { 2267 if (so->so_state & SS_ISDISCONNECTED) { 2268 kn->kn_flags |= __EV_HUP; 2269 rv = 1; 2270 } 2271 } 2272 2273 return rv; 2274 } 2275 2276 int 2277 filt_solisten(struct knote *kn, long hint) 2278 { 2279 struct socket *so = kn->kn_fp->f_data; 2280 int active; 2281 2282 soassertlocked(so); 2283 2284 kn->kn_data = so->so_qlen; 2285 active = (kn->kn_data != 0); 2286 2287 if (kn->kn_flags & (__EV_POLL | __EV_SELECT)) { 2288 if (so->so_state & SS_ISDISCONNECTED) { 2289 kn->kn_flags |= __EV_HUP; 2290 active = 1; 2291 } else { 2292 active = soreadable(so); 2293 } 2294 } 2295 2296 return (active); 2297 } 2298 2299 int 2300 filt_somodify(struct kevent *kev, struct knote *kn) 2301 { 2302 struct socket *so = kn->kn_fp->f_data; 2303 int rv; 2304 2305 solock(so); 2306 rv = knote_modify(kev, kn); 2307 sounlock(so); 2308 2309 return (rv); 2310 } 2311 2312 int 2313 filt_soprocess(struct knote *kn, struct kevent *kev) 2314 { 2315 struct socket *so = kn->kn_fp->f_data; 2316 int rv; 2317 2318 solock(so); 2319 rv = knote_process(kn, kev); 2320 sounlock(so); 2321 2322 return (rv); 2323 } 2324 2325 void 2326 klist_soassertlk(void *arg) 2327 { 2328 struct socket *so = arg; 2329 2330 soassertlocked(so); 2331 } 2332 2333 int 2334 klist_solock(void *arg) 2335 { 2336 struct socket *so = arg; 2337 2338 solock(so); 2339 return (1); 2340 } 2341 2342 void 2343 klist_sounlock(void *arg, int ls) 2344 { 2345 struct socket *so = arg; 2346 2347 sounlock(so); 2348 } 2349 2350 #ifdef DDB 2351 void 2352 sobuf_print(struct sockbuf *, 2353 int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))); 2354 2355 void 2356 sobuf_print(struct sockbuf *sb, 2357 int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))) 2358 { 2359 (*pr)("\tsb_cc: %lu\n", sb->sb_cc); 2360 (*pr)("\tsb_datacc: %lu\n", sb->sb_datacc); 2361 (*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat); 2362 (*pr)("\tsb_wat: %lu\n", sb->sb_wat); 2363 (*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt); 2364 (*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax); 2365 (*pr)("\tsb_lowat: %ld\n", sb->sb_lowat); 2366 (*pr)("\tsb_mb: %p\n", sb->sb_mb); 2367 (*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail); 2368 (*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord); 2369 (*pr)("\tsb_sel: ...\n"); 2370 (*pr)("\tsb_flags: %i\n", sb->sb_flags); 2371 (*pr)("\tsb_timeo_nsecs: %llu\n", sb->sb_timeo_nsecs); 2372 } 2373 2374 void 2375 so_print(void *v, 2376 int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))) 2377 { 2378 struct socket *so = v; 2379 2380 (*pr)("socket %p\n", so); 2381 (*pr)("so_type: %i\n", so->so_type); 2382 (*pr)("so_options: 0x%04x\n", so->so_options); /* %b */ 2383 (*pr)("so_linger: %i\n", so->so_linger); 2384 (*pr)("so_state: 0x%04x\n", so->so_state); 2385 (*pr)("so_pcb: %p\n", so->so_pcb); 2386 (*pr)("so_proto: %p\n", so->so_proto); 2387 (*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio); 2388 2389 (*pr)("so_head: %p\n", so->so_head); 2390 (*pr)("so_onq: %p\n", so->so_onq); 2391 (*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0)); 2392 (*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q)); 2393 (*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe)); 2394 (*pr)("so_q0len: %i\n", so->so_q0len); 2395 (*pr)("so_qlen: %i\n", so->so_qlen); 2396 (*pr)("so_qlimit: %i\n", so->so_qlimit); 2397 (*pr)("so_timeo: %i\n", so->so_timeo); 2398 (*pr)("so_obmark: %lu\n", so->so_oobmark); 2399 2400 (*pr)("so_sp: %p\n", so->so_sp); 2401 if (so->so_sp != NULL) { 2402 (*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket); 2403 (*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback); 2404 (*pr)("\tssp_len: %lld\n", 2405 (unsigned long long)so->so_sp->ssp_len); 2406 (*pr)("\tssp_max: %lld\n", 2407 (unsigned long long)so->so_sp->ssp_max); 2408 (*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec, 2409 so->so_sp->ssp_idletv.tv_usec); 2410 (*pr)("\tssp_idleto: %spending (@%i)\n", 2411 timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ", 2412 so->so_sp->ssp_idleto.to_time); 2413 } 2414 2415 (*pr)("so_rcv:\n"); 2416 sobuf_print(&so->so_rcv, pr); 2417 (*pr)("so_snd:\n"); 2418 sobuf_print(&so->so_snd, pr); 2419 2420 (*pr)("so_upcall: %p so_upcallarg: %p\n", 2421 so->so_upcall, so->so_upcallarg); 2422 2423 (*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid); 2424 (*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid); 2425 (*pr)("so_cpid: %d\n", so->so_cpid); 2426 } 2427 #endif 2428