/*	$OpenBSD: uipc_socket.c,v 1.261 2021/05/13 19:43:11 mvs Exp $	*/
/*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/unpcb.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <net/if.h>
#include <sys/pool.h>
#include <sys/atomic.h>
#include <sys/rwlock.h>
#include <sys/time.h>

#ifdef DDB
#include <machine/db_machdep.h>
#endif

void	sbsync(struct sockbuf *, struct mbuf *);

int	sosplice(struct socket *, int, off_t, struct timeval *);
void	sounsplice(struct socket *, struct socket *, int);
void	soidle(void *);
void	sotask(void *);
void	soreaper(void *);
void	soput(void *);
int	somove(struct socket *, int);
void	sorflush(struct socket *);

void	filt_sordetach(struct knote *kn);
int	filt_soread(struct knote *kn, long hint);
void	filt_sowdetach(struct knote *kn);
int	filt_sowrite(struct knote *kn, long hint);
int	filt_solisten(struct knote *kn, long hint);

const struct filterops solisten_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_solisten,
};

const struct filterops soread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_soread,
};

const struct filterops sowrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_sowdetach,
	.f_event	= filt_sowrite,
};

const struct filterops soexcept_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_soread,
};

#ifndef SOMINCONN
#define SOMINCONN 80
#endif /* SOMINCONN */

int	somaxconn = SOMAXCONN;
int	sominconn = SOMINCONN;

struct pool socket_pool;
#ifdef SOCKET_SPLICE
struct pool sosplice_pool;
struct taskq *sosplice_taskq;
struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk");
#endif

void
soinit(void)
{
	pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0,
	    "sockpl", NULL);
#ifdef SOCKET_SPLICE
	pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0,
	    "sosppl", NULL);
#endif
}

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
int
socreate(int dom, struct socket **aso, int type, int proto)
{
	struct proc *p = curproc;		/* XXX */
	const struct protosw *prp;
	struct socket *so;
	int error, s;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL || prp->pr_attach == NULL)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = pool_get(&socket_pool, PR_WAITOK | PR_ZERO);
	rw_init(&so->so_lock, "solock");
	sigio_init(&so->so_sigio);
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
	so->so_type = type;
	if (suser(p) == 0)
		so->so_state = SS_PRIV;
	so->so_ruid = p->p_ucred->cr_ruid;
	so->so_euid = p->p_ucred->cr_uid;
	so->so_rgid = p->p_ucred->cr_rgid;
	so->so_egid = p->p_ucred->cr_gid;
	so->so_cpid = p->p_p->ps_pid;
	so->so_proto = prp;
	so->so_snd.sb_timeo_nsecs = INFSLP;
	so->so_rcv.sb_timeo_nsecs = INFSLP;

	s = solock(so);
	error = (*prp->pr_attach)(so, proto);
	if (error) {
		so->so_state |= SS_NOFDREF;
		/* sofree() calls sounlock(). */
		sofree(so, s);
		return (error);
	}
	sounlock(so, s);
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct mbuf *nam, struct proc *p)
{
	int error;

	soassertlocked(so);

	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
	return (error);
}

int
solisten(struct socket *so, int backlog)
{
	int error;

	soassertlocked(so);

	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING))
		return (EINVAL);
#ifdef SOCKET_SPLICE
	if (isspliced(so) || issplicedback(so))
		return (EOPNOTSUPP);
#endif /* SOCKET_SPLICE */
	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
	    curproc);
	if (error)
		return (error);
	if (TAILQ_FIRST(&so->so_q) == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	if (backlog < sominconn)
		backlog = sominconn;
	so->so_qlimit = backlog;
	return (0);
}
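
/*
 * A minimal userland sketch of the path into socreate(), sobind() and
 * solisten() above; it is illustrative only and kept under #if 0 so it
 * is never part of the kernel build.  The port number is an arbitrary
 * assumption.  Note that the backlog passed to listen(2) is only a
 * hint: solisten() clamps it to the range [sominconn, somaxconn].
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int
listener_example(void)
{
	struct sockaddr_in sin;
	int s;

	if ((s = socket(AF_INET, SOCK_STREAM, 0)) == -1)	/* socreate() */
		return (-1);
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(12345);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1)
		return (-1);				/* sobind() */
	if (listen(s, -1) == -1)			/* solisten() */
		return (-1);
	/* The negative backlog was silently clamped to somaxconn. */
	return (s);
}
#endif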
#define SOSP_FREEING_READ 1
#define SOSP_FREEING_WRITE 2
void
sofree(struct socket *so, int s)
{
	soassertlocked(so);

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		sounlock(so, s);
		return;
	}
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0)) {
			sounlock(so, s);
			return;
		}
	}
	sigio_free(&so->so_sigio);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		if (issplicedback(so)) {
			int freeing = SOSP_FREEING_WRITE;

			if (so->so_sp->ssp_soback == so)
				freeing |= SOSP_FREEING_READ;
			sounsplice(so->so_sp->ssp_soback, so, freeing);
		}
		if (isspliced(so)) {
			int freeing = SOSP_FREEING_READ;

			if (so == so->so_sp->ssp_socket)
				freeing |= SOSP_FREEING_WRITE;
			sounsplice(so, so->so_sp->ssp_socket, freeing);
		}
	}
#endif /* SOCKET_SPLICE */
	sbrelease(so, &so->so_snd);
	sorflush(so);
	sounlock(so, s);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		/* Reuse splice idle, sounsplice() has been called before. */
		timeout_set_proc(&so->so_sp->ssp_idleto, soreaper, so);
		timeout_add(&so->so_sp->ssp_idleto, 0);
	} else
#endif /* SOCKET_SPLICE */
	{
		pool_put(&socket_pool, so);
	}
}

static inline uint64_t
solinger_nsec(struct socket *so)
{
	if (so->so_linger == 0)
		return INFSLP;

	return SEC_TO_NSEC(so->so_linger);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int flags)
{
	struct socket *so2;
	int s, error = 0;

	s = solock(so);
	/* Revoke async IO early. There is a final revocation in sofree(). */
	sigio_free(&so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) {
			(void) soqremque(so2, 0);
			(void) soabort(so2);
		}
		while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) {
			(void) soqremque(so2, 1);
			(void) soabort(so2);
		}
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (flags & MSG_DONTWAIT))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sosleep_nsec(so, &so->so_timeo,
				    PSOCK | PCATCH, "netcls",
				    solinger_nsec(so));
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;
		KASSERT(so->so_proto->pr_detach);
		error2 = (*so->so_proto->pr_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state |= SS_NOFDREF;
	/* sofree() calls sounlock(). */
	sofree(so, s);
	return (error);
}
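
/*
 * Hedged userland sketch (under #if 0, not compiled into the kernel) of
 * how SO_LINGER interacts with soclose() above: with the option set and
 * a positive l_linger, close(2) sleeps on "netcls" for at most
 * solinger_nsec() while the disconnect completes.  The 5 second value
 * is an arbitrary assumption.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static int
lingering_close_example(int s)
{
	struct linger l;

	l.l_onoff = 1;		/* enable SO_LINGER */
	l.l_linger = 5;		/* wait at most 5 seconds in soclose() */
	if (setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		return (-1);
	return (close(s));
}
#endif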
int
soabort(struct socket *so)
{
	soassertlocked(so);

	return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL,
	    curproc);
}

int
soaccept(struct socket *so, struct mbuf *nam)
{
	int error = 0;

	soassertlocked(so);

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL,
		    nam, NULL, curproc);
	else
		error = ECONNABORTED;
	return (error);
}

int
soconnect(struct socket *so, struct mbuf *nam)
{
	int error;

	soassertlocked(so);

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows a user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    NULL, nam, NULL, curproc);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int s, error;

	s = solock(so1);
	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL,
	    (struct mbuf *)so2, NULL, curproc);
	sounlock(so1, s);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	soassertlocked(so);

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL,
	    NULL, curproc);
	return (error);
}

int m_getuio(struct mbuf **, int, long, struct uio *);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
    struct mbuf *control, int flags)
{
	long space, clen = 0;
	size_t resid;
	int error, s;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/* MSG_EOR on a SOCK_STREAM socket is invalid. */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		m_freem(top);
		m_freem(control);
		return (EINVAL);
	}
	if (uio && uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgsnd++;
	if (control) {
		/*
		 * In theory clen should be unsigned (since control->m_len is).
		 * However, space must be signed, as it might be less than 0
		 * if we over-committed, and we must use a signed comparison
		 * of space and clen.
		 */
		clen = control->m_len;
		/* reserve extra space for AF_UNIX's internalize */
		if (so->so_proto->pr_domain->dom_family == AF_UNIX &&
		    clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) &&
		    mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS)
			clen = CMSG_SPACE(
			    (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) *
			    (sizeof(struct fdpass) / sizeof(int)));
	}

#define	snderr(errno)	{ error = errno; goto release; }

	s = solock(so);
restart:
	if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	so->so_state |= SS_ISSENDING;
	do {
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			snderr(error);
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (!(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == NULL)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(so, &so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if (so->so_proto->pr_domain->dom_family == AF_UNIX) {
			if (atomic && resid > so->so_snd.sb_hiwat)
				snderr(EMSGSIZE);
		} else {
			if (clen > so->so_snd.sb_hiwat ||
			    (atomic && resid > so->so_snd.sb_hiwat - clen))
				snderr(EMSGSIZE);
		}
		if (space < clen ||
		    (space - clen < resid &&
		    (atomic || space < so->so_snd.sb_lowat))) {
			if (flags & MSG_DONTWAIT)
				snderr(EWOULDBLOCK);
			sbunlock(so, &so->so_snd);
			error = sbwait(so, &so->so_snd);
			so->so_state &= ~SS_ISSENDING;
			if (error)
				goto out;
			goto restart;
		}
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				sounlock(so, s);
				error = m_getuio(&top, atomic, space, uio);
				s = solock(so);
				if (error)
					goto release;
				space -= top->m_pkthdr.len;
				resid = uio->uio_resid;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			}
			if (resid == 0)
				so->so_state &= ~SS_ISSENDING;
			if (top && so->so_options & SO_ZEROIZE)
				top->m_flags |= M_ZEROIZE;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control, curproc);
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	so->so_state &= ~SS_ISSENDING;
	sbunlock(so, &so->so_snd);
out:
	sounlock(so, s);
	m_freem(top);
	m_freem(control);
	return (error);
}
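
/*
 * Userland counterpart to the "short counts" caveat above, as a sketch
 * under #if 0 (not compiled into the kernel): since sosend() may move
 * only part of the data before a signal arrives, a robust caller loops
 * and restarts on EINTR.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <errno.h>

static ssize_t
send_all_example(int s, const void *buf, size_t len)
{
	const char *p = buf;
	size_t off = 0;
	ssize_t n;

	while (off < len) {
		n = send(s, p + off, len - off, 0);
		if (n == -1) {
			if (errno == EINTR)
				continue;	/* short count, retry */
			return (-1);
		}
		off += n;
	}
	return ((ssize_t)off);
}
#endif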
int
m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio)
{
	struct mbuf *m, *top = NULL;
	struct mbuf **nextp = &top;
	u_long len, mlen;
	size_t resid = uio->uio_resid;
	int error;

	do {
		if (top == NULL) {
			MGETHDR(m, M_WAIT, MT_DATA);
			mlen = MHLEN;
			m->m_pkthdr.len = 0;
			m->m_pkthdr.ph_ifidx = 0;
		} else {
			MGET(m, M_WAIT, MT_DATA);
			mlen = MLEN;
		}
		/* chain mbuf together */
		*nextp = m;
		nextp = &m->m_next;

		resid = ulmin(resid, space);
		if (resid >= MINCLSIZE) {
			MCLGETL(m, M_NOWAIT, ulmin(resid, MAXMCLBYTES));
			if ((m->m_flags & M_EXT) == 0)
				MCLGETL(m, M_NOWAIT, MCLBYTES);
			if ((m->m_flags & M_EXT) == 0)
				goto nopages;
			mlen = m->m_ext.ext_size;
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m->m_data += max_hdr;
		} else {
nopages:
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m_align(m, len);
		}

		error = uiomove(mtod(m, caddr_t), len, uio);
		if (error) {
			m_freem(top);
			return (error);
		}

		/* adjust counters */
		resid = uio->uio_resid;
		space -= len;
		m->m_len = len;
		top->m_pkthdr.len += len;

		/* Is there more space and more data? */
	} while (space > 0 && resid > 0);

	*mp = top;
	return 0;
}
/*
 * Following replacement or removal of the first mbuf on the first
 * mbuf chain of a socket buffer, push necessary state changes back
 * into the socket buffer so that other consumers see the values
 * consistently.  'nextrecord' is the caller's locally stored value of
 * the original value of sb->sb_mb->m_nextpkt which must be restored
 * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
 */
void
sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking the network for the entire time here, we
 * release the solock() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp,
    socklen_t controllen)
{
	struct mbuf *m, **mp;
	struct mbuf *cm;
	u_long len, offset, moff;
	int flags, error, s, type, uio_error = 0;
	const struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	size_t resid, orig_resid = uio->uio_resid;

	mp = mp0;
	if (paddr)
		*paddr = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		s = solock(so);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, curproc);
		sounlock(so, s);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    ulmin(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;

	s = solock(so);
restart:
	if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(so, s);
		return (error);
	}

	m = so->so_rcv.sb_mb;
#ifdef SOCKET_SPLICE
	if (isspliced(so))
		m = NULL;
#endif /* SOCKET_SPLICE */
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
#ifdef SOCKET_SPLICE
		    if (!isspliced(so))
#endif /* SOCKET_SPLICE */
			panic("receive 1: so %p, so_type %d, sb_cc %lu",
			    so, so->so_type, so->so_rcv.sb_cc);
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else if (so->so_rcv.sb_cc == 0)
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0 && controlp == NULL)
			goto release;
		if (flags & MSG_DONTWAIT) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(so, &so->so_rcv);
		error = sbwait(so, &so->so_rcv);
		if (error) {
			sounlock(so, s);
			return (error);
		}
		goto restart;
	}
dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before operations that
	 * may sleep, and re-reading them afterwards.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 */
	if (uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		int skip = 0;
		if (flags & MSG_PEEK) {
			if (mtod(m, struct cmsghdr *)->cmsg_type ==
			    SCM_RIGHTS) {
				/* don't leak internalized SCM_RIGHTS msgs */
				skip = 1;
			} else if (controlp)
				*controlp = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m->m_next;
			m->m_nextpkt = m->m_next = NULL;
			cm = m;
			m = so->so_rcv.sb_mb;
			sbsync(&so->so_rcv, nextrecord);
			if (controlp) {
				if (pr->pr_domain->dom_externalize) {
					error =
					    (*pr->pr_domain->dom_externalize)
					    (cm, controllen, flags);
				}
				*controlp = cm;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (pr->pr_domain->dom_dispose)
					pr->pr_domain->dom_dispose(cm);
				m_free(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		if (controlp && !skip)
			controlp = &(*controlp)->m_next;
		orig_resid = 0;
	}

	/* If m is non-NULL, we have some data to read. */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
		if (m->m_flags & M_BCAST)
			flags |= MSG_BCAST;
		if (m->m_flags & M_MCAST)
			flags |= MSG_MCAST;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else if (m->m_type == MT_CONTROL) {
			/*
			 * If there is more than one control message in the
			 * stream, we do a short read.  The next one can be
			 * received or disposed of by another system call.
			 */
			break;
#ifdef DIAGNOSTIC
		} else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) {
			panic("receive 3: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		}
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must be consistent here (m points to the
		 * current mbuf, nextrecord to the next record) when we
		 * release the lock; we must note any additions to the
		 * sockbuf when we reacquire it.
		 */
		if (mp == NULL && uio_error == 0) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			resid = uio->uio_resid;
			sounlock(so, s);
			uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			s = solock(so);
			if (uio_error)
				uio->uio_resid = resid - len;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
				orig_resid = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
				orig_resid = 0;
			} else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
				so->so_rcv.sb_datacc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for a non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep the sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			error = sbwait(so, &so->so_rcv);
			if (error) {
				sbunlock(so, &so->so_rcv);
				sounlock(so, s);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, curproc);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(so, &so->so_rcv);
		goto restart;
	}

	if (uio_error)
		error = uio_error;

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(so, &so->so_rcv);
	sounlock(so, s);
	return (error);
}
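
/*
 * Hedged userland sketches for the two paths above, kept under #if 0 so
 * they are never part of the kernel build.  recv_record_example() shows
 * the record layout soreceive() consumes for a PR_ADDR protocol such as
 * UDP: the MT_SONAME mbuf heading each record comes back as msg_name.
 * The buffer sizes and names are assumptions for illustration only.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>

static ssize_t
recv_record_example(int s, char *buf, size_t len)
{
	struct sockaddr_in from;
	struct iovec iov;
	struct msghdr msg;

	iov.iov_base = buf;
	iov.iov_len = len;
	memset(&msg, 0, sizeof(msg));
	msg.msg_name = &from;			/* filled from the */
	msg.msg_namelen = sizeof(from);		/* MT_SONAME mbuf */
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	return (recvmsg(s, &msg, 0));
}
#endif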
int
soshutdown(struct socket *so, int how)
{
	const struct protosw *pr = so->so_proto;
	int s, error = 0;

	s = solock(so);
	switch (how) {
	case SHUT_RD:
		sorflush(so);
		break;
	case SHUT_RDWR:
		sorflush(so);
		/* FALLTHROUGH */
	case SHUT_WR:
		error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL,
		    curproc);
		break;
	default:
		error = EINVAL;
		break;
	}
	sounlock(so, s);

	return (error);
}

void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct mbuf *m;
	const struct protosw *pr = so->so_proto;
	int error;

	sb->sb_flags |= SB_NOINTR;
	error = sblock(so, sb, M_WAITOK);
	/* with SB_NOINTR and M_WAITOK sblock() must not fail */
	KASSERT(error == 0);
	socantrcvmore(so);
	m = sb->sb_mb;
	memset(&sb->sb_startzero, 0,
	    (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero);
	sb->sb_timeo_nsecs = INFSLP;
	sbunlock(so, sb);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(m);
	m_purge(m);
}

#ifdef SOCKET_SPLICE

#define so_splicelen	so_sp->ssp_len
#define so_splicemax	so_sp->ssp_max
#define so_idletv	so_sp->ssp_idletv
#define so_idleto	so_sp->ssp_idleto
#define so_splicetask	so_sp->ssp_task

int
sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
{
	struct file *fp;
	struct socket *sosp;
	struct sosplice *sp;
	struct taskq *tq;
	int error = 0;

	soassertlocked(so);

	if (sosplice_taskq == NULL) {
		rw_enter_write(&sosplice_lock);
		if (sosplice_taskq == NULL) {
			tq = taskq_create("sosplice", 1, IPL_SOFTNET,
			    TASKQ_MPSAFE);
			/* Ensure the taskq is fully visible to other CPUs. */
			membar_producer();
			sosplice_taskq = tq;
		}
		rw_exit_write(&sosplice_lock);
	}
	if (sosplice_taskq == NULL)
		return (ENOMEM);

	if ((so->so_proto->pr_flags & PR_SPLICE) == 0)
		return (EPROTONOSUPPORT);
	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (ENOTCONN);
	if (so->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (so->so_sp == NULL)
			so->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* If no fd is given, unsplice by removing existing link. */
	if (fd < 0) {
		/* Lock receive buffer. */
		if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
			return (error);
		}
		if (so->so_sp->ssp_socket)
			sounsplice(so, so->so_sp->ssp_socket, 0);
		sbunlock(so, &so->so_rcv);
		return (0);
	}

	if (max && max < 0)
		return (EINVAL);

	if (tv && (tv->tv_sec < 0 || !timerisvalid(tv)))
		return (EINVAL);

	/* Find sosp, the drain socket that data will be spliced into. */
	if ((error = getsock(curproc, fd, &fp)) != 0)
		return (error);
	sosp = fp->f_data;
	if (sosp->so_proto->pr_usrreq != so->so_proto->pr_usrreq) {
		error = EPROTONOSUPPORT;
		goto frele;
	}
	if (sosp->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (sosp->so_sp == NULL)
			sosp->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* Lock both receive and send buffer. */
	if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
		goto frele;
	}
	if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) {
		sbunlock(so, &so->so_rcv);
		goto frele;
	}

	if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) {
		error = EBUSY;
		goto release;
	}
	if (sosp->so_options & SO_ACCEPTCONN) {
		error = EOPNOTSUPP;
		goto release;
	}
	if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) {
		error = ENOTCONN;
		goto release;
	}

	/* Splice so and sosp together. */
	so->so_sp->ssp_socket = sosp;
	sosp->so_sp->ssp_soback = so;
	so->so_splicelen = 0;
	so->so_splicemax = max;
	if (tv)
		so->so_idletv = *tv;
	else
		timerclear(&so->so_idletv);
	timeout_set_proc(&so->so_idleto, soidle, so);
	task_set(&so->so_splicetask, sotask, so);

	/*
	 * To prevent softnet interrupt from calling somove() while
	 * we sleep, the socket buffers are not marked as spliced yet.
	 */
	if (somove(so, M_WAIT)) {
		so->so_rcv.sb_flags |= SB_SPLICE;
		sosp->so_snd.sb_flags |= SB_SPLICE;
	}

release:
	sbunlock(sosp, &sosp->so_snd);
	sbunlock(so, &so->so_rcv);
frele:
	/*
	 * FRELE() must not be called with the socket lock held.  It is safe to
	 * release the lock here as long as no other operation happens on the
	 * socket when sosplice() returns.  The dance could be avoided by
	 * grabbing the socket lock inside this function.
	 */
	sounlock(so, SL_LOCKED);
	FRELE(fp, curproc);
	solock(so);
	return (error);
}
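
/*
 * A hedged userland sketch (under #if 0, not built) of driving
 * sosplice() above through setsockopt(2): splice everything arriving
 * on "from" into "to", dissolve after 30 seconds of idleness (both
 * numbers are arbitrary assumptions), and read back via getsockopt(2)
 * how many bytes somove() has transferred so far (ssp_len).
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>

static int
splice_example(int from, int to)
{
	struct splice sp;
	off_t moved;
	socklen_t len = sizeof(moved);

	memset(&sp, 0, sizeof(sp));
	sp.sp_fd = to;			/* drain socket */
	sp.sp_max = 0;			/* no byte limit */
	sp.sp_idle.tv_sec = 30;		/* idle timeout, see soidle() */
	if (setsockopt(from, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp)) == -1)
		return (-1);
	/* ... later, query the spliced byte count: */
	if (getsockopt(from, SOL_SOCKET, SO_SPLICE, &moved, &len) == -1)
		return (-1);
	return (0);
}
#endif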
void
sounsplice(struct socket *so, struct socket *sosp, int freeing)
{
	soassertlocked(so);

	task_del(sosplice_taskq, &so->so_splicetask);
	timeout_del(&so->so_idleto);
	sosp->so_snd.sb_flags &= ~SB_SPLICE;
	so->so_rcv.sb_flags &= ~SB_SPLICE;
	so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
	/* Do not wakeup a socket that is about to be freed. */
	if ((freeing & SOSP_FREEING_READ) == 0 && soreadable(so))
		sorwakeup(so);
	if ((freeing & SOSP_FREEING_WRITE) == 0 && sowriteable(sosp))
		sowwakeup(sosp);
}

void
soidle(void *arg)
{
	struct socket *so = arg;
	int s;

	s = solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		so->so_error = ETIMEDOUT;
		sounsplice(so, so->so_sp->ssp_socket, 0);
	}
	sounlock(so, s);
}

void
sotask(void *arg)
{
	struct socket *so = arg;
	int s;

	s = solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * We may not sleep here as sofree() and unsplice() may be
		 * called from softnet interrupt context.  This would remove
		 * the socket during somove().
		 */
		somove(so, M_DONTWAIT);
	}
	sounlock(so, s);

	/* Avoid user land starvation. */
	yield();
}

/*
 * The socket splicing task or idle timeout may sleep while grabbing the net
 * lock.  As sofree() can be called anytime, sotask() or soidle() could access
 * the socket memory of a freed socket after wakeup.  So delay the pool_put()
 * after all pending socket splicing tasks or timeouts have finished.  Do this
 * by scheduling it on the same threads.
 */
void
soreaper(void *arg)
{
	struct socket *so = arg;

	/* Reuse splice task, sounsplice() has been called before. */
	task_set(&so->so_sp->ssp_task, soput, so);
	task_add(sosplice_taskq, &so->so_sp->ssp_task);
}

void
soput(void *arg)
{
	struct socket *so = arg;

	pool_put(&sosplice_pool, so->so_sp);
	pool_put(&socket_pool, so);
}

/*
 * Move data from the receive buffer of the spliced source socket to the
 * send buffer of the drain socket.  Try to move as much as possible in
 * one big chunk.  It is a TCP only implementation.
 * A return value of 0 means splicing has finished, 1 means continue.
 */
int
somove(struct socket *so, int wait)
{
	struct socket *sosp = so->so_sp->ssp_socket;
	struct mbuf *m, **mp, *nextrecord;
	u_long len, off, oobmark;
	long space;
	int error = 0, maxreached = 0;
	unsigned int state;

	soassertlocked(so);

nextpkt:
	if (so->so_error) {
		error = so->so_error;
		goto release;
	}
	if (sosp->so_state & SS_CANTSENDMORE) {
		error = EPIPE;
		goto release;
	}
	if (sosp->so_error && sosp->so_error != ETIMEDOUT &&
	    sosp->so_error != EFBIG && sosp->so_error != ELOOP) {
		error = sosp->so_error;
		goto release;
	}
	if ((sosp->so_state & SS_ISCONNECTED) == 0)
		goto release;

	/* Calculate how many bytes can be copied now. */
	len = so->so_rcv.sb_datacc;
	if (so->so_splicemax) {
		KASSERT(so->so_splicelen < so->so_splicemax);
		if (so->so_splicemax <= so->so_splicelen + len) {
			len = so->so_splicemax - so->so_splicelen;
			maxreached = 1;
		}
	}
	space = sbspace(sosp, &sosp->so_snd);
	if (so->so_oobmark && so->so_oobmark < len &&
	    so->so_oobmark < space + 1024)
		space += 1024;
	if (space <= 0) {
		maxreached = 0;
		goto release;
	}
	if (space < len) {
		maxreached = 0;
		if (space < sosp->so_snd.sb_lowat)
			goto release;
		len = space;
	}
	sosp->so_state |= SS_ISSENDING;

	SBLASTRECORDCHK(&so->so_rcv, "somove 1");
	SBLASTMBUFCHK(&so->so_rcv, "somove 1");
	m = so->so_rcv.sb_mb;
	if (m == NULL)
		goto release;
	nextrecord = m->m_nextpkt;

	/* Drop address and control information not used with splicing. */
	if (so->so_proto->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("somove soname: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
#endif
		m = m->m_next;
	}
	while (m && m->m_type == MT_CONTROL)
		m = m->m_next;
	if (m == NULL) {
		sbdroprecord(&so->so_rcv);
		if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
			(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
			    NULL, NULL, NULL);
		goto nextpkt;
	}

	/*
	 * By splicing sockets connected to localhost, userland might create a
	 * loop.  Dissolve splicing with error if a loop is detected by the
	 * counter.
	 *
	 * If we deal with a looped broadcast/multicast packet we bail out
	 * with no error to suppress splice termination.
	 */
	if ((m->m_flags & M_PKTHDR) &&
	    ((m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) ||
	    ((m->m_flags & M_LOOP) && (m->m_flags & (M_BCAST|M_MCAST))))) {
		error = ELOOP;
		goto release;
	}

	if (so->so_proto->pr_flags & PR_ATOMIC) {
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("somove !PKTHDR: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
		if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) {
			error = EMSGSIZE;
			goto release;
		}
		if (len < m->m_pkthdr.len)
			goto release;
		if (m->m_pkthdr.len < len) {
			maxreached = 0;
			len = m->m_pkthdr.len;
		}
		/*
		 * Throw away the name mbuf after it has been assured
		 * that the whole first record can be processed.
		 */
		m = so->so_rcv.sb_mb;
		sbfree(&so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		sbsync(&so->so_rcv, nextrecord);
	}
	/*
	 * Throw away the control mbufs after it has been assured
	 * that the whole first record can be processed.
	 */
	m = so->so_rcv.sb_mb;
	while (m && m->m_type == MT_CONTROL) {
		sbfree(&so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		m = so->so_rcv.sb_mb;
		sbsync(&so->so_rcv, nextrecord);
	}

	SBLASTRECORDCHK(&so->so_rcv, "somove 2");
	SBLASTMBUFCHK(&so->so_rcv, "somove 2");

	/* Take at most len bytes out of the receive buffer. */
	for (off = 0, mp = &m; off <= len && *mp;
	    off += (*mp)->m_len, mp = &(*mp)->m_next) {
		u_long size = len - off;

#ifdef DIAGNOSTIC
		if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER)
			panic("somove type: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, *mp, (*mp)->m_type);
#endif
		if ((*mp)->m_len > size) {
			/*
			 * Move only a partial mbuf at maximum splice length or
			 * if the drain buffer is too small for this large mbuf.
			 */
			if (!maxreached && so->so_snd.sb_datacc > 0) {
				len -= size;
				break;
			}
			*mp = m_copym(so->so_rcv.sb_mb, 0, size, wait);
			if (*mp == NULL) {
				len -= size;
				break;
			}
			so->so_rcv.sb_mb->m_data += size;
			so->so_rcv.sb_mb->m_len -= size;
			so->so_rcv.sb_cc -= size;
			so->so_rcv.sb_datacc -= size;
		} else {
			*mp = so->so_rcv.sb_mb;
			sbfree(&so->so_rcv, *mp);
			so->so_rcv.sb_mb = (*mp)->m_next;
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	*mp = NULL;

	SBLASTRECORDCHK(&so->so_rcv, "somove 3");
	SBLASTMBUFCHK(&so->so_rcv, "somove 3");
	SBCHECK(&so->so_rcv);
	if (m == NULL)
		goto release;
	m->m_nextpkt = NULL;
	if (m->m_flags & M_PKTHDR) {
		m_resethdr(m);
		m->m_pkthdr.len = len;
	}

	/* Send window update to source peer as receive buffer has changed. */
	if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
		(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
		    NULL, NULL, NULL);

	/* The receive buffer has shrunk by len bytes; adjust the oob mark. */
	state = so->so_state;
	so->so_state &= ~SS_RCVATMARK;
	oobmark = so->so_oobmark;
	so->so_oobmark = oobmark > len ? oobmark - len : 0;
	if (oobmark) {
		if (oobmark == len)
			so->so_state |= SS_RCVATMARK;
		if (oobmark >= len)
			oobmark = 0;
	}

	/*
	 * Handle oob data.  If any malloc fails, ignore error.
	 * TCP urgent data is not very reliable anyway.
	 */
	while (((state & SS_RCVATMARK) || oobmark) &&
	    (so->so_options & SO_OOBINLINE)) {
		struct mbuf *o = NULL;

		if (state & SS_RCVATMARK) {
			o = m_get(wait, MT_DATA);
			state &= ~SS_RCVATMARK;
		} else if (oobmark) {
			o = m_split(m, oobmark, wait);
			if (o) {
				error = (*sosp->so_proto->pr_usrreq)(sosp,
				    PRU_SEND, m, NULL, NULL, NULL);
				if (error) {
					if (sosp->so_state & SS_CANTSENDMORE)
						error = EPIPE;
					m_freem(o);
					goto release;
				}
				len -= oobmark;
				so->so_splicelen += oobmark;
				m = o;
				o = m_get(wait, MT_DATA);
			}
			oobmark = 0;
		}
		if (o) {
			o->m_len = 1;
			*mtod(o, caddr_t) = *mtod(m, caddr_t);
			error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SENDOOB,
			    o, NULL, NULL, NULL);
			if (error) {
				if (sosp->so_state & SS_CANTSENDMORE)
					error = EPIPE;
				m_freem(m);
				goto release;
			}
			len -= 1;
			so->so_splicelen += 1;
			if (oobmark) {
				oobmark -= 1;
				if (oobmark == 0)
					state |= SS_RCVATMARK;
			}
			m_adj(m, 1);
		}
	}

	/* Append all remaining data to drain socket. */
	if (so->so_rcv.sb_cc == 0 || maxreached)
		sosp->so_state &= ~SS_ISSENDING;
	error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SEND, m, NULL, NULL,
	    NULL);
	if (error) {
		if (sosp->so_state & SS_CANTSENDMORE)
			error = EPIPE;
		goto release;
	}
	so->so_splicelen += len;

	/* Move several packets if possible. */
	if (!maxreached && nextrecord)
		goto nextpkt;

release:
	sosp->so_state &= ~SS_ISSENDING;
	if (!error && maxreached && so->so_splicemax == so->so_splicelen)
		error = EFBIG;
	if (error)
		so->so_error = error;
	if (((so->so_state & SS_CANTRCVMORE) && so->so_rcv.sb_cc == 0) ||
	    (sosp->so_state & SS_CANTSENDMORE) || maxreached || error) {
		sounsplice(so, sosp, 0);
		return (0);
	}
	if (timerisset(&so->so_idletv))
		timeout_add_tv(&so->so_idleto, &so->so_idletv);
	return (1);
}

#endif /* SOCKET_SPLICE */

void
sorwakeup(struct socket *so)
{
	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * TCP has a send buffer that can handle multiple packets
		 * at once.  So queue the stream a bit to accumulate data.
		 * The sosplice thread will call somove() later and send
		 * the packets calling tcp_output() only once.
		 * In the UDP case, send out the packets immediately.
		 * Using a thread would make things slower.
		 */
		if (so->so_proto->pr_flags & PR_WANTRCVD)
			task_add(sosplice_taskq, &so->so_splicetask);
		else
			somove(so, M_DONTWAIT);
	}
	if (isspliced(so))
		return;
#endif
	sowakeup(so, &so->so_rcv);
	if (so->so_upcall)
		(*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT);
}

void
sowwakeup(struct socket *so)
{
	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (so->so_snd.sb_flags & SB_SPLICE)
		task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask);
	if (issplicedback(so))
		return;
#endif
	sowakeup(so, &so->so_snd);
}

int
sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
			return (error);
		}
		error = ENOPROTOOPT;
	} else {
		switch (optname) {
		case SO_BINDANY:
			if ((error = suser(curproc)) != 0)	/* XXX */
				return (error);
			break;
		}

		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger) ||
			    mtod(m, struct linger *)->l_linger < 0 ||
			    mtod(m, struct linger *)->l_linger > SHRT_MAX)
				return (EINVAL);
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* FALLTHROUGH */

		case SO_BINDANY:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_DONTROUTE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				error = EOPNOTSUPP;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		    {
			u_long cnt;

			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			cnt = *mtod(m, int *);
			if ((long)cnt <= 0)
				cnt = 1;
			switch (optname) {

			case SO_SNDBUF:
				if (so->so_state & SS_CANTSENDMORE)
					return (EINVAL);
				if (sbcheckreserve(cnt, so->so_snd.sb_wat) ||
				    sbreserve(so, &so->so_snd, cnt))
					return (ENOBUFS);
				so->so_snd.sb_wat = cnt;
				break;
			case SO_RCVBUF:
				if (so->so_state & SS_CANTRCVMORE)
					return (EINVAL);
				if (sbcheckreserve(cnt, so->so_rcv.sb_wat) ||
				    sbreserve(so, &so->so_rcv, cnt))
					return (ENOBUFS);
				so->so_rcv.sb_wat = cnt;
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (cnt > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : cnt;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (cnt > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : cnt;
				break;
			}
			break;
		    }

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			uint64_t nsecs;

			if (m == NULL || m->m_len < sizeof (tv))
				return (EINVAL);
			memcpy(&tv, mtod(m, struct timeval *), sizeof tv);
			if (!timerisvalid(&tv))
				return (EINVAL);
			nsecs = TIMEVAL_TO_NSEC(&tv);
			if (nsecs == UINT64_MAX)
				return (EDOM);
			if (nsecs == 0)
				nsecs = INFSLP;
			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo_nsecs = nsecs;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo_nsecs = nsecs;
				break;
			}
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				struct domain *dom = so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_SETOPT, so, level, optname, m);
				return (error);
			}
			error = ENOPROTOOPT;
			break;

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
			if (m == NULL) {
				error = sosplice(so, -1, 0, NULL);
			} else if (m->m_len < sizeof(int)) {
				return (EINVAL);
			} else if (m->m_len < sizeof(struct splice)) {
				error = sosplice(so, *mtod(m, int *), 0, NULL);
			} else {
				error = sosplice(so,
				    mtod(m, struct splice *)->sp_fd,
				    mtod(m, struct splice *)->sp_max,
				    &mtod(m, struct splice *)->sp_idle);
			}
			break;
#endif /* SOCKET_SPLICE */

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto->pr_ctloutput) {
			(*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
		}
	}

	return (error);
}
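
/*
 * Userland sketch (under #if 0, not built) for the SO_RCVTIMEO path in
 * sosetopt() above: the timeval is converted to nanoseconds and stored
 * in sb_timeo_nsecs; an all-zero timeval means "no timeout" (INFSLP).
 * The 2.5 second value is an arbitrary assumption.
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>

static int
rcvtimeo_example(int s)
{
	struct timeval tv;

	tv.tv_sec = 2;
	tv.tv_usec = 500000;	/* 2.5s, becomes sb_timeo_nsecs */
	return (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)));
}
#endif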
int
sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			m->m_len = 0;

			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
			    level, optname, m);
			if (error)
				return (error);
			return (0);
		} else
			return (ENOPROTOOPT);
	} else {
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_DONTROUTE:
			*mtod(m, int *) = 0;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_DOMAIN:
			*mtod(m, int *) = so->so_proto->pr_domain->dom_family;
			break;

		case SO_PROTOCOL:
			*mtod(m, int *) = so->so_proto->pr_protocol;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			uint64_t nsecs = (optname == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo_nsecs :
			    so->so_rcv.sb_timeo_nsecs);

			m->m_len = sizeof(struct timeval);
			memset(&tv, 0, sizeof(tv));
			if (nsecs != INFSLP)
				NSEC_TO_TIMEVAL(nsecs, &tv);
			memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				struct domain *dom = so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_GETOPT, so, level, optname, m);
				if (error)
					return (error);
				break;
			}
			return (ENOPROTOOPT);

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
		    {
			off_t len;

			m->m_len = sizeof(off_t);
			len = so->so_sp ? so->so_sp->ssp_len : 0;
			memcpy(mtod(m, off_t *), &len, sizeof(off_t));
			break;
		    }
#endif /* SOCKET_SPLICE */

		case SO_PEERCRED:
			if (so->so_proto->pr_protocol == AF_UNIX) {
				struct unpcb *unp = sotounpcb(so);

				if (unp->unp_flags & UNP_FEIDS) {
					m->m_len = sizeof(unp->unp_connid);
					memcpy(mtod(m, caddr_t),
					    &(unp->unp_connid), m->m_len);
					break;
				}
				return (ENOTCONN);
			}
			return (EOPNOTSUPP);

		default:
			return (ENOPROTOOPT);
		}
		return (0);
	}
}

void
sohasoutofband(struct socket *so)
{
	pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	KERNEL_ASSERT_LOCKED();

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		sb = &so->so_rcv;
		break;
	default:
		return (EINVAL);
	}

	klist_insert_locked(&sb->sb_sel.si_note, kn);

	return (0);
}

void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	KERNEL_ASSERT_LOCKED();

	klist_remove_locked(&so->so_rcv.sb_sel.si_note, kn);
}

int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int s, rv = 0;

	if ((hint & NOTE_SUBMIT) == 0)
		s = solock(so);
	kn->kn_data = so->so_rcv.sb_cc;
#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (kn->kn_sfflags & NOTE_OOB) {
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
			kn->kn_fflags |= NOTE_OOB;
			kn->kn_data -= so->so_oobmark;
			rv = 1;
		}
	} else if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (so->so_state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
	}
	if ((hint & NOTE_SUBMIT) == 0)
		sounlock(so, s);

	return rv;
}
void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	KERNEL_ASSERT_LOCKED();

	klist_remove_locked(&so->so_snd.sb_sel.si_note, kn);
}

int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int s, rv;

	if ((hint & NOTE_SUBMIT) == 0)
		s = solock(so);
	kn->kn_data = sbspace(so, &so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (so->so_state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		rv = 0;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	}
	if ((hint & NOTE_SUBMIT) == 0)
		sounlock(so, s);

	return (rv);
}

int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int s;

	if ((hint & NOTE_SUBMIT) == 0)
		s = solock(so);
	kn->kn_data = so->so_qlen;
	if ((hint & NOTE_SUBMIT) == 0)
		sounlock(so, s);

	return (kn->kn_data != 0);
}
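
/*
 * Userland sketch (under #if 0, not built) of how the filters above are
 * reached from kevent(2): with NOTE_LOWAT, filt_soread() compares the
 * buffered byte count against kn_sdata instead of sb_lowat.  The 128
 * byte threshold is an arbitrary assumption.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

static int
wait_lowat_example(int kq, int s)
{
	struct kevent kev;

	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
	return (kevent(kq, &kev, 1, &kev, 1, NULL));
}
#endif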
#ifdef DDB
void
sobuf_print(struct sockbuf *,
    int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))));

void
sobuf_print(struct sockbuf *sb,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	(*pr)("\tsb_cc: %lu\n", sb->sb_cc);
	(*pr)("\tsb_datacc: %lu\n", sb->sb_datacc);
	(*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat);
	(*pr)("\tsb_wat: %lu\n", sb->sb_wat);
	(*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt);
	(*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax);
	(*pr)("\tsb_lowat: %ld\n", sb->sb_lowat);
	(*pr)("\tsb_mb: %p\n", sb->sb_mb);
	(*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail);
	(*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord);
	(*pr)("\tsb_sel: ...\n");
	(*pr)("\tsb_flags: %i\n", sb->sb_flags);
	(*pr)("\tsb_timeo_nsecs: %llu\n", sb->sb_timeo_nsecs);
}

void
so_print(void *v,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct socket *so = v;

	(*pr)("socket %p\n", so);
	(*pr)("so_type: %i\n", so->so_type);
	(*pr)("so_options: 0x%04x\n", so->so_options); /* %b */
	(*pr)("so_linger: %i\n", so->so_linger);
	(*pr)("so_state: 0x%04x\n", so->so_state);
	(*pr)("so_pcb: %p\n", so->so_pcb);
	(*pr)("so_proto: %p\n", so->so_proto);
	(*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio);

	(*pr)("so_head: %p\n", so->so_head);
	(*pr)("so_onq: %p\n", so->so_onq);
	(*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0));
	(*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q));
	(*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe));
	(*pr)("so_q0len: %i\n", so->so_q0len);
	(*pr)("so_qlen: %i\n", so->so_qlen);
	(*pr)("so_qlimit: %i\n", so->so_qlimit);
	(*pr)("so_timeo: %i\n", so->so_timeo);
	(*pr)("so_oobmark: %lu\n", so->so_oobmark);

	(*pr)("so_sp: %p\n", so->so_sp);
	if (so->so_sp != NULL) {
		(*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket);
		(*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback);
		(*pr)("\tssp_len: %lld\n",
		    (unsigned long long)so->so_sp->ssp_len);
		(*pr)("\tssp_max: %lld\n",
		    (unsigned long long)so->so_sp->ssp_max);
		(*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec,
		    so->so_sp->ssp_idletv.tv_usec);
		(*pr)("\tssp_idleto: %spending (@%i)\n",
		    timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ",
		    so->so_sp->ssp_idleto.to_time);
	}

	(*pr)("so_rcv:\n");
	sobuf_print(&so->so_rcv, pr);
	(*pr)("so_snd:\n");
	sobuf_print(&so->so_snd, pr);

	(*pr)("so_upcall: %p so_upcallarg: %p\n",
	    so->so_upcall, so->so_upcallarg);

	(*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid);
	(*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid);
	(*pr)("so_cpid: %d\n", so->so_cpid);
}
#endif