1 /* $OpenBSD: uipc_socket.c,v 1.241 2020/02/20 16:56:52 visa Exp $ */ 2 /* $NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1988, 1990, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 * 32 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/proc.h> 38 #include <sys/file.h> 39 #include <sys/filedesc.h> 40 #include <sys/malloc.h> 41 #include <sys/mbuf.h> 42 #include <sys/domain.h> 43 #include <sys/kernel.h> 44 #include <sys/event.h> 45 #include <sys/protosw.h> 46 #include <sys/socket.h> 47 #include <sys/unpcb.h> 48 #include <sys/socketvar.h> 49 #include <sys/signalvar.h> 50 #include <net/if.h> 51 #include <sys/pool.h> 52 #include <sys/atomic.h> 53 #include <sys/rwlock.h> 54 55 #ifdef DDB 56 #include <machine/db_machdep.h> 57 #endif 58 59 void sbsync(struct sockbuf *, struct mbuf *); 60 61 int sosplice(struct socket *, int, off_t, struct timeval *); 62 void sounsplice(struct socket *, struct socket *, int); 63 void soidle(void *); 64 void sotask(void *); 65 void soreaper(void *); 66 void soput(void *); 67 int somove(struct socket *, int); 68 69 void filt_sordetach(struct knote *kn); 70 int filt_soread(struct knote *kn, long hint); 71 void filt_sowdetach(struct knote *kn); 72 int filt_sowrite(struct knote *kn, long hint); 73 int filt_solisten(struct knote *kn, long hint); 74 75 const struct filterops solisten_filtops = { 76 .f_flags = FILTEROP_ISFD, 77 .f_attach = NULL, 78 .f_detach = filt_sordetach, 79 .f_event = filt_solisten, 80 }; 81 82 const struct filterops soread_filtops = { 83 .f_flags = FILTEROP_ISFD, 84 .f_attach = NULL, 85 .f_detach = filt_sordetach, 86 .f_event = filt_soread, 87 }; 88 89 const struct filterops sowrite_filtops = { 90 .f_flags = FILTEROP_ISFD, 91 .f_attach = NULL, 92 .f_detach = filt_sowdetach, 93 .f_event = filt_sowrite, 94 }; 95 96 97 #ifndef SOMINCONN 98 #define SOMINCONN 80 99 #endif /* SOMINCONN */ 100 101 int somaxconn = SOMAXCONN; 102 int sominconn = SOMINCONN; 103 104 struct pool socket_pool; 105 #ifdef SOCKET_SPLICE 106 struct pool sosplice_pool; 107 struct taskq *sosplice_taskq; 108 struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk"); 109 #endif 110 111 void 112 soinit(void) 113 { 114 pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0, 115 "sockpl", NULL); 116 #ifdef SOCKET_SPLICE 117 pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0, 118 "sosppl", NULL); 119 #endif 120 } 121 122 /* 123 * Socket operation routines. 124 * These routines are called by the routines in 125 * sys_socket.c or from a system process, and 126 * implement the semantics of socket operations by 127 * switching out to the protocol specific routines. 
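 * For example, sobind() below reduces to a single dispatch through the protocol switch, a sketch of the pattern used by most entry points in this file: error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p); the protocol's usrreq handler does the real work.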
128 */ 129 int 130 socreate(int dom, struct socket **aso, int type, int proto) 131 { 132 struct proc *p = curproc; /* XXX */ 133 const struct protosw *prp; 134 struct socket *so; 135 int error, s; 136 137 if (proto) 138 prp = pffindproto(dom, proto, type); 139 else 140 prp = pffindtype(dom, type); 141 if (prp == NULL || prp->pr_attach == NULL) 142 return (EPROTONOSUPPORT); 143 if (prp->pr_type != type) 144 return (EPROTOTYPE); 145 so = pool_get(&socket_pool, PR_WAITOK | PR_ZERO); 146 sigio_init(&so->so_sigio); 147 TAILQ_INIT(&so->so_q0); 148 TAILQ_INIT(&so->so_q); 149 so->so_type = type; 150 if (suser(p) == 0) 151 so->so_state = SS_PRIV; 152 so->so_ruid = p->p_ucred->cr_ruid; 153 so->so_euid = p->p_ucred->cr_uid; 154 so->so_rgid = p->p_ucred->cr_rgid; 155 so->so_egid = p->p_ucred->cr_gid; 156 so->so_cpid = p->p_p->ps_pid; 157 so->so_proto = prp; 158 so->so_snd.sb_timeo_nsecs = INFSLP; 159 so->so_rcv.sb_timeo_nsecs = INFSLP; 160 161 s = solock(so); 162 error = (*prp->pr_attach)(so, proto); 163 if (error) { 164 so->so_state |= SS_NOFDREF; 165 /* sofree() calls sounlock(). */ 166 sofree(so, s); 167 return (error); 168 } 169 sounlock(so, s); 170 *aso = so; 171 return (0); 172 } 173 174 int 175 sobind(struct socket *so, struct mbuf *nam, struct proc *p) 176 { 177 int error; 178 179 soassertlocked(so); 180 181 error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p); 182 return (error); 183 } 184 185 int 186 solisten(struct socket *so, int backlog) 187 { 188 int s, error; 189 190 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) 191 return (EINVAL); 192 #ifdef SOCKET_SPLICE 193 if (isspliced(so) || issplicedback(so)) 194 return (EOPNOTSUPP); 195 #endif /* SOCKET_SPLICE */ 196 s = solock(so); 197 error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL, 198 curproc); 199 if (error) { 200 sounlock(so, s); 201 return (error); 202 } 203 if (TAILQ_FIRST(&so->so_q) == NULL) 204 so->so_options |= SO_ACCEPTCONN; 205 if (backlog < 0 || backlog > somaxconn) 206 backlog = somaxconn; 207 if (backlog < sominconn) 208 backlog = sominconn; 209 so->so_qlimit = backlog; 210 sounlock(so, s); 211 return (0); 212 } 213 214 #define SOSP_FREEING_READ 1 215 #define SOSP_FREEING_WRITE 2 216 void 217 sofree(struct socket *so, int s) 218 { 219 soassertlocked(so); 220 221 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) { 222 sounlock(so, s); 223 return; 224 } 225 if (so->so_head) { 226 /* 227 * We must not decommission a socket that's on the accept(2) 228 * queue. If we do, then accept(2) may hang after select(2) 229 * indicated that the listening socket was ready. 230 */ 231 if (!soqremque(so, 0)) { 232 sounlock(so, s); 233 return; 234 } 235 } 236 sigio_free(&so->so_sigio); 237 #ifdef SOCKET_SPLICE 238 if (so->so_sp) { 239 if (issplicedback(so)) { 240 int freeing = SOSP_FREEING_WRITE; 241 242 if (so->so_sp->ssp_soback == so) 243 freeing |= SOSP_FREEING_READ; 244 sounsplice(so->so_sp->ssp_soback, so, freeing); 245 } 246 if (isspliced(so)) { 247 int freeing = SOSP_FREEING_READ; 248 249 if (so == so->so_sp->ssp_socket) 250 freeing |= SOSP_FREEING_WRITE; 251 sounsplice(so, so->so_sp->ssp_socket, freeing); 252 } 253 } 254 #endif /* SOCKET_SPLICE */ 255 sbrelease(so, &so->so_snd); 256 sorflush(so); 257 sounlock(so, s); 258 #ifdef SOCKET_SPLICE 259 if (so->so_sp) { 260 /* Reuse splice idle, sounsplice() has been called before. 
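The ssp_idleto timeout is re-armed with soreaper() so the final pool_put() is deferred until pending splice tasks and timeouts have finished; see the comment above soreaper().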
*/ 261 timeout_set_proc(&so->so_sp->ssp_idleto, soreaper, so); 262 timeout_add(&so->so_sp->ssp_idleto, 0); 263 } else 264 #endif /* SOCKET_SPLICE */ 265 { 266 pool_put(&socket_pool, so); 267 } 268 } 269 270 static inline uint64_t 271 solinger_nsec(struct socket *so) 272 { 273 if (so->so_linger == 0) 274 return INFSLP; 275 276 return SEC_TO_NSEC(so->so_linger); 277 } 278 279 /* 280 * Close a socket on last file table reference removal. 281 * Initiate disconnect if connected. 282 * Free socket when disconnect complete. 283 */ 284 int 285 soclose(struct socket *so, int flags) 286 { 287 struct socket *so2; 288 int s, error = 0; 289 290 s = solock(so); 291 /* Revoke async IO early. There is a final revocation in sofree(). */ 292 sigio_free(&so->so_sigio); 293 if (so->so_options & SO_ACCEPTCONN) { 294 while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) { 295 (void) soqremque(so2, 0); 296 (void) soabort(so2); 297 } 298 while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) { 299 (void) soqremque(so2, 1); 300 (void) soabort(so2); 301 } 302 } 303 if (so->so_pcb == NULL) 304 goto discard; 305 if (so->so_state & SS_ISCONNECTED) { 306 if ((so->so_state & SS_ISDISCONNECTING) == 0) { 307 error = sodisconnect(so); 308 if (error) 309 goto drop; 310 } 311 if (so->so_options & SO_LINGER) { 312 if ((so->so_state & SS_ISDISCONNECTING) && 313 (flags & MSG_DONTWAIT)) 314 goto drop; 315 while (so->so_state & SS_ISCONNECTED) { 316 error = sosleep_nsec(so, &so->so_timeo, 317 PSOCK | PCATCH, "netcls", 318 solinger_nsec(so)); 319 if (error) 320 break; 321 } 322 } 323 } 324 drop: 325 if (so->so_pcb) { 326 int error2; 327 KASSERT(so->so_proto->pr_detach); 328 error2 = (*so->so_proto->pr_detach)(so); 329 if (error == 0) 330 error = error2; 331 } 332 discard: 333 if (so->so_state & SS_NOFDREF) 334 panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type); 335 so->so_state |= SS_NOFDREF; 336 /* sofree() calls sounlock(). */ 337 sofree(so, s); 338 return (error); 339 } 340 341 int 342 soabort(struct socket *so) 343 { 344 soassertlocked(so); 345 346 return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL, 347 curproc); 348 } 349 350 int 351 soaccept(struct socket *so, struct mbuf *nam) 352 { 353 int error = 0; 354 355 soassertlocked(so); 356 357 if ((so->so_state & SS_NOFDREF) == 0) 358 panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type); 359 so->so_state &= ~SS_NOFDREF; 360 if ((so->so_state & SS_ISDISCONNECTED) == 0 || 361 (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0) 362 error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL, 363 nam, NULL, curproc); 364 else 365 error = ECONNABORTED; 366 return (error); 367 } 368 369 int 370 soconnect(struct socket *so, struct mbuf *nam) 371 { 372 int error; 373 374 soassertlocked(so); 375 376 if (so->so_options & SO_ACCEPTCONN) 377 return (EOPNOTSUPP); 378 /* 379 * If protocol is connection-based, can only connect once. 380 * Otherwise, if connected, try to disconnect first. 381 * This allows user to disconnect by connecting to, e.g., 382 * a null address. 
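 * (A null address is typically a sockaddr with sa_family set to AF_UNSPEC; whether it actually dissolves the association is up to the protocol's PRU_CONNECT handling.)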
383 */ 384 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && 385 ((so->so_proto->pr_flags & PR_CONNREQUIRED) || 386 (error = sodisconnect(so)))) 387 error = EISCONN; 388 else 389 error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT, 390 NULL, nam, NULL, curproc); 391 return (error); 392 } 393 394 int 395 soconnect2(struct socket *so1, struct socket *so2) 396 { 397 int s, error; 398 399 s = solock(so1); 400 error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL, 401 (struct mbuf *)so2, NULL, curproc); 402 sounlock(so1, s); 403 return (error); 404 } 405 406 int 407 sodisconnect(struct socket *so) 408 { 409 int error; 410 411 soassertlocked(so); 412 413 if ((so->so_state & SS_ISCONNECTED) == 0) 414 return (ENOTCONN); 415 if (so->so_state & SS_ISDISCONNECTING) 416 return (EALREADY); 417 error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL, 418 NULL, curproc); 419 return (error); 420 } 421 422 int m_getuio(struct mbuf **, int, long, struct uio *); 423 424 #define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK) 425 /* 426 * Send on a socket. 427 * If send must go all at once and message is larger than 428 * send buffering, then hard error. 429 * Lock against other senders. 430 * If must go all at once and not enough room now, then 431 * inform user that this would block and do nothing. 432 * Otherwise, if nonblocking, send as much as possible. 433 * The data to be sent is described by "uio" if nonzero, 434 * otherwise by the mbuf chain "top" (which must be null 435 * if uio is not). Data provided in mbuf chain must be small 436 * enough to send all at once. 437 * 438 * Returns nonzero on error, timeout or signal; callers 439 * must check for short counts if EINTR/ERESTART are returned. 440 * Data and control buffers are freed on return. 441 */ 442 int 443 sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top, 444 struct mbuf *control, int flags) 445 { 446 long space, clen = 0; 447 size_t resid; 448 int error, s; 449 int atomic = sosendallatonce(so) || top; 450 451 if (uio) 452 resid = uio->uio_resid; 453 else 454 resid = top->m_pkthdr.len; 455 /* MSG_EOR on a SOCK_STREAM socket is invalid. */ 456 if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { 457 m_freem(top); 458 m_freem(control); 459 return (EINVAL); 460 } 461 if (uio && uio->uio_procp) 462 uio->uio_procp->p_ru.ru_msgsnd++; 463 if (control) { 464 /* 465 * In theory clen should be unsigned (since control->m_len is). 466 * However, space must be signed, as it might be less than 0 467 * if we over-committed, and we must use a signed comparison 468 * of space and clen. 
469 */ 470 clen = control->m_len; 471 /* reserve extra space for AF_UNIX's internalize */ 472 if (so->so_proto->pr_domain->dom_family == AF_UNIX && 473 clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) && 474 mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS) 475 clen = CMSG_SPACE( 476 (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) * 477 (sizeof(struct fdpass) / sizeof(int))); 478 } 479 480 #define snderr(errno) { error = errno; goto release; } 481 482 s = solock(so); 483 restart: 484 if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0) 485 goto out; 486 so->so_state |= SS_ISSENDING; 487 do { 488 if (so->so_state & SS_CANTSENDMORE) 489 snderr(EPIPE); 490 if (so->so_error) { 491 error = so->so_error; 492 so->so_error = 0; 493 snderr(error); 494 } 495 if ((so->so_state & SS_ISCONNECTED) == 0) { 496 if (so->so_proto->pr_flags & PR_CONNREQUIRED) { 497 if (!(resid == 0 && clen != 0)) 498 snderr(ENOTCONN); 499 } else if (addr == 0) 500 snderr(EDESTADDRREQ); 501 } 502 space = sbspace(so, &so->so_snd); 503 if (flags & MSG_OOB) 504 space += 1024; 505 if (so->so_proto->pr_domain->dom_family == AF_UNIX) { 506 if (atomic && resid > so->so_snd.sb_hiwat) 507 snderr(EMSGSIZE); 508 } else { 509 if (clen > so->so_snd.sb_hiwat || 510 (atomic && resid > so->so_snd.sb_hiwat - clen)) 511 snderr(EMSGSIZE); 512 } 513 if (space < clen || 514 (space - clen < resid && 515 (atomic || space < so->so_snd.sb_lowat))) { 516 if (flags & MSG_DONTWAIT) 517 snderr(EWOULDBLOCK); 518 sbunlock(so, &so->so_snd); 519 error = sbwait(so, &so->so_snd); 520 so->so_state &= ~SS_ISSENDING; 521 if (error) 522 goto out; 523 goto restart; 524 } 525 space -= clen; 526 do { 527 if (uio == NULL) { 528 /* 529 * Data is prepackaged in "top". 530 */ 531 resid = 0; 532 if (flags & MSG_EOR) 533 top->m_flags |= M_EOR; 534 } else { 535 sounlock(so, s); 536 error = m_getuio(&top, atomic, space, uio); 537 s = solock(so); 538 if (error) 539 goto release; 540 space -= top->m_pkthdr.len; 541 resid = uio->uio_resid; 542 if (flags & MSG_EOR) 543 top->m_flags |= M_EOR; 544 } 545 if (resid == 0) 546 so->so_state &= ~SS_ISSENDING; 547 if (top && so->so_options & SO_ZEROIZE) 548 top->m_flags |= M_ZEROIZE; 549 error = (*so->so_proto->pr_usrreq)(so, 550 (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND, 551 top, addr, control, curproc); 552 clen = 0; 553 control = NULL; 554 top = NULL; 555 if (error) 556 goto release; 557 } while (resid && space > 0); 558 } while (resid); 559 560 release: 561 so->so_state &= ~SS_ISSENDING; 562 sbunlock(so, &so->so_snd); 563 out: 564 sounlock(so, s); 565 m_freem(top); 566 m_freem(control); 567 return (error); 568 } 569 570 int 571 m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio) 572 { 573 struct mbuf *m, *top = NULL; 574 struct mbuf **nextp = ⊤ 575 u_long len, mlen; 576 size_t resid = uio->uio_resid; 577 int error; 578 579 do { 580 if (top == NULL) { 581 MGETHDR(m, M_WAIT, MT_DATA); 582 mlen = MHLEN; 583 m->m_pkthdr.len = 0; 584 m->m_pkthdr.ph_ifidx = 0; 585 } else { 586 MGET(m, M_WAIT, MT_DATA); 587 mlen = MLEN; 588 } 589 /* chain mbuf together */ 590 *nextp = m; 591 nextp = &m->m_next; 592 593 resid = ulmin(resid, space); 594 if (resid >= MINCLSIZE) { 595 MCLGETI(m, M_NOWAIT, NULL, ulmin(resid, MAXMCLBYTES)); 596 if ((m->m_flags & M_EXT) == 0) 597 MCLGETI(m, M_NOWAIT, NULL, MCLBYTES); 598 if ((m->m_flags & M_EXT) == 0) 599 goto nopages; 600 mlen = m->m_ext.ext_size; 601 len = ulmin(mlen, resid); 602 /* 603 * For datagram protocols, leave room 604 * for protocol headers in first mbuf. 
605 */ 606 if (atomic && m == top && len < mlen - max_hdr) 607 m->m_data += max_hdr; 608 } else { 609 nopages: 610 len = ulmin(mlen, resid); 611 /* 612 * For datagram protocols, leave room 613 * for protocol headers in first mbuf. 614 */ 615 if (atomic && m == top && len < mlen - max_hdr) 616 m_align(m, len); 617 } 618 619 error = uiomove(mtod(m, caddr_t), len, uio); 620 if (error) { 621 m_freem(top); 622 return (error); 623 } 624 625 /* adjust counters */ 626 resid = uio->uio_resid; 627 space -= len; 628 m->m_len = len; 629 top->m_pkthdr.len += len; 630 631 /* Is there more space and more data? */ 632 } while (space > 0 && resid > 0); 633 634 *mp = top; 635 return 0; 636 } 637 638 /* 639 * Following replacement or removal of the first mbuf on the first 640 * mbuf chain of a socket buffer, push necessary state changes back 641 * into the socket buffer so that other consumers see the values 642 * consistently. 'nextrecord' is the callers locally stored value of 643 * the original value of sb->sb_mb->m_nextpkt which must be restored 644 * when the lead mbuf changes. NOTE: 'nextrecord' may be NULL. 645 */ 646 void 647 sbsync(struct sockbuf *sb, struct mbuf *nextrecord) 648 { 649 650 /* 651 * First, update for the new value of nextrecord. If necessary, 652 * make it the first record. 653 */ 654 if (sb->sb_mb != NULL) 655 sb->sb_mb->m_nextpkt = nextrecord; 656 else 657 sb->sb_mb = nextrecord; 658 659 /* 660 * Now update any dependent socket buffer fields to reflect 661 * the new state. This is an inline of SB_EMPTY_FIXUP, with 662 * the addition of a second clause that takes care of the 663 * case where sb_mb has been updated, but remains the last 664 * record. 665 */ 666 if (sb->sb_mb == NULL) { 667 sb->sb_mbtail = NULL; 668 sb->sb_lastrecord = NULL; 669 } else if (sb->sb_mb->m_nextpkt == NULL) 670 sb->sb_lastrecord = sb->sb_mb; 671 } 672 673 /* 674 * Implement receive operations on a socket. 675 * We depend on the way that records are added to the sockbuf 676 * by sbappend*. In particular, each record (mbufs linked through m_next) 677 * must begin with an address if the protocol so specifies, 678 * followed by an optional mbuf or mbufs containing ancillary data, 679 * and then zero or more mbufs of data. 680 * In order to avoid blocking network for the entire time here, we release 681 * the solock() while doing the actual copy to user space. 682 * Although the sockbuf is locked, new data may still be appended, 683 * and thus we must maintain consistency of the sockbuf during that time. 684 * 685 * The caller may receive the data as a single mbuf chain by supplying 686 * an mbuf **mp0 for use in returning the chain. The uio is then used 687 * only for the count in uio_resid. 
688 */ 689 int 690 soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio, 691 struct mbuf **mp0, struct mbuf **controlp, int *flagsp, 692 socklen_t controllen) 693 { 694 struct mbuf *m, **mp; 695 struct mbuf *cm; 696 u_long len, offset, moff; 697 int flags, error, s, type, uio_error = 0; 698 const struct protosw *pr = so->so_proto; 699 struct mbuf *nextrecord; 700 size_t resid, orig_resid = uio->uio_resid; 701 702 mp = mp0; 703 if (paddr) 704 *paddr = NULL; 705 if (controlp) 706 *controlp = NULL; 707 if (flagsp) 708 flags = *flagsp &~ MSG_EOR; 709 else 710 flags = 0; 711 if (flags & MSG_OOB) { 712 m = m_get(M_WAIT, MT_DATA); 713 s = solock(so); 714 error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, 715 (struct mbuf *)(long)(flags & MSG_PEEK), NULL, curproc); 716 sounlock(so, s); 717 if (error) 718 goto bad; 719 do { 720 error = uiomove(mtod(m, caddr_t), 721 ulmin(uio->uio_resid, m->m_len), uio); 722 m = m_free(m); 723 } while (uio->uio_resid && error == 0 && m); 724 bad: 725 m_freem(m); 726 return (error); 727 } 728 if (mp) 729 *mp = NULL; 730 731 s = solock(so); 732 restart: 733 if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) { 734 sounlock(so, s); 735 return (error); 736 } 737 738 m = so->so_rcv.sb_mb; 739 #ifdef SOCKET_SPLICE 740 if (isspliced(so)) 741 m = NULL; 742 #endif /* SOCKET_SPLICE */ 743 /* 744 * If we have less data than requested, block awaiting more 745 * (subject to any timeout) if: 746 * 1. the current count is less than the low water mark, 747 * 2. MSG_WAITALL is set, and it is possible to do the entire 748 * receive operation at once if we block (resid <= hiwat), or 749 * 3. MSG_DONTWAIT is not set. 750 * If MSG_WAITALL is set but resid is larger than the receive buffer, 751 * we have to do the receive in sections, and thus risk returning 752 * a short count if a timeout or signal occurs after we start. 753 */ 754 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 755 so->so_rcv.sb_cc < uio->uio_resid) && 756 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 757 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && 758 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { 759 #ifdef DIAGNOSTIC 760 if (m == NULL && so->so_rcv.sb_cc) 761 #ifdef SOCKET_SPLICE 762 if (!isspliced(so)) 763 #endif /* SOCKET_SPLICE */ 764 panic("receive 1: so %p, so_type %d, sb_cc %lu", 765 so, so->so_type, so->so_rcv.sb_cc); 766 #endif 767 if (so->so_error) { 768 if (m) 769 goto dontblock; 770 error = so->so_error; 771 if ((flags & MSG_PEEK) == 0) 772 so->so_error = 0; 773 goto release; 774 } 775 if (so->so_state & SS_CANTRCVMORE) { 776 if (m) 777 goto dontblock; 778 else if (so->so_rcv.sb_cc == 0) 779 goto release; 780 } 781 for (; m; m = m->m_next) 782 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 783 m = so->so_rcv.sb_mb; 784 goto dontblock; 785 } 786 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 787 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 788 error = ENOTCONN; 789 goto release; 790 } 791 if (uio->uio_resid == 0 && controlp == NULL) 792 goto release; 793 if (flags & MSG_DONTWAIT) { 794 error = EWOULDBLOCK; 795 goto release; 796 } 797 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); 798 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); 799 sbunlock(so, &so->so_rcv); 800 error = sbwait(so, &so->so_rcv); 801 if (error) { 802 sounlock(so, s); 803 return (error); 804 } 805 goto restart; 806 } 807 dontblock: 808 /* 809 * On entry here, m points to the first record of the socket buffer. 
810 * From this point onward, we maintain 'nextrecord' as a cache of the 811 * pointer to the next record in the socket buffer. We must keep the 812 * various socket buffer pointers and local stack versions of the 813 * pointers in sync, pushing out modifications before operations that 814 * may sleep, and re-reading them afterwards. 815 * 816 * Otherwise, we will race with the network stack appending new data 817 * or records onto the socket buffer by using inconsistent/stale 818 * versions of the field, possibly resulting in socket buffer 819 * corruption. 820 */ 821 if (uio->uio_procp) 822 uio->uio_procp->p_ru.ru_msgrcv++; 823 KASSERT(m == so->so_rcv.sb_mb); 824 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); 825 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); 826 nextrecord = m->m_nextpkt; 827 if (pr->pr_flags & PR_ADDR) { 828 #ifdef DIAGNOSTIC 829 if (m->m_type != MT_SONAME) 830 panic("receive 1a: so %p, so_type %d, m %p, m_type %d", 831 so, so->so_type, m, m->m_type); 832 #endif 833 orig_resid = 0; 834 if (flags & MSG_PEEK) { 835 if (paddr) 836 *paddr = m_copym(m, 0, m->m_len, M_NOWAIT); 837 m = m->m_next; 838 } else { 839 sbfree(&so->so_rcv, m); 840 if (paddr) { 841 *paddr = m; 842 so->so_rcv.sb_mb = m->m_next; 843 m->m_next = 0; 844 m = so->so_rcv.sb_mb; 845 } else { 846 so->so_rcv.sb_mb = m_free(m); 847 m = so->so_rcv.sb_mb; 848 } 849 sbsync(&so->so_rcv, nextrecord); 850 } 851 } 852 while (m && m->m_type == MT_CONTROL && error == 0) { 853 int skip = 0; 854 if (flags & MSG_PEEK) { 855 if (mtod(m, struct cmsghdr *)->cmsg_type == 856 SCM_RIGHTS) { 857 /* don't leak internalized SCM_RIGHTS msgs */ 858 skip = 1; 859 } else if (controlp) 860 *controlp = m_copym(m, 0, m->m_len, M_NOWAIT); 861 m = m->m_next; 862 } else { 863 sbfree(&so->so_rcv, m); 864 so->so_rcv.sb_mb = m->m_next; 865 m->m_nextpkt = m->m_next = NULL; 866 cm = m; 867 m = so->so_rcv.sb_mb; 868 sbsync(&so->so_rcv, nextrecord); 869 if (controlp) { 870 if (pr->pr_domain->dom_externalize) { 871 error = 872 (*pr->pr_domain->dom_externalize) 873 (cm, controllen, flags); 874 } 875 *controlp = cm; 876 } else { 877 /* 878 * Dispose of any SCM_RIGHTS message that went 879 * through the read path rather than recv. 880 */ 881 if (pr->pr_domain->dom_dispose) 882 pr->pr_domain->dom_dispose(cm); 883 m_free(cm); 884 } 885 } 886 if (m != NULL) 887 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 888 else 889 nextrecord = so->so_rcv.sb_mb; 890 if (controlp && !skip) { 891 orig_resid = 0; 892 controlp = &(*controlp)->m_next; 893 } 894 } 895 896 /* If m is non-NULL, we have some data to read. */ 897 if (m) { 898 type = m->m_type; 899 if (type == MT_OOBDATA) 900 flags |= MSG_OOB; 901 if (m->m_flags & M_BCAST) 902 flags |= MSG_BCAST; 903 if (m->m_flags & M_MCAST) 904 flags |= MSG_MCAST; 905 } 906 SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); 907 SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); 908 909 moff = 0; 910 offset = 0; 911 while (m && uio->uio_resid > 0 && error == 0) { 912 if (m->m_type == MT_OOBDATA) { 913 if (type != MT_OOBDATA) 914 break; 915 } else if (type == MT_OOBDATA) { 916 break; 917 } else if (m->m_type == MT_CONTROL) { 918 /* 919 * If there is more than one control message in the 920 * stream, we do a short read. Next can be received 921 * or disposed by another system call. 
922 */ 923 break; 924 #ifdef DIAGNOSTIC 925 } else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) { 926 panic("receive 3: so %p, so_type %d, m %p, m_type %d", 927 so, so->so_type, m, m->m_type); 928 #endif 929 } 930 so->so_state &= ~SS_RCVATMARK; 931 len = uio->uio_resid; 932 if (so->so_oobmark && len > so->so_oobmark - offset) 933 len = so->so_oobmark - offset; 934 if (len > m->m_len - moff) 935 len = m->m_len - moff; 936 /* 937 * If mp is set, just pass back the mbufs. 938 * Otherwise copy them out via the uio, then free. 939 * Sockbuf must be consistent here (points to current mbuf, 940 * it points to next record) when we drop priority; 941 * we must note any additions to the sockbuf when we 942 * block interrupts again. 943 */ 944 if (mp == NULL && uio_error == 0) { 945 SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); 946 SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); 947 resid = uio->uio_resid; 948 sounlock(so, s); 949 uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio); 950 s = solock(so); 951 if (uio_error) 952 uio->uio_resid = resid - len; 953 } else 954 uio->uio_resid -= len; 955 if (len == m->m_len - moff) { 956 if (m->m_flags & M_EOR) 957 flags |= MSG_EOR; 958 if (flags & MSG_PEEK) { 959 m = m->m_next; 960 moff = 0; 961 } else { 962 nextrecord = m->m_nextpkt; 963 sbfree(&so->so_rcv, m); 964 if (mp) { 965 *mp = m; 966 mp = &m->m_next; 967 so->so_rcv.sb_mb = m = m->m_next; 968 *mp = NULL; 969 } else { 970 so->so_rcv.sb_mb = m_free(m); 971 m = so->so_rcv.sb_mb; 972 } 973 /* 974 * If m != NULL, we also know that 975 * so->so_rcv.sb_mb != NULL. 976 */ 977 KASSERT(so->so_rcv.sb_mb == m); 978 if (m) { 979 m->m_nextpkt = nextrecord; 980 if (nextrecord == NULL) 981 so->so_rcv.sb_lastrecord = m; 982 } else { 983 so->so_rcv.sb_mb = nextrecord; 984 SB_EMPTY_FIXUP(&so->so_rcv); 985 } 986 SBLASTRECORDCHK(&so->so_rcv, "soreceive 3"); 987 SBLASTMBUFCHK(&so->so_rcv, "soreceive 3"); 988 } 989 } else { 990 if (flags & MSG_PEEK) 991 moff += len; 992 else { 993 if (mp) 994 *mp = m_copym(m, 0, len, M_WAIT); 995 m->m_data += len; 996 m->m_len -= len; 997 so->so_rcv.sb_cc -= len; 998 so->so_rcv.sb_datacc -= len; 999 } 1000 } 1001 if (so->so_oobmark) { 1002 if ((flags & MSG_PEEK) == 0) { 1003 so->so_oobmark -= len; 1004 if (so->so_oobmark == 0) { 1005 so->so_state |= SS_RCVATMARK; 1006 break; 1007 } 1008 } else { 1009 offset += len; 1010 if (offset == so->so_oobmark) 1011 break; 1012 } 1013 } 1014 if (flags & MSG_EOR) 1015 break; 1016 /* 1017 * If the MSG_WAITALL flag is set (for non-atomic socket), 1018 * we must not quit until "uio->uio_resid == 0" or an error 1019 * termination. If a signal/timeout occurs, return 1020 * with a short count but without error. 1021 * Keep sockbuf locked against other readers. 1022 */ 1023 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 1024 !sosendallatonce(so) && !nextrecord) { 1025 if (so->so_error || so->so_state & SS_CANTRCVMORE) 1026 break; 1027 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); 1028 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); 1029 error = sbwait(so, &so->so_rcv); 1030 if (error) { 1031 sbunlock(so, &so->so_rcv); 1032 sounlock(so, s); 1033 return (0); 1034 } 1035 if ((m = so->so_rcv.sb_mb) != NULL) 1036 nextrecord = m->m_nextpkt; 1037 } 1038 } 1039 1040 if (m && pr->pr_flags & PR_ATOMIC) { 1041 flags |= MSG_TRUNC; 1042 if ((flags & MSG_PEEK) == 0) 1043 (void) sbdroprecord(&so->so_rcv); 1044 } 1045 if ((flags & MSG_PEEK) == 0) { 1046 if (m == NULL) { 1047 /* 1048 * First part is an inline SB_EMPTY_FIXUP(). 
Second 1049 * part makes sure sb_lastrecord is up-to-date if 1050 * there is still data in the socket buffer. 1051 */ 1052 so->so_rcv.sb_mb = nextrecord; 1053 if (so->so_rcv.sb_mb == NULL) { 1054 so->so_rcv.sb_mbtail = NULL; 1055 so->so_rcv.sb_lastrecord = NULL; 1056 } else if (nextrecord->m_nextpkt == NULL) 1057 so->so_rcv.sb_lastrecord = nextrecord; 1058 } 1059 SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); 1060 SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); 1061 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1062 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, 1063 (struct mbuf *)(long)flags, NULL, curproc); 1064 } 1065 if (orig_resid == uio->uio_resid && orig_resid && 1066 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 1067 sbunlock(so, &so->so_rcv); 1068 goto restart; 1069 } 1070 1071 if (uio_error) 1072 error = uio_error; 1073 1074 if (flagsp) 1075 *flagsp |= flags; 1076 release: 1077 sbunlock(so, &so->so_rcv); 1078 sounlock(so, s); 1079 return (error); 1080 } 1081 1082 int 1083 soshutdown(struct socket *so, int how) 1084 { 1085 const struct protosw *pr = so->so_proto; 1086 int s, error = 0; 1087 1088 s = solock(so); 1089 switch (how) { 1090 case SHUT_RD: 1091 sorflush(so); 1092 break; 1093 case SHUT_RDWR: 1094 sorflush(so); 1095 /* FALLTHROUGH */ 1096 case SHUT_WR: 1097 error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL, 1098 curproc); 1099 break; 1100 default: 1101 error = EINVAL; 1102 break; 1103 } 1104 sounlock(so, s); 1105 1106 return (error); 1107 } 1108 1109 void 1110 sorflush(struct socket *so) 1111 { 1112 struct sockbuf *sb = &so->so_rcv; 1113 const struct protosw *pr = so->so_proto; 1114 struct socket aso; 1115 int error; 1116 1117 sb->sb_flags |= SB_NOINTR; 1118 error = sblock(so, sb, M_WAITOK); 1119 /* with SB_NOINTR and M_WAITOK sblock() must not fail */ 1120 KASSERT(error == 0); 1121 socantrcvmore(so); 1122 sbunlock(so, sb); 1123 aso.so_proto = pr; 1124 aso.so_rcv = *sb; 1125 memset(&sb->sb_startzero, 0, 1126 (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero); 1127 sb->sb_timeo_nsecs = INFSLP; 1128 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) 1129 (*pr->pr_domain->dom_dispose)(aso.so_rcv.sb_mb); 1130 sbrelease(&aso, &aso.so_rcv); 1131 } 1132 1133 #ifdef SOCKET_SPLICE 1134 1135 #define so_splicelen so_sp->ssp_len 1136 #define so_splicemax so_sp->ssp_max 1137 #define so_idletv so_sp->ssp_idletv 1138 #define so_idleto so_sp->ssp_idleto 1139 #define so_splicetask so_sp->ssp_task 1140 1141 int 1142 sosplice(struct socket *so, int fd, off_t max, struct timeval *tv) 1143 { 1144 struct file *fp; 1145 struct socket *sosp; 1146 struct sosplice *sp; 1147 struct taskq *tq; 1148 int error = 0; 1149 1150 soassertlocked(so); 1151 1152 if (sosplice_taskq == NULL) { 1153 rw_enter_write(&sosplice_lock); 1154 if (sosplice_taskq == NULL) { 1155 tq = taskq_create("sosplice", 1, IPL_SOFTNET, 1156 TASKQ_MPSAFE); 1157 /* Ensure the taskq is fully visible to other CPUs. 
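Together with the unlocked sosplice_taskq check above this forms a double-checked initialization: the write barrier orders the taskq setup stores before the store of the global pointer, so CPUs that skip the lock observe a fully constructed taskq.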
*/ 1158 membar_producer(); 1159 sosplice_taskq = tq; 1160 } 1161 rw_exit_write(&sosplice_lock); 1162 } 1163 if (sosplice_taskq == NULL) 1164 return (ENOMEM); 1165 1166 if ((so->so_proto->pr_flags & PR_SPLICE) == 0) 1167 return (EPROTONOSUPPORT); 1168 if (so->so_options & SO_ACCEPTCONN) 1169 return (EOPNOTSUPP); 1170 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1171 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 1172 return (ENOTCONN); 1173 if (so->so_sp == NULL) { 1174 sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO); 1175 if (so->so_sp == NULL) 1176 so->so_sp = sp; 1177 else 1178 pool_put(&sosplice_pool, sp); 1179 } 1180 1181 /* If no fd is given, unsplice by removing existing link. */ 1182 if (fd < 0) { 1183 /* Lock receive buffer. */ 1184 if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) { 1185 return (error); 1186 } 1187 if (so->so_sp->ssp_socket) 1188 sounsplice(so, so->so_sp->ssp_socket, 0); 1189 sbunlock(so, &so->so_rcv); 1190 return (0); 1191 } 1192 1193 if (max && max < 0) 1194 return (EINVAL); 1195 1196 if (tv && (tv->tv_sec < 0 || tv->tv_usec < 0)) 1197 return (EINVAL); 1198 1199 /* Find sosp, the drain socket where data will be spliced into. */ 1200 if ((error = getsock(curproc, fd, &fp)) != 0) 1201 return (error); 1202 sosp = fp->f_data; 1203 if (sosp->so_sp == NULL) { 1204 sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO); 1205 if (sosp->so_sp == NULL) 1206 sosp->so_sp = sp; 1207 else 1208 pool_put(&sosplice_pool, sp); 1209 } 1210 1211 /* Lock both receive and send buffer. */ 1212 if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) { 1213 goto frele; 1214 } 1215 if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) { 1216 sbunlock(so, &so->so_rcv); 1217 goto frele; 1218 } 1219 1220 if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) { 1221 error = EBUSY; 1222 goto release; 1223 } 1224 if (sosp->so_proto->pr_usrreq != so->so_proto->pr_usrreq) { 1225 error = EPROTONOSUPPORT; 1226 goto release; 1227 } 1228 if (sosp->so_options & SO_ACCEPTCONN) { 1229 error = EOPNOTSUPP; 1230 goto release; 1231 } 1232 if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) { 1233 error = ENOTCONN; 1234 goto release; 1235 } 1236 1237 /* Splice so and sosp together. */ 1238 so->so_sp->ssp_socket = sosp; 1239 sosp->so_sp->ssp_soback = so; 1240 so->so_splicelen = 0; 1241 so->so_splicemax = max; 1242 if (tv) 1243 so->so_idletv = *tv; 1244 else 1245 timerclear(&so->so_idletv); 1246 timeout_set_proc(&so->so_idleto, soidle, so); 1247 task_set(&so->so_splicetask, sotask, so); 1248 1249 /* 1250 * To prevent softnet interrupt from calling somove() while 1251 * we sleep, the socket buffers are not marked as spliced yet. 1252 */ 1253 if (somove(so, M_WAIT)) { 1254 so->so_rcv.sb_flags |= SB_SPLICE; 1255 sosp->so_snd.sb_flags |= SB_SPLICE; 1256 } 1257 1258 release: 1259 sbunlock(sosp, &sosp->so_snd); 1260 sbunlock(so, &so->so_rcv); 1261 frele: 1262 FRELE(fp, curproc); 1263 return (error); 1264 } 1265 1266 void 1267 sounsplice(struct socket *so, struct socket *sosp, int freeing) 1268 { 1269 soassertlocked(so); 1270 1271 task_del(sosplice_taskq, &so->so_splicetask); 1272 timeout_del(&so->so_idleto); 1273 sosp->so_snd.sb_flags &= ~SB_SPLICE; 1274 so->so_rcv.sb_flags &= ~SB_SPLICE; 1275 so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL; 1276 /* Do not wakeup a socket that is about to be freed. 
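sofree() reports that case through the SOSP_FREEING_READ/WRITE bits so the matching wakeup below is skipped.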
*/ 1277 if ((freeing & SOSP_FREEING_READ) == 0 && soreadable(so)) 1278 sorwakeup(so); 1279 if ((freeing & SOSP_FREEING_WRITE) == 0 && sowriteable(sosp)) 1280 sowwakeup(sosp); 1281 } 1282 1283 void 1284 soidle(void *arg) 1285 { 1286 struct socket *so = arg; 1287 int s; 1288 1289 s = solock(so); 1290 if (so->so_rcv.sb_flags & SB_SPLICE) { 1291 so->so_error = ETIMEDOUT; 1292 sounsplice(so, so->so_sp->ssp_socket, 0); 1293 } 1294 sounlock(so, s); 1295 } 1296 1297 void 1298 sotask(void *arg) 1299 { 1300 struct socket *so = arg; 1301 int s; 1302 1303 s = solock(so); 1304 if (so->so_rcv.sb_flags & SB_SPLICE) { 1305 /* 1306 * We may not sleep here as sofree() and unsplice() may be 1307 * called from softnet interrupt context. This would remove 1308 * the socket during somove(). 1309 */ 1310 somove(so, M_DONTWAIT); 1311 } 1312 sounlock(so, s); 1313 1314 /* Avoid user land starvation. */ 1315 yield(); 1316 } 1317 1318 /* 1319 * The socket splicing task or idle timeout may sleep while grabbing the net 1320 * lock. As sofree() can be called anytime, sotask() or soidle() could access 1321 * the socket memory of a freed socket after wakeup. So delay the pool_put() 1322 * after all pending socket splicing tasks or timeouts have finished. Do this 1323 * by scheduling it on the same threads. 1324 */ 1325 void 1326 soreaper(void *arg) 1327 { 1328 struct socket *so = arg; 1329 1330 /* Reuse splice task, sounsplice() has been called before. */ 1331 task_set(&so->so_sp->ssp_task, soput, so); 1332 task_add(sosplice_taskq, &so->so_sp->ssp_task); 1333 } 1334 1335 void 1336 soput(void *arg) 1337 { 1338 struct socket *so = arg; 1339 1340 pool_put(&sosplice_pool, so->so_sp); 1341 pool_put(&socket_pool, so); 1342 } 1343 1344 /* 1345 * Move data from receive buffer of spliced source socket to send 1346 * buffer of drain socket. Try to move as much as possible in one 1347 * big chunk. It is a TCP only implementation. 1348 * Return value 0 means splicing has been finished, 1 continue. 1349 */ 1350 int 1351 somove(struct socket *so, int wait) 1352 { 1353 struct socket *sosp = so->so_sp->ssp_socket; 1354 struct mbuf *m, **mp, *nextrecord; 1355 u_long len, off, oobmark; 1356 long space; 1357 int error = 0, maxreached = 0; 1358 unsigned int state; 1359 1360 soassertlocked(so); 1361 1362 nextpkt: 1363 if (so->so_error) { 1364 error = so->so_error; 1365 goto release; 1366 } 1367 if (sosp->so_state & SS_CANTSENDMORE) { 1368 error = EPIPE; 1369 goto release; 1370 } 1371 if (sosp->so_error && sosp->so_error != ETIMEDOUT && 1372 sosp->so_error != EFBIG && sosp->so_error != ELOOP) { 1373 error = sosp->so_error; 1374 goto release; 1375 } 1376 if ((sosp->so_state & SS_ISCONNECTED) == 0) 1377 goto release; 1378 1379 /* Calculate how many bytes can be copied now. 
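len starts as everything queued in the receive buffer and is then clamped by the splice maximum, if one is set, and by the space left in the drain's send buffer.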
*/ 1380 len = so->so_rcv.sb_datacc; 1381 if (so->so_splicemax) { 1382 KASSERT(so->so_splicelen < so->so_splicemax); 1383 if (so->so_splicemax <= so->so_splicelen + len) { 1384 len = so->so_splicemax - so->so_splicelen; 1385 maxreached = 1; 1386 } 1387 } 1388 space = sbspace(sosp, &sosp->so_snd); 1389 if (so->so_oobmark && so->so_oobmark < len && 1390 so->so_oobmark < space + 1024) 1391 space += 1024; 1392 if (space <= 0) { 1393 maxreached = 0; 1394 goto release; 1395 } 1396 if (space < len) { 1397 maxreached = 0; 1398 if (space < sosp->so_snd.sb_lowat) 1399 goto release; 1400 len = space; 1401 } 1402 sosp->so_state |= SS_ISSENDING; 1403 1404 SBLASTRECORDCHK(&so->so_rcv, "somove 1"); 1405 SBLASTMBUFCHK(&so->so_rcv, "somove 1"); 1406 m = so->so_rcv.sb_mb; 1407 if (m == NULL) 1408 goto release; 1409 nextrecord = m->m_nextpkt; 1410 1411 /* Drop address and control information not used with splicing. */ 1412 if (so->so_proto->pr_flags & PR_ADDR) { 1413 #ifdef DIAGNOSTIC 1414 if (m->m_type != MT_SONAME) 1415 panic("somove soname: so %p, so_type %d, m %p, " 1416 "m_type %d", so, so->so_type, m, m->m_type); 1417 #endif 1418 m = m->m_next; 1419 } 1420 while (m && m->m_type == MT_CONTROL) 1421 m = m->m_next; 1422 if (m == NULL) { 1423 sbdroprecord(&so->so_rcv); 1424 if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb) 1425 (so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL, 1426 NULL, NULL, NULL); 1427 goto nextpkt; 1428 } 1429 1430 /* 1431 * By splicing sockets connected to localhost, userland might create a 1432 * loop. Dissolve splicing with error if loop is detected by counter. 1433 */ 1434 if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) { 1435 error = ELOOP; 1436 goto release; 1437 } 1438 1439 if (so->so_proto->pr_flags & PR_ATOMIC) { 1440 if ((m->m_flags & M_PKTHDR) == 0) 1441 panic("somove !PKTHDR: so %p, so_type %d, m %p, " 1442 "m_type %d", so, so->so_type, m, m->m_type); 1443 if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) { 1444 error = EMSGSIZE; 1445 goto release; 1446 } 1447 if (len < m->m_pkthdr.len) 1448 goto release; 1449 if (m->m_pkthdr.len < len) { 1450 maxreached = 0; 1451 len = m->m_pkthdr.len; 1452 } 1453 /* 1454 * Throw away the name mbuf after it has been assured 1455 * that the whole first record can be processed. 1456 */ 1457 m = so->so_rcv.sb_mb; 1458 sbfree(&so->so_rcv, m); 1459 so->so_rcv.sb_mb = m_free(m); 1460 sbsync(&so->so_rcv, nextrecord); 1461 } 1462 /* 1463 * Throw away the control mbufs after it has been assured 1464 * that the whole first record can be processed. 1465 */ 1466 m = so->so_rcv.sb_mb; 1467 while (m && m->m_type == MT_CONTROL) { 1468 sbfree(&so->so_rcv, m); 1469 so->so_rcv.sb_mb = m_free(m); 1470 m = so->so_rcv.sb_mb; 1471 sbsync(&so->so_rcv, nextrecord); 1472 } 1473 1474 SBLASTRECORDCHK(&so->so_rcv, "somove 2"); 1475 SBLASTMBUFCHK(&so->so_rcv, "somove 2"); 1476 1477 /* Take at most len mbufs out of receive buffer. */ 1478 for (off = 0, mp = &m; off <= len && *mp; 1479 off += (*mp)->m_len, mp = &(*mp)->m_next) { 1480 u_long size = len - off; 1481 1482 #ifdef DIAGNOSTIC 1483 if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER) 1484 panic("somove type: so %p, so_type %d, m %p, " 1485 "m_type %d", so, so->so_type, *mp, (*mp)->m_type); 1486 #endif 1487 if ((*mp)->m_len > size) { 1488 /* 1489 * Move only a partial mbuf at maximum splice length or 1490 * if the drain buffer is too small for this large mbuf. 
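 * In the partial case the front of the mbuf is duplicated with m_copym() and the original stays in the receive buffer with m_data and m_len adjusted.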
1491 */ 1492 if (!maxreached && sosp->so_snd.sb_datacc > 0) { 1493 len -= size; 1494 break; 1495 } 1496 *mp = m_copym(so->so_rcv.sb_mb, 0, size, wait); 1497 if (*mp == NULL) { 1498 len -= size; 1499 break; 1500 } 1501 so->so_rcv.sb_mb->m_data += size; 1502 so->so_rcv.sb_mb->m_len -= size; 1503 so->so_rcv.sb_cc -= size; 1504 so->so_rcv.sb_datacc -= size; 1505 } else { 1506 *mp = so->so_rcv.sb_mb; 1507 sbfree(&so->so_rcv, *mp); 1508 so->so_rcv.sb_mb = (*mp)->m_next; 1509 sbsync(&so->so_rcv, nextrecord); 1510 } 1511 } 1512 *mp = NULL; 1513 1514 SBLASTRECORDCHK(&so->so_rcv, "somove 3"); 1515 SBLASTMBUFCHK(&so->so_rcv, "somove 3"); 1516 SBCHECK(&so->so_rcv); 1517 if (m == NULL) 1518 goto release; 1519 m->m_nextpkt = NULL; 1520 if (m->m_flags & M_PKTHDR) { 1521 m_resethdr(m); 1522 m->m_pkthdr.len = len; 1523 } 1524 1525 /* Send window update to source peer as receive buffer has changed. */ 1526 if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb) 1527 (so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL, 1528 NULL, NULL, NULL); 1529 1530 /* Receive buffer did shrink by len bytes, adjust oob. */ 1531 state = so->so_state; 1532 so->so_state &= ~SS_RCVATMARK; 1533 oobmark = so->so_oobmark; 1534 so->so_oobmark = oobmark > len ? oobmark - len : 0; 1535 if (oobmark) { 1536 if (oobmark == len) 1537 so->so_state |= SS_RCVATMARK; 1538 if (oobmark >= len) 1539 oobmark = 0; 1540 } 1541 1542 /* 1543 * Handle oob data. If any malloc fails, ignore error. 1544 * TCP urgent data is not very reliable anyway. 1545 */ 1546 while (((state & SS_RCVATMARK) || oobmark) && 1547 (so->so_options & SO_OOBINLINE)) { 1548 struct mbuf *o = NULL; 1549 1550 if (state & SS_RCVATMARK) { 1551 o = m_get(wait, MT_DATA); 1552 state &= ~SS_RCVATMARK; 1553 } else if (oobmark) { 1554 o = m_split(m, oobmark, wait); 1555 if (o) { 1556 error = (*sosp->so_proto->pr_usrreq)(sosp, 1557 PRU_SEND, m, NULL, NULL, NULL); 1558 if (error) { 1559 if (sosp->so_state & SS_CANTSENDMORE) 1560 error = EPIPE; 1561 m_freem(o); 1562 goto release; 1563 } 1564 len -= oobmark; 1565 so->so_splicelen += oobmark; 1566 m = o; 1567 o = m_get(wait, MT_DATA); 1568 } 1569 oobmark = 0; 1570 } 1571 if (o) { 1572 o->m_len = 1; 1573 *mtod(o, caddr_t) = *mtod(m, caddr_t); 1574 error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SENDOOB, 1575 o, NULL, NULL, NULL); 1576 if (error) { 1577 if (sosp->so_state & SS_CANTSENDMORE) 1578 error = EPIPE; 1579 m_freem(m); 1580 goto release; 1581 } 1582 len -= 1; 1583 so->so_splicelen += 1; 1584 if (oobmark) { 1585 oobmark -= 1; 1586 if (oobmark == 0) 1587 state |= SS_RCVATMARK; 1588 } 1589 m_adj(m, 1); 1590 } 1591 } 1592 1593 /* Append all remaining data to drain socket. */ 1594 if (so->so_rcv.sb_cc == 0 || maxreached) 1595 sosp->so_state &= ~SS_ISSENDING; 1596 error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SEND, m, NULL, NULL, 1597 NULL); 1598 if (error) { 1599 if (sosp->so_state & SS_CANTSENDMORE) 1600 error = EPIPE; 1601 goto release; 1602 } 1603 so->so_splicelen += len; 1604 1605 /* Move several packets if possible. 
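Loop back while the splice maximum has not been reached and another record is already queued.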
*/ 1606 if (!maxreached && nextrecord) 1607 goto nextpkt; 1608 1609 release: 1610 sosp->so_state &= ~SS_ISSENDING; 1611 if (!error && maxreached && so->so_splicemax == so->so_splicelen) 1612 error = EFBIG; 1613 if (error) 1614 so->so_error = error; 1615 if (((so->so_state & SS_CANTRCVMORE) && so->so_rcv.sb_cc == 0) || 1616 (sosp->so_state & SS_CANTSENDMORE) || maxreached || error) { 1617 sounsplice(so, sosp, 0); 1618 return (0); 1619 } 1620 if (timerisset(&so->so_idletv)) 1621 timeout_add_tv(&so->so_idleto, &so->so_idletv); 1622 return (1); 1623 } 1624 1625 #endif /* SOCKET_SPLICE */ 1626 1627 void 1628 sorwakeup(struct socket *so) 1629 { 1630 soassertlocked(so); 1631 1632 #ifdef SOCKET_SPLICE 1633 if (so->so_rcv.sb_flags & SB_SPLICE) { 1634 /* 1635 * TCP has a sendbuffer that can handle multiple packets 1636 * at once. So queue the stream a bit to accumulate data. 1637 * The sosplice thread will call somove() later and send 1638 * the packets calling tcp_output() only once. 1639 * In the UDP case, send out the packets immediately. 1640 * Using a thread would make things slower. 1641 */ 1642 if (so->so_proto->pr_flags & PR_WANTRCVD) 1643 task_add(sosplice_taskq, &so->so_splicetask); 1644 else 1645 somove(so, M_DONTWAIT); 1646 } 1647 if (isspliced(so)) 1648 return; 1649 #endif 1650 sowakeup(so, &so->so_rcv); 1651 if (so->so_upcall) 1652 (*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT); 1653 } 1654 1655 void 1656 sowwakeup(struct socket *so) 1657 { 1658 soassertlocked(so); 1659 1660 #ifdef SOCKET_SPLICE 1661 if (so->so_snd.sb_flags & SB_SPLICE) 1662 task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask); 1663 if (issplicedback(so)) 1664 return; 1665 #endif 1666 sowakeup(so, &so->so_snd); 1667 } 1668 1669 int 1670 sosetopt(struct socket *so, int level, int optname, struct mbuf *m) 1671 { 1672 int error = 0; 1673 1674 soassertlocked(so); 1675 1676 if (level != SOL_SOCKET) { 1677 if (so->so_proto->pr_ctloutput) { 1678 error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, 1679 level, optname, m); 1680 return (error); 1681 } 1682 error = ENOPROTOOPT; 1683 } else { 1684 switch (optname) { 1685 case SO_BINDANY: 1686 if ((error = suser(curproc)) != 0) /* XXX */ 1687 return (error); 1688 break; 1689 } 1690 1691 switch (optname) { 1692 1693 case SO_LINGER: 1694 if (m == NULL || m->m_len != sizeof (struct linger) || 1695 mtod(m, struct linger *)->l_linger < 0 || 1696 mtod(m, struct linger *)->l_linger > SHRT_MAX) 1697 return (EINVAL); 1698 so->so_linger = mtod(m, struct linger *)->l_linger; 1699 /* FALLTHROUGH */ 1700 1701 case SO_BINDANY: 1702 case SO_DEBUG: 1703 case SO_KEEPALIVE: 1704 case SO_USELOOPBACK: 1705 case SO_BROADCAST: 1706 case SO_REUSEADDR: 1707 case SO_REUSEPORT: 1708 case SO_OOBINLINE: 1709 case SO_TIMESTAMP: 1710 case SO_ZEROIZE: 1711 if (m == NULL || m->m_len < sizeof (int)) 1712 return (EINVAL); 1713 if (*mtod(m, int *)) 1714 so->so_options |= optname; 1715 else 1716 so->so_options &= ~optname; 1717 break; 1718 1719 case SO_DONTROUTE: 1720 if (m == NULL || m->m_len < sizeof (int)) 1721 return (EINVAL); 1722 if (*mtod(m, int *)) 1723 error = EOPNOTSUPP; 1724 break; 1725 1726 case SO_SNDBUF: 1727 case SO_RCVBUF: 1728 case SO_SNDLOWAT: 1729 case SO_RCVLOWAT: 1730 { 1731 u_long cnt; 1732 1733 if (m == NULL || m->m_len < sizeof (int)) 1734 return (EINVAL); 1735 cnt = *mtod(m, int *); 1736 if ((long)cnt <= 0) 1737 cnt = 1; 1738 switch (optname) { 1739 1740 case SO_SNDBUF: 1741 if (so->so_state & SS_CANTSENDMORE) 1742 return (EINVAL); 1743 if (sbcheckreserve(cnt, 
so->so_snd.sb_wat) || 1744 sbreserve(so, &so->so_snd, cnt)) 1745 return (ENOBUFS); 1746 so->so_snd.sb_wat = cnt; 1747 break; 1748 1749 case SO_RCVBUF: 1750 if (so->so_state & SS_CANTRCVMORE) 1751 return (EINVAL); 1752 if (sbcheckreserve(cnt, so->so_rcv.sb_wat) || 1753 sbreserve(so, &so->so_rcv, cnt)) 1754 return (ENOBUFS); 1755 so->so_rcv.sb_wat = cnt; 1756 break; 1757 1758 case SO_SNDLOWAT: 1759 so->so_snd.sb_lowat = 1760 (cnt > so->so_snd.sb_hiwat) ? 1761 so->so_snd.sb_hiwat : cnt; 1762 break; 1763 case SO_RCVLOWAT: 1764 so->so_rcv.sb_lowat = 1765 (cnt > so->so_rcv.sb_hiwat) ? 1766 so->so_rcv.sb_hiwat : cnt; 1767 break; 1768 } 1769 break; 1770 } 1771 1772 case SO_SNDTIMEO: 1773 case SO_RCVTIMEO: 1774 { 1775 struct timeval tv; 1776 uint64_t nsecs; 1777 1778 if (m == NULL || m->m_len < sizeof (tv)) 1779 return (EINVAL); 1780 memcpy(&tv, mtod(m, struct timeval *), sizeof tv); 1781 if (!timerisvalid(&tv)) 1782 return (EINVAL); 1783 nsecs = TIMEVAL_TO_NSEC(&tv); 1784 if (nsecs == UINT64_MAX) 1785 return (EDOM); 1786 if (nsecs == 0) 1787 nsecs = INFSLP; 1788 switch (optname) { 1789 1790 case SO_SNDTIMEO: 1791 so->so_snd.sb_timeo_nsecs = nsecs; 1792 break; 1793 case SO_RCVTIMEO: 1794 so->so_rcv.sb_timeo_nsecs = nsecs; 1795 break; 1796 } 1797 break; 1798 } 1799 1800 case SO_RTABLE: 1801 if (so->so_proto->pr_domain && 1802 so->so_proto->pr_domain->dom_protosw && 1803 so->so_proto->pr_ctloutput) { 1804 struct domain *dom = so->so_proto->pr_domain; 1805 1806 level = dom->dom_protosw->pr_protocol; 1807 error = (*so->so_proto->pr_ctloutput) 1808 (PRCO_SETOPT, so, level, optname, m); 1809 return (error); 1810 } 1811 error = ENOPROTOOPT; 1812 break; 1813 1814 #ifdef SOCKET_SPLICE 1815 case SO_SPLICE: 1816 if (m == NULL) { 1817 error = sosplice(so, -1, 0, NULL); 1818 } else if (m->m_len < sizeof(int)) { 1819 return (EINVAL); 1820 } else if (m->m_len < sizeof(struct splice)) { 1821 error = sosplice(so, *mtod(m, int *), 0, NULL); 1822 } else { 1823 error = sosplice(so, 1824 mtod(m, struct splice *)->sp_fd, 1825 mtod(m, struct splice *)->sp_max, 1826 &mtod(m, struct splice *)->sp_idle); 1827 } 1828 break; 1829 #endif /* SOCKET_SPLICE */ 1830 1831 default: 1832 error = ENOPROTOOPT; 1833 break; 1834 } 1835 if (error == 0 && so->so_proto->pr_ctloutput) { 1836 (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, 1837 level, optname, m); 1838 } 1839 } 1840 1841 return (error); 1842 } 1843 1844 int 1845 sogetopt(struct socket *so, int level, int optname, struct mbuf *m) 1846 { 1847 int error = 0; 1848 1849 soassertlocked(so); 1850 1851 if (level != SOL_SOCKET) { 1852 if (so->so_proto->pr_ctloutput) { 1853 m->m_len = 0; 1854 1855 error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so, 1856 level, optname, m); 1857 if (error) 1858 return (error); 1859 return (0); 1860 } else 1861 return (ENOPROTOOPT); 1862 } else { 1863 m->m_len = sizeof (int); 1864 1865 switch (optname) { 1866 1867 case SO_LINGER: 1868 m->m_len = sizeof (struct linger); 1869 mtod(m, struct linger *)->l_onoff = 1870 so->so_options & SO_LINGER; 1871 mtod(m, struct linger *)->l_linger = so->so_linger; 1872 break; 1873 1874 case SO_BINDANY: 1875 case SO_USELOOPBACK: 1876 case SO_DEBUG: 1877 case SO_KEEPALIVE: 1878 case SO_REUSEADDR: 1879 case SO_REUSEPORT: 1880 case SO_BROADCAST: 1881 case SO_OOBINLINE: 1882 case SO_TIMESTAMP: 1883 case SO_ZEROIZE: 1884 *mtod(m, int *) = so->so_options & optname; 1885 break; 1886 1887 case SO_DONTROUTE: 1888 *mtod(m, int *) = 0; 1889 break; 1890 1891 case SO_TYPE: 1892 *mtod(m, int *) = so->so_type; 1893 break; 1894 1895 
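/* SO_ERROR is read-and-clear: fetching it resets the pending error. */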
case SO_ERROR: 1896 *mtod(m, int *) = so->so_error; 1897 so->so_error = 0; 1898 break; 1899 1900 case SO_DOMAIN: 1901 *mtod(m, int *) = so->so_proto->pr_domain->dom_family; 1902 break; 1903 1904 case SO_PROTOCOL: 1905 *mtod(m, int *) = so->so_proto->pr_protocol; 1906 break; 1907 1908 case SO_SNDBUF: 1909 *mtod(m, int *) = so->so_snd.sb_hiwat; 1910 break; 1911 1912 case SO_RCVBUF: 1913 *mtod(m, int *) = so->so_rcv.sb_hiwat; 1914 break; 1915 1916 case SO_SNDLOWAT: 1917 *mtod(m, int *) = so->so_snd.sb_lowat; 1918 break; 1919 1920 case SO_RCVLOWAT: 1921 *mtod(m, int *) = so->so_rcv.sb_lowat; 1922 break; 1923 1924 case SO_SNDTIMEO: 1925 case SO_RCVTIMEO: 1926 { 1927 struct timeval tv; 1928 uint64_t nsecs = (optname == SO_SNDTIMEO ? 1929 so->so_snd.sb_timeo_nsecs : 1930 so->so_rcv.sb_timeo_nsecs); 1931 1932 m->m_len = sizeof(struct timeval); 1933 memset(&tv, 0, sizeof(tv)); 1934 if (nsecs != INFSLP) 1935 NSEC_TO_TIMEVAL(nsecs, &tv); 1936 memcpy(mtod(m, struct timeval *), &tv, sizeof tv); 1937 break; 1938 } 1939 1940 case SO_RTABLE: 1941 if (so->so_proto->pr_domain && 1942 so->so_proto->pr_domain->dom_protosw && 1943 so->so_proto->pr_ctloutput) { 1944 struct domain *dom = so->so_proto->pr_domain; 1945 1946 level = dom->dom_protosw->pr_protocol; 1947 error = (*so->so_proto->pr_ctloutput) 1948 (PRCO_GETOPT, so, level, optname, m); 1949 if (error) 1950 return (error); 1951 break; 1952 } 1953 return (ENOPROTOOPT); 1954 1955 #ifdef SOCKET_SPLICE 1956 case SO_SPLICE: 1957 { 1958 off_t len; 1959 1960 m->m_len = sizeof(off_t); 1961 len = so->so_sp ? so->so_sp->ssp_len : 0; 1962 memcpy(mtod(m, off_t *), &len, sizeof(off_t)); 1963 break; 1964 } 1965 #endif /* SOCKET_SPLICE */ 1966 1967 case SO_PEERCRED: 1968 if (so->so_proto->pr_protocol == AF_UNIX) { 1969 struct unpcb *unp = sotounpcb(so); 1970 1971 if (unp->unp_flags & UNP_FEIDS) { 1972 m->m_len = sizeof(unp->unp_connid); 1973 memcpy(mtod(m, caddr_t), 1974 &(unp->unp_connid), m->m_len); 1975 break; 1976 } 1977 return (ENOTCONN); 1978 } 1979 return (EOPNOTSUPP); 1980 1981 default: 1982 return (ENOPROTOOPT); 1983 } 1984 return (0); 1985 } 1986 } 1987 1988 void 1989 sohasoutofband(struct socket *so) 1990 { 1991 pgsigio(&so->so_sigio, SIGURG, 0); 1992 selwakeup(&so->so_rcv.sb_sel); 1993 } 1994 1995 int 1996 soo_kqfilter(struct file *fp, struct knote *kn) 1997 { 1998 struct socket *so = kn->kn_fp->f_data; 1999 struct sockbuf *sb; 2000 2001 KERNEL_ASSERT_LOCKED(); 2002 2003 switch (kn->kn_filter) { 2004 case EVFILT_READ: 2005 if (so->so_options & SO_ACCEPTCONN) 2006 kn->kn_fop = &solisten_filtops; 2007 else 2008 kn->kn_fop = &soread_filtops; 2009 sb = &so->so_rcv; 2010 break; 2011 case EVFILT_WRITE: 2012 kn->kn_fop = &sowrite_filtops; 2013 sb = &so->so_snd; 2014 break; 2015 default: 2016 return (EINVAL); 2017 } 2018 2019 SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext); 2020 sb->sb_flagsintr |= SB_KNOTE; 2021 2022 return (0); 2023 } 2024 2025 void 2026 filt_sordetach(struct knote *kn) 2027 { 2028 struct socket *so = kn->kn_fp->f_data; 2029 2030 KERNEL_ASSERT_LOCKED(); 2031 2032 SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext); 2033 if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note)) 2034 so->so_rcv.sb_flagsintr &= ~SB_KNOTE; 2035 } 2036 2037 int 2038 filt_soread(struct knote *kn, long hint) 2039 { 2040 struct socket *so = kn->kn_fp->f_data; 2041 int s, rv; 2042 2043 if ((hint & NOTE_SUBMIT) == 0) 2044 s = solock(so); 2045 kn->kn_data = so->so_rcv.sb_cc; 2046 #ifdef SOCKET_SPLICE 2047 if (isspliced(so)) { 2048 rv = 0; 2049 } else 2050 #endif /* 
SOCKET_SPLICE */ 2051 if (so->so_state & SS_CANTRCVMORE) { 2052 kn->kn_flags |= EV_EOF; 2053 kn->kn_fflags = so->so_error; 2054 rv = 1; 2055 } else if (so->so_error) { /* temporary udp error */ 2056 rv = 1; 2057 } else if (kn->kn_sfflags & NOTE_LOWAT) { 2058 rv = (kn->kn_data >= kn->kn_sdata); 2059 } else { 2060 rv = (kn->kn_data >= so->so_rcv.sb_lowat); 2061 } 2062 if ((hint & NOTE_SUBMIT) == 0) 2063 sounlock(so, s); 2064 2065 return rv; 2066 } 2067 2068 void 2069 filt_sowdetach(struct knote *kn) 2070 { 2071 struct socket *so = kn->kn_fp->f_data; 2072 2073 KERNEL_ASSERT_LOCKED(); 2074 2075 SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext); 2076 if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note)) 2077 so->so_snd.sb_flagsintr &= ~SB_KNOTE; 2078 } 2079 2080 int 2081 filt_sowrite(struct knote *kn, long hint) 2082 { 2083 struct socket *so = kn->kn_fp->f_data; 2084 int s, rv; 2085 2086 if ((hint & NOTE_SUBMIT) == 0) 2087 s = solock(so); 2088 kn->kn_data = sbspace(so, &so->so_snd); 2089 if (so->so_state & SS_CANTSENDMORE) { 2090 kn->kn_flags |= EV_EOF; 2091 kn->kn_fflags = so->so_error; 2092 rv = 1; 2093 } else if (so->so_error) { /* temporary udp error */ 2094 rv = 1; 2095 } else if (((so->so_state & SS_ISCONNECTED) == 0) && 2096 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 2097 rv = 0; 2098 } else if (kn->kn_sfflags & NOTE_LOWAT) { 2099 rv = (kn->kn_data >= kn->kn_sdata); 2100 } else { 2101 rv = (kn->kn_data >= so->so_snd.sb_lowat); 2102 } 2103 if ((hint & NOTE_SUBMIT) == 0) 2104 sounlock(so, s); 2105 2106 return (rv); 2107 } 2108 2109 int 2110 filt_solisten(struct knote *kn, long hint) 2111 { 2112 struct socket *so = kn->kn_fp->f_data; 2113 int s; 2114 2115 if ((hint & NOTE_SUBMIT) == 0) 2116 s = solock(so); 2117 kn->kn_data = so->so_qlen; 2118 if ((hint & NOTE_SUBMIT) == 0) 2119 sounlock(so, s); 2120 2121 return (kn->kn_data != 0); 2122 } 2123 2124 #ifdef DDB 2125 void 2126 sobuf_print(struct sockbuf *, 2127 int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))); 2128 2129 void 2130 sobuf_print(struct sockbuf *sb, 2131 int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2)))) 2132 { 2133 (*pr)("\tsb_cc: %lu\n", sb->sb_cc); 2134 (*pr)("\tsb_datacc: %lu\n", sb->sb_datacc); 2135 (*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat); 2136 (*pr)("\tsb_wat: %lu\n", sb->sb_wat); 2137 (*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt); 2138 (*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax); 2139 (*pr)("\tsb_lowat: %ld\n", sb->sb_lowat); 2140 (*pr)("\tsb_mb: %p\n", sb->sb_mb); 2141 (*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail); 2142 (*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord); 2143 (*pr)("\tsb_sel: ...\n"); 2144 (*pr)("\tsb_flagsintr: %d\n", sb->sb_flagsintr); 2145 (*pr)("\tsb_flags: %i\n", sb->sb_flags); 2146 (*pr)("\tsb_timeo_nsecs: %llu\n", sb->sb_timeo_nsecs); 2147 } 2148 2149 void 2150 so_print(void *v, 2151 int (*pr)(const char *, ...) 
__attribute__((__format__(__kprintf__,1,2)))) 2152 { 2153 struct socket *so = v; 2154 2155 (*pr)("socket %p\n", so); 2156 (*pr)("so_type: %i\n", so->so_type); 2157 (*pr)("so_options: 0x%04x\n", so->so_options); /* %b */ 2158 (*pr)("so_linger: %i\n", so->so_linger); 2159 (*pr)("so_state: 0x%04x\n", so->so_state); 2160 (*pr)("so_pcb: %p\n", so->so_pcb); 2161 (*pr)("so_proto: %p\n", so->so_proto); 2162 (*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio); 2163 2164 (*pr)("so_head: %p\n", so->so_head); 2165 (*pr)("so_onq: %p\n", so->so_onq); 2166 (*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0)); 2167 (*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q)); 2168 (*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe)); 2169 (*pr)("so_q0len: %i\n", so->so_q0len); 2170 (*pr)("so_qlen: %i\n", so->so_qlen); 2171 (*pr)("so_qlimit: %i\n", so->so_qlimit); 2172 (*pr)("so_timeo: %i\n", so->so_timeo); 2173 (*pr)("so_oobmark: %lu\n", so->so_oobmark); 2174 2175 (*pr)("so_sp: %p\n", so->so_sp); 2176 if (so->so_sp != NULL) { 2177 (*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket); 2178 (*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback); 2179 (*pr)("\tssp_len: %lld\n", 2180 (unsigned long long)so->so_sp->ssp_len); 2181 (*pr)("\tssp_max: %lld\n", 2182 (unsigned long long)so->so_sp->ssp_max); 2183 (*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec, 2184 so->so_sp->ssp_idletv.tv_usec); 2185 (*pr)("\tssp_idleto: %spending (@%i)\n", 2186 timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ", 2187 so->so_sp->ssp_idleto.to_time); 2188 } 2189 2190 (*pr)("so_rcv:\n"); 2191 sobuf_print(&so->so_rcv, pr); 2192 (*pr)("so_snd:\n"); 2193 sobuf_print(&so->so_snd, pr); 2194 2195 (*pr)("so_upcall: %p so_upcallarg: %p\n", 2196 so->so_upcall, so->so_upcallarg); 2197 2198 (*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid); 2199 (*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid); 2200 (*pr)("so_cpid: %d\n", so->so_cpid); 2201 } 2202 #endif 2203