/*	$OpenBSD: uipc_socket.c,v 1.263 2021/05/28 16:24:53 visa Exp $	*/
/*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/unpcb.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <net/if.h>
#include <sys/pool.h>
#include <sys/atomic.h>
#include <sys/rwlock.h>
#include <sys/time.h>

#ifdef DDB
#include <machine/db_machdep.h>
#endif

void	sbsync(struct sockbuf *, struct mbuf *);

int	sosplice(struct socket *, int, off_t, struct timeval *);
void	sounsplice(struct socket *, struct socket *, int);
void	soidle(void *);
void	sotask(void *);
void	soreaper(void *);
void	soput(void *);
int	somove(struct socket *, int);
void	sorflush(struct socket *);

void	filt_sordetach(struct knote *kn);
int	filt_soread(struct knote *kn, long hint);
int	filt_soreadmodify(struct kevent *kev, struct knote *kn);
int	filt_soreadprocess(struct knote *kn, struct kevent *kev);
int	filt_soread_common(struct knote *kn, struct socket *so);
void	filt_sowdetach(struct knote *kn);
int	filt_sowrite(struct knote *kn, long hint);
int	filt_sowritemodify(struct kevent *kev, struct knote *kn);
int	filt_sowriteprocess(struct knote *kn, struct kevent *kev);
int	filt_sowrite_common(struct knote *kn, struct socket *so);
int	filt_solisten(struct knote *kn, long hint);
int	filt_solistenmodify(struct kevent *kev, struct knote *kn);
int	filt_solistenprocess(struct knote *kn, struct kevent *kev);
int	filt_solisten_common(struct knote *kn, struct socket *so);

const struct filterops solisten_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_solisten,
	.f_modify	= filt_solistenmodify,
	.f_process	= filt_solistenprocess,
};

const struct filterops soread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_soread,
	.f_modify	= filt_soreadmodify,
	.f_process	= filt_soreadprocess,
};

const struct filterops sowrite_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_sowdetach,
	.f_event	= filt_sowrite,
	.f_modify	= filt_sowritemodify,
	.f_process	= filt_sowriteprocess,
};

const struct filterops soexcept_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_sordetach,
	.f_event	= filt_soread,
	.f_modify	= filt_soreadmodify,
	.f_process	= filt_soreadprocess,
};

#ifndef SOMINCONN
#define SOMINCONN	80
#endif /* SOMINCONN */

int	somaxconn = SOMAXCONN;
int	sominconn = SOMINCONN;

struct pool socket_pool;
#ifdef SOCKET_SPLICE
struct pool sosplice_pool;
struct taskq *sosplice_taskq;
struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk");
#endif

void
soinit(void)
{
	pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0,
	    "sockpl", NULL);
#ifdef SOCKET_SPLICE
	pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0,
	    "sosppl", NULL);
#endif
}
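/*
 * Illustrative sketch (not part of this file; hypothetical descriptors):
 * the usual userland calls and the socket-layer routines below that back
 * them, assuming the standard sys_socket.c dispatch:
 *
 *	int s = socket(AF_INET, SOCK_STREAM, 0);	-> socreate()
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));	-> sobind()
 *	listen(s, 128);					-> solisten()
 *	int c = accept(s, NULL, NULL);			-> soaccept()
 *	close(c);					-> soclose()
 */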
/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
int
socreate(int dom, struct socket **aso, int type, int proto)
{
	struct proc *p = curproc;		/* XXX */
	const struct protosw *prp;
	struct socket *so;
	int error, s;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL || prp->pr_attach == NULL)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = pool_get(&socket_pool, PR_WAITOK | PR_ZERO);
	rw_init(&so->so_lock, "solock");
	sigio_init(&so->so_sigio);
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
	so->so_type = type;
	if (suser(p) == 0)
		so->so_state = SS_PRIV;
	so->so_ruid = p->p_ucred->cr_ruid;
	so->so_euid = p->p_ucred->cr_uid;
	so->so_rgid = p->p_ucred->cr_rgid;
	so->so_egid = p->p_ucred->cr_gid;
	so->so_cpid = p->p_p->ps_pid;
	so->so_proto = prp;
	so->so_snd.sb_timeo_nsecs = INFSLP;
	so->so_rcv.sb_timeo_nsecs = INFSLP;

	s = solock(so);
	error = (*prp->pr_attach)(so, proto);
	if (error) {
		so->so_state |= SS_NOFDREF;
		/* sofree() calls sounlock(). */
		sofree(so, s);
		return (error);
	}
	sounlock(so, s);
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct mbuf *nam, struct proc *p)
{
	int error;

	soassertlocked(so);

	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
	return (error);
}

int
solisten(struct socket *so, int backlog)
{
	int error;

	soassertlocked(so);

	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING))
		return (EINVAL);
#ifdef SOCKET_SPLICE
	if (isspliced(so) || issplicedback(so))
		return (EOPNOTSUPP);
#endif /* SOCKET_SPLICE */
	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
	    curproc);
	if (error)
		return (error);
	if (TAILQ_FIRST(&so->so_q) == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	if (backlog < sominconn)
		backlog = sominconn;
	so->so_qlimit = backlog;
	return (0);
}
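/*
 * Illustrative sketch (hypothetical descriptor s; assumes default
 * somaxconn/sominconn): solisten() clamps the listen(2) backlog into
 * the range [sominconn, somaxconn], so for example:
 *
 *	listen(s, -1);	-> so_qlimit = somaxconn (negative maps to max)
 *	listen(s, 5);	-> so_qlimit = sominconn (raised to the minimum)
 *	listen(s, 100);	-> so_qlimit = 100 (already within range)
 */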
#define SOSP_FREEING_READ	1
#define SOSP_FREEING_WRITE	2
void
sofree(struct socket *so, int s)
{
	soassertlocked(so);

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		sounlock(so, s);
		return;
	}
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0)) {
			sounlock(so, s);
			return;
		}
	}
	sigio_free(&so->so_sigio);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		if (issplicedback(so)) {
			int freeing = SOSP_FREEING_WRITE;

			if (so->so_sp->ssp_soback == so)
				freeing |= SOSP_FREEING_READ;
			sounsplice(so->so_sp->ssp_soback, so, freeing);
		}
		if (isspliced(so)) {
			int freeing = SOSP_FREEING_READ;

			if (so == so->so_sp->ssp_socket)
				freeing |= SOSP_FREEING_WRITE;
			sounsplice(so, so->so_sp->ssp_socket, freeing);
		}
	}
#endif /* SOCKET_SPLICE */
	sbrelease(so, &so->so_snd);
	sorflush(so);
	sounlock(so, s);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		/* Reuse splice idle, sounsplice() has been called before. */
		timeout_set_proc(&so->so_sp->ssp_idleto, soreaper, so);
		timeout_add(&so->so_sp->ssp_idleto, 0);
	} else
#endif /* SOCKET_SPLICE */
	{
		pool_put(&socket_pool, so);
	}
}

static inline uint64_t
solinger_nsec(struct socket *so)
{
	if (so->so_linger == 0)
		return INFSLP;

	return SEC_TO_NSEC(so->so_linger);
}
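/*
 * Illustrative sketch (hypothetical descriptor s): with SO_LINGER set,
 * soclose() below sleeps in "netcls" for at most solinger_nsec(), i.e.
 * l_linger seconds, while the disconnect completes; l_linger == 0 maps
 * to INFSLP, waiting without a time limit:
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);	-> blocks up to 5 seconds in soclose()
 */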
/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int flags)
{
	struct socket *so2;
	int s, error = 0;

	s = solock(so);
	/* Revoke async IO early. There is a final revocation in sofree(). */
	sigio_free(&so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) {
			(void) soqremque(so2, 0);
			(void) soabort(so2);
		}
		while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) {
			(void) soqremque(so2, 1);
			(void) soabort(so2);
		}
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (flags & MSG_DONTWAIT))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sosleep_nsec(so, &so->so_timeo,
				    PSOCK | PCATCH, "netcls",
				    solinger_nsec(so));
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;
		KASSERT(so->so_proto->pr_detach);
		error2 = (*so->so_proto->pr_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state |= SS_NOFDREF;
	/* sofree() calls sounlock(). */
	sofree(so, s);
	return (error);
}

int
soabort(struct socket *so)
{
	soassertlocked(so);

	return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL,
	    curproc);
}

int
soaccept(struct socket *so, struct mbuf *nam)
{
	int error = 0;

	soassertlocked(so);

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL,
		    nam, NULL, curproc);
	else
		error = ECONNABORTED;
	return (error);
}

int
soconnect(struct socket *so, struct mbuf *nam)
{
	int error;

	soassertlocked(so);

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    NULL, nam, NULL, curproc);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int s, error;

	s = solock(so1);
	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL,
	    (struct mbuf *)so2, NULL, curproc);
	sounlock(so1, s);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	soassertlocked(so);

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL,
	    NULL, curproc);
	return (error);
}
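/*
 * Illustrative sketch (hypothetical UDP descriptor s): for datagram
 * protocols soconnect() above first dissolves an existing association,
 * which is how userland "disconnects" a connected UDP socket; the
 * conventional null address is one with sa_family = AF_UNSPEC:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(s, &sa, sizeof(sa));	-> sodisconnect(), then the
 *					   protocol's PRU_CONNECT
 */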
int m_getuio(struct mbuf **, int, long, struct uio *);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
    struct mbuf *control, int flags)
{
	long space, clen = 0;
	size_t resid;
	int error, s;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/* MSG_EOR on a SOCK_STREAM socket is invalid. */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		m_freem(top);
		m_freem(control);
		return (EINVAL);
	}
	if (uio && uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgsnd++;
	if (control) {
		/*
		 * In theory clen should be unsigned (since control->m_len is).
		 * However, space must be signed, as it might be less than 0
		 * if we over-committed, and we must use a signed comparison
		 * of space and clen.
		 */
		clen = control->m_len;
		/* reserve extra space for AF_UNIX's internalize */
		if (so->so_proto->pr_domain->dom_family == AF_UNIX &&
		    clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) &&
		    mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS)
			clen = CMSG_SPACE(
			    (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) *
			    (sizeof(struct fdpass) / sizeof(int)));
	}

#define	snderr(errno)	{ error = errno; goto release; }

	s = solock(so);
restart:
	if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	so->so_state |= SS_ISSENDING;
	do {
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			snderr(error);
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (!(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == NULL)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(so, &so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if (so->so_proto->pr_domain->dom_family == AF_UNIX) {
			if (atomic && resid > so->so_snd.sb_hiwat)
				snderr(EMSGSIZE);
		} else {
			if (clen > so->so_snd.sb_hiwat ||
			    (atomic && resid > so->so_snd.sb_hiwat - clen))
				snderr(EMSGSIZE);
		}
		if (space < clen ||
		    (space - clen < resid &&
		    (atomic || space < so->so_snd.sb_lowat))) {
			if (flags & MSG_DONTWAIT)
				snderr(EWOULDBLOCK);
			sbunlock(so, &so->so_snd);
			error = sbwait(so, &so->so_snd);
			so->so_state &= ~SS_ISSENDING;
			if (error)
				goto out;
			goto restart;
		}
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				sounlock(so, s);
				error = m_getuio(&top, atomic, space, uio);
				s = solock(so);
				if (error)
					goto release;
				space -= top->m_pkthdr.len;
				resid = uio->uio_resid;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			}
			if (resid == 0)
				so->so_state &= ~SS_ISSENDING;
			if (top && so->so_options & SO_ZEROIZE)
				top->m_flags |= M_ZEROIZE;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control, curproc);
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	so->so_state &= ~SS_ISSENDING;
	sbunlock(so, &so->so_snd);
out:
	sounlock(so, s);
	m_freem(top);
	m_freem(control);
	return (error);
}
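/*
 * Illustrative sketch of the SCM_RIGHTS space reservation above: an
 * SCM_RIGHTS control message carrying N file descriptors arrives with a
 * payload of N * sizeof(int) bytes, but unp_internalize() rewrites each
 * int fd into a struct fdpass.  The recomputation therefore charges the
 * send buffer roughly CMSG_SPACE(N * sizeof(struct fdpass)) instead of
 * the smaller on-the-wire size, so the buffer accounting already covers
 * the expanded form.
 */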
int
m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio)
{
	struct mbuf *m, *top = NULL;
	struct mbuf **nextp = &top;
	u_long len, mlen;
	size_t resid = uio->uio_resid;
	int error;

	do {
		if (top == NULL) {
			MGETHDR(m, M_WAIT, MT_DATA);
			mlen = MHLEN;
			m->m_pkthdr.len = 0;
			m->m_pkthdr.ph_ifidx = 0;
		} else {
			MGET(m, M_WAIT, MT_DATA);
			mlen = MLEN;
		}
		/* chain mbuf together */
		*nextp = m;
		nextp = &m->m_next;

		resid = ulmin(resid, space);
		if (resid >= MINCLSIZE) {
			MCLGETL(m, M_NOWAIT, ulmin(resid, MAXMCLBYTES));
			if ((m->m_flags & M_EXT) == 0)
				MCLGETL(m, M_NOWAIT, MCLBYTES);
			if ((m->m_flags & M_EXT) == 0)
				goto nopages;
			mlen = m->m_ext.ext_size;
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m->m_data += max_hdr;
		} else {
nopages:
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m_align(m, len);
		}

		error = uiomove(mtod(m, caddr_t), len, uio);
		if (error) {
			m_freem(top);
			return (error);
		}

		/* adjust counters */
		resid = uio->uio_resid;
		space -= len;
		m->m_len = len;
		top->m_pkthdr.len += len;

		/* Is there more space and more data? */
	} while (space > 0 && resid > 0);

	*mp = top;
	return 0;
}

/*
 * Following replacement or removal of the first mbuf on the first
 * mbuf chain of a socket buffer, push necessary state changes back
 * into the socket buffer so that other consumers see the values
 * consistently.  'nextrecord' is the caller's locally stored value of
 * the original value of sb->sb_mb->m_nextpkt which must be restored
 * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
 */
void
sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}
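/*
 * Illustrative sketch of an sbsync() call site (hypothetical local
 * names): a consumer that frees the leading mbuf of the first record
 * caches m_nextpkt first, then lets sbsync() repair the sockbuf:
 *
 *	struct mbuf *m = sb->sb_mb;
 *	struct mbuf *nextrecord = m->m_nextpkt;
 *
 *	sbfree(sb, m);
 *	sb->sb_mb = m_free(m);
 *	sbsync(sb, nextrecord);		restores m_nextpkt and fixes up
 *					sb_lastrecord/sb_mbtail as needed
 */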
/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network for the entire time here, we release
 * the solock() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp,
    socklen_t controllen)
{
	struct mbuf *m, **mp;
	struct mbuf *cm;
	u_long len, offset, moff;
	int flags, error, s, type, uio_error = 0;
	const struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	size_t resid, orig_resid = uio->uio_resid;

	mp = mp0;
	if (paddr)
		*paddr = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		s = solock(so);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, curproc);
		sounlock(so, s);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    ulmin(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;

	s = solock(so);
restart:
	if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(so, s);
		return (error);
	}

	m = so->so_rcv.sb_mb;
#ifdef SOCKET_SPLICE
	if (isspliced(so))
		m = NULL;
#endif /* SOCKET_SPLICE */
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
#ifdef SOCKET_SPLICE
		    if (!isspliced(so))
#endif /* SOCKET_SPLICE */
			panic("receive 1: so %p, so_type %d, sb_cc %lu",
			    so, so->so_type, so->so_rcv.sb_cc);
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else if (so->so_rcv.sb_cc == 0)
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0 && controlp == NULL)
			goto release;
		if (flags & MSG_DONTWAIT) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(so, &so->so_rcv);
		error = sbwait(so, &so->so_rcv);
		if (error) {
			sounlock(so, s);
			return (error);
		}
		goto restart;
	}
dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before operations that
	 * may sleep, and re-reading them afterwards.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 */
	if (uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = NULL;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		int skip = 0;
		if (flags & MSG_PEEK) {
			if (mtod(m, struct cmsghdr *)->cmsg_type ==
			    SCM_RIGHTS) {
				/* don't leak internalized SCM_RIGHTS msgs */
				skip = 1;
			} else if (controlp)
				*controlp = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m->m_next;
			m->m_nextpkt = m->m_next = NULL;
			cm = m;
			m = so->so_rcv.sb_mb;
			sbsync(&so->so_rcv, nextrecord);
			if (controlp) {
				if (pr->pr_domain->dom_externalize) {
					error =
					    (*pr->pr_domain->dom_externalize)
					    (cm, controllen, flags);
				}
				*controlp = cm;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (pr->pr_domain->dom_dispose)
					pr->pr_domain->dom_dispose(cm);
				m_free(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		if (controlp && !skip)
			controlp = &(*controlp)->m_next;
		orig_resid = 0;
	}

	/* If m is non-NULL, we have some data to read. */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
		if (m->m_flags & M_BCAST)
			flags |= MSG_BCAST;
		if (m->m_flags & M_MCAST)
			flags |= MSG_MCAST;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");

	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA) {
			break;
		} else if (m->m_type == MT_CONTROL) {
			/*
			 * If there is more than one control message in the
			 * stream, we do a short read.  Next can be received
			 * or disposed by another system call.
			 */
			break;
#ifdef DIAGNOSTIC
		} else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) {
			panic("receive 3: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		}
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == NULL && uio_error == 0) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			resid = uio->uio_resid;
			sounlock(so, s);
			uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			s = solock(so);
			if (uio_error)
				uio->uio_resid = resid - len;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
				orig_resid = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK) {
				moff += len;
				orig_resid = 0;
			} else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
				so->so_rcv.sb_datacc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			error = sbwait(so, &so->so_rcv);
			if (error) {
				sbunlock(so, &so->so_rcv);
				sounlock(so, s);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
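	/*
	 * Illustrative sketch (hypothetical UDP descriptor s): for
	 * PR_ATOMIC protocols the branch above discards whatever part of
	 * the record did not fit and reports the truncation to userland:
	 *
	 *	char buf[8];
	 *	n = recv(s, buf, sizeof(buf), 0);	16-byte datagram
	 *	-> n == 8, remainder dropped, MSG_TRUNC set in msg_flags
	 *	   (visible via recvmsg(2))
	 */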
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, curproc);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(so, &so->so_rcv);
		goto restart;
	}

	if (uio_error)
		error = uio_error;

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(so, &so->so_rcv);
	sounlock(so, s);
	return (error);
}

int
soshutdown(struct socket *so, int how)
{
	const struct protosw *pr = so->so_proto;
	int s, error = 0;

	s = solock(so);
	switch (how) {
	case SHUT_RD:
		sorflush(so);
		break;
	case SHUT_RDWR:
		sorflush(so);
		/* FALLTHROUGH */
	case SHUT_WR:
		error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL,
		    curproc);
		break;
	default:
		error = EINVAL;
		break;
	}
	sounlock(so, s);

	return (error);
}

void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	struct mbuf *m;
	const struct protosw *pr = so->so_proto;
	int error;

	sb->sb_flags |= SB_NOINTR;
	error = sblock(so, sb, M_WAITOK);
	/* with SB_NOINTR and M_WAITOK sblock() must not fail */
	KASSERT(error == 0);
	socantrcvmore(so);
	m = sb->sb_mb;
	memset(&sb->sb_startzero, 0,
	    (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero);
	sb->sb_timeo_nsecs = INFSLP;
	sbunlock(so, sb);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(m);
	m_purge(m);
}

#ifdef SOCKET_SPLICE

#define so_splicelen	so_sp->ssp_len
#define so_splicemax	so_sp->ssp_max
#define so_idletv	so_sp->ssp_idletv
#define so_idleto	so_sp->ssp_idleto
#define so_splicetask	so_sp->ssp_task
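/*
 * Illustrative sketch (hypothetical descriptors src and drn): userland
 * reaches sosplice() below through setsockopt(2) with SO_SPLICE, as
 * handled in sosetopt() further down; a NULL option value unsplices:
 *
 *	struct splice sp;
 *
 *	memset(&sp, 0, sizeof(sp));
 *	sp.sp_fd = drn;
 *	setsockopt(src, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp));
 *		-> kernel moves data from src to drn
 *	setsockopt(src, SOL_SOCKET, SO_SPLICE, NULL, 0);
 *		-> dissolves the splice
 */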
int
sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
{
	struct file *fp;
	struct socket *sosp;
	struct sosplice *sp;
	struct taskq *tq;
	int error = 0;

	soassertlocked(so);

	if (sosplice_taskq == NULL) {
		rw_enter_write(&sosplice_lock);
		if (sosplice_taskq == NULL) {
			tq = taskq_create("sosplice", 1, IPL_SOFTNET,
			    TASKQ_MPSAFE);
			/* Ensure the taskq is fully visible to other CPUs. */
			membar_producer();
			sosplice_taskq = tq;
		}
		rw_exit_write(&sosplice_lock);
	}
	if (sosplice_taskq == NULL)
		return (ENOMEM);

	if ((so->so_proto->pr_flags & PR_SPLICE) == 0)
		return (EPROTONOSUPPORT);
	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (ENOTCONN);
	if (so->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (so->so_sp == NULL)
			so->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* If no fd is given, unsplice by removing existing link. */
	if (fd < 0) {
		/* Lock receive buffer. */
		if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
			return (error);
		}
		if (so->so_sp->ssp_socket)
			sounsplice(so, so->so_sp->ssp_socket, 0);
		sbunlock(so, &so->so_rcv);
		return (0);
	}

	if (max && max < 0)
		return (EINVAL);

	if (tv && (tv->tv_sec < 0 || !timerisvalid(tv)))
		return (EINVAL);

	/* Find sosp, the drain socket where data will be spliced into. */
	if ((error = getsock(curproc, fd, &fp)) != 0)
		return (error);
	sosp = fp->f_data;
	if (sosp->so_proto->pr_usrreq != so->so_proto->pr_usrreq) {
		error = EPROTONOSUPPORT;
		goto frele;
	}
	if (sosp->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (sosp->so_sp == NULL)
			sosp->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* Lock both receive and send buffer. */
	if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
		goto frele;
	}
	if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) {
		sbunlock(so, &so->so_rcv);
		goto frele;
	}

	if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) {
		error = EBUSY;
		goto release;
	}
	if (sosp->so_options & SO_ACCEPTCONN) {
		error = EOPNOTSUPP;
		goto release;
	}
	if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) {
		error = ENOTCONN;
		goto release;
	}

	/* Splice so and sosp together. */
	so->so_sp->ssp_socket = sosp;
	sosp->so_sp->ssp_soback = so;
	so->so_splicelen = 0;
	so->so_splicemax = max;
	if (tv)
		so->so_idletv = *tv;
	else
		timerclear(&so->so_idletv);
	timeout_set_proc(&so->so_idleto, soidle, so);
	task_set(&so->so_splicetask, sotask, so);

	/*
	 * To prevent softnet interrupt from calling somove() while
	 * we sleep, the socket buffers are not marked as spliced yet.
	 */
	if (somove(so, M_WAIT)) {
		so->so_rcv.sb_flags |= SB_SPLICE;
		sosp->so_snd.sb_flags |= SB_SPLICE;
	}

release:
	sbunlock(sosp, &sosp->so_snd);
	sbunlock(so, &so->so_rcv);
frele:
	/*
	 * FRELE() must not be called with the socket lock held.  It is safe
	 * to release the lock here as long as no other operation happens on
	 * the socket when sosplice() returns.  The dance could be avoided by
	 * grabbing the socket lock inside this function.
	 */
	sounlock(so, SL_LOCKED);
	FRELE(fp, curproc);
	solock(so);
	return (error);
}

void
sounsplice(struct socket *so, struct socket *sosp, int freeing)
{
	soassertlocked(so);

	task_del(sosplice_taskq, &so->so_splicetask);
	timeout_del(&so->so_idleto);
	sosp->so_snd.sb_flags &= ~SB_SPLICE;
	so->so_rcv.sb_flags &= ~SB_SPLICE;
	so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
	/* Do not wakeup a socket that is about to be freed. */
	if ((freeing & SOSP_FREEING_READ) == 0 && soreadable(so))
		sorwakeup(so);
	if ((freeing & SOSP_FREEING_WRITE) == 0 && sowriteable(sosp))
		sowwakeup(sosp);
}
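/*
 * Illustrative sketch (hypothetical descriptors src and drn): a
 * non-zero sp_idle arms the soidle() timeout below; if the splice moves
 * no data for that long it is dissolved and the error is reported on
 * the source socket:
 *
 *	struct splice sp = { .sp_fd = drn, .sp_max = 0,
 *	    .sp_idle = { .tv_sec = 5, .tv_usec = 0 } };
 *
 *	setsockopt(src, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp));
 *	... after 5 idle seconds, read(src, ...) fails with ETIMEDOUT
 */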
void
soidle(void *arg)
{
	struct socket *so = arg;
	int s;

	s = solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		so->so_error = ETIMEDOUT;
		sounsplice(so, so->so_sp->ssp_socket, 0);
	}
	sounlock(so, s);
}

void
sotask(void *arg)
{
	struct socket *so = arg;
	int s;

	s = solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * We may not sleep here as sofree() and unsplice() may be
		 * called from softnet interrupt context.  This would remove
		 * the socket during somove().
		 */
		somove(so, M_DONTWAIT);
	}
	sounlock(so, s);

	/* Avoid user land starvation. */
	yield();
}

/*
 * The socket splicing task or idle timeout may sleep while grabbing the net
 * lock.  As sofree() can be called anytime, sotask() or soidle() could access
 * the socket memory of a freed socket after wakeup.  So delay the pool_put()
 * until all pending socket splicing tasks or timeouts have finished.  Do this
 * by scheduling it on the same threads.
 */
void
soreaper(void *arg)
{
	struct socket *so = arg;

	/* Reuse splice task, sounsplice() has been called before. */
	task_set(&so->so_sp->ssp_task, soput, so);
	task_add(sosplice_taskq, &so->so_sp->ssp_task);
}

void
soput(void *arg)
{
	struct socket *so = arg;

	pool_put(&sosplice_pool, so->so_sp);
	pool_put(&socket_pool, so);
}
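/*
 * Illustrative ordering sketch of the deferred free above: sofree()
 * schedules soreaper() on the splice timeout thread, which in turn
 * schedules soput() on the splice task queue, so the memory only
 * returns to the pools once both threads have drained any pending
 * soidle()/sotask() work for this socket:
 *
 *	sofree()   -> timeout_add(ssp_idleto -> soreaper)
 *	soreaper() -> task_add(sosplice_taskq, ssp_task -> soput)
 *	soput()    -> pool_put(sosplice_pool), pool_put(socket_pool)
 */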
/*
 * Move data from receive buffer of spliced source socket to send
 * buffer of drain socket.  Try to move as much as possible in one
 * big chunk.  It is a TCP only implementation.
 * Return value 0 means splicing has been finished, 1 continue.
 */
int
somove(struct socket *so, int wait)
{
	struct socket *sosp = so->so_sp->ssp_socket;
	struct mbuf *m, **mp, *nextrecord;
	u_long len, off, oobmark;
	long space;
	int error = 0, maxreached = 0;
	unsigned int state;

	soassertlocked(so);

nextpkt:
	if (so->so_error) {
		error = so->so_error;
		goto release;
	}
	if (sosp->so_state & SS_CANTSENDMORE) {
		error = EPIPE;
		goto release;
	}
	if (sosp->so_error && sosp->so_error != ETIMEDOUT &&
	    sosp->so_error != EFBIG && sosp->so_error != ELOOP) {
		error = sosp->so_error;
		goto release;
	}
	if ((sosp->so_state & SS_ISCONNECTED) == 0)
		goto release;

	/* Calculate how many bytes can be copied now. */
	len = so->so_rcv.sb_datacc;
	if (so->so_splicemax) {
		KASSERT(so->so_splicelen < so->so_splicemax);
		if (so->so_splicemax <= so->so_splicelen + len) {
			len = so->so_splicemax - so->so_splicelen;
			maxreached = 1;
		}
	}
	space = sbspace(sosp, &sosp->so_snd);
	if (so->so_oobmark && so->so_oobmark < len &&
	    so->so_oobmark < space + 1024)
		space += 1024;
	if (space <= 0) {
		maxreached = 0;
		goto release;
	}
	if (space < len) {
		maxreached = 0;
		if (space < sosp->so_snd.sb_lowat)
			goto release;
		len = space;
	}
	sosp->so_state |= SS_ISSENDING;

	SBLASTRECORDCHK(&so->so_rcv, "somove 1");
	SBLASTMBUFCHK(&so->so_rcv, "somove 1");
	m = so->so_rcv.sb_mb;
	if (m == NULL)
		goto release;
	nextrecord = m->m_nextpkt;

	/* Drop address and control information not used with splicing. */
	if (so->so_proto->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("somove soname: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
#endif
		m = m->m_next;
	}
	while (m && m->m_type == MT_CONTROL)
		m = m->m_next;
	if (m == NULL) {
		sbdroprecord(&so->so_rcv);
		if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
			(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
			    NULL, NULL, NULL);
		goto nextpkt;
	}

	/*
	 * By splicing sockets connected to localhost, userland might create a
	 * loop.  Dissolve splicing with error if loop is detected by counter.
	 *
	 * If we deal with looped broadcast/multicast packet we bail out with
	 * no error to suppress splice termination.
	 */
	if ((m->m_flags & M_PKTHDR) &&
	    ((m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) ||
	    ((m->m_flags & M_LOOP) && (m->m_flags & (M_BCAST|M_MCAST))))) {
		error = ELOOP;
		goto release;
	}

	if (so->so_proto->pr_flags & PR_ATOMIC) {
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("somove !PKTHDR: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
		if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) {
			error = EMSGSIZE;
			goto release;
		}
		if (len < m->m_pkthdr.len)
			goto release;
		if (m->m_pkthdr.len < len) {
			maxreached = 0;
			len = m->m_pkthdr.len;
		}
		/*
		 * Throw away the name mbuf after it has been assured
		 * that the whole first record can be processed.
		 */
		m = so->so_rcv.sb_mb;
		sbfree(&so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		sbsync(&so->so_rcv, nextrecord);
	}
	/*
	 * Throw away the control mbufs after it has been assured
	 * that the whole first record can be processed.
	 */
	m = so->so_rcv.sb_mb;
	while (m && m->m_type == MT_CONTROL) {
		sbfree(&so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		m = so->so_rcv.sb_mb;
		sbsync(&so->so_rcv, nextrecord);
	}

	SBLASTRECORDCHK(&so->so_rcv, "somove 2");
	SBLASTMBUFCHK(&so->so_rcv, "somove 2");

	/* Take at most len mbufs out of receive buffer. */
	for (off = 0, mp = &m; off <= len && *mp;
	    off += (*mp)->m_len, mp = &(*mp)->m_next) {
		u_long size = len - off;

#ifdef DIAGNOSTIC
		if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER)
			panic("somove type: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, *mp, (*mp)->m_type);
#endif
		if ((*mp)->m_len > size) {
			/*
			 * Move only a partial mbuf at maximum splice length or
			 * if the drain buffer is too small for this large mbuf.
			 */
			if (!maxreached && so->so_snd.sb_datacc > 0) {
				len -= size;
				break;
			}
			*mp = m_copym(so->so_rcv.sb_mb, 0, size, wait);
			if (*mp == NULL) {
				len -= size;
				break;
			}
			so->so_rcv.sb_mb->m_data += size;
			so->so_rcv.sb_mb->m_len -= size;
			so->so_rcv.sb_cc -= size;
			so->so_rcv.sb_datacc -= size;
		} else {
			*mp = so->so_rcv.sb_mb;
			sbfree(&so->so_rcv, *mp);
			so->so_rcv.sb_mb = (*mp)->m_next;
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	*mp = NULL;

	SBLASTRECORDCHK(&so->so_rcv, "somove 3");
	SBLASTMBUFCHK(&so->so_rcv, "somove 3");
	SBCHECK(&so->so_rcv);
	if (m == NULL)
		goto release;
	m->m_nextpkt = NULL;
	if (m->m_flags & M_PKTHDR) {
		m_resethdr(m);
		m->m_pkthdr.len = len;
	}

	/* Send window update to source peer as receive buffer has changed. */
	if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
		(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
		    NULL, NULL, NULL);

	/* Receive buffer did shrink by len bytes, adjust oob. */
	state = so->so_state;
	so->so_state &= ~SS_RCVATMARK;
	oobmark = so->so_oobmark;
	so->so_oobmark = oobmark > len ? oobmark - len : 0;
	if (oobmark) {
		if (oobmark == len)
			so->so_state |= SS_RCVATMARK;
		if (oobmark >= len)
			oobmark = 0;
	}

	/*
	 * Handle oob data.  If any malloc fails, ignore error.
	 * TCP urgent data is not very reliable anyway.
	 */
	while (((state & SS_RCVATMARK) || oobmark) &&
	    (so->so_options & SO_OOBINLINE)) {
		struct mbuf *o = NULL;

		if (state & SS_RCVATMARK) {
			o = m_get(wait, MT_DATA);
			state &= ~SS_RCVATMARK;
		} else if (oobmark) {
			o = m_split(m, oobmark, wait);
			if (o) {
				error = (*sosp->so_proto->pr_usrreq)(sosp,
				    PRU_SEND, m, NULL, NULL, NULL);
				if (error) {
					if (sosp->so_state & SS_CANTSENDMORE)
						error = EPIPE;
					m_freem(o);
					goto release;
				}
				len -= oobmark;
				so->so_splicelen += oobmark;
				m = o;
				o = m_get(wait, MT_DATA);
			}
			oobmark = 0;
		}
		if (o) {
			o->m_len = 1;
			*mtod(o, caddr_t) = *mtod(m, caddr_t);
			error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SENDOOB,
			    o, NULL, NULL, NULL);
			if (error) {
				if (sosp->so_state & SS_CANTSENDMORE)
					error = EPIPE;
				m_freem(m);
				goto release;
			}
			len -= 1;
			so->so_splicelen += 1;
			if (oobmark) {
				oobmark -= 1;
				if (oobmark == 0)
					state |= SS_RCVATMARK;
			}
			m_adj(m, 1);
		}
	}

	/* Append all remaining data to drain socket. */
	if (so->so_rcv.sb_cc == 0 || maxreached)
		sosp->so_state &= ~SS_ISSENDING;
	error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SEND, m, NULL, NULL,
	    NULL);
	if (error) {
		if (sosp->so_state & SS_CANTSENDMORE)
			error = EPIPE;
		goto release;
	}
	so->so_splicelen += len;

	/* Move several packets if possible. */
	if (!maxreached && nextrecord)
		goto nextpkt;

release:
	sosp->so_state &= ~SS_ISSENDING;
	if (!error && maxreached && so->so_splicemax == so->so_splicelen)
		error = EFBIG;
	if (error)
		so->so_error = error;
	if (((so->so_state & SS_CANTRCVMORE) && so->so_rcv.sb_cc == 0) ||
	    (sosp->so_state & SS_CANTSENDMORE) || maxreached || error) {
		sounsplice(so, sosp, 0);
		return (0);
	}
	if (timerisset(&so->so_idletv))
		timeout_add_tv(&so->so_idleto, &so->so_idletv);
	return (1);
}

#endif /* SOCKET_SPLICE */

void
sorwakeup(struct socket *so)
{
	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * TCP has a sendbuffer that can handle multiple packets
		 * at once.  So queue the stream a bit to accumulate data.
		 * The sosplice thread will call somove() later and send
		 * the packets calling tcp_output() only once.
		 * In the UDP case, send out the packets immediately.
		 * Using a thread would make things slower.
		 */
		if (so->so_proto->pr_flags & PR_WANTRCVD)
			task_add(sosplice_taskq, &so->so_splicetask);
		else
			somove(so, M_DONTWAIT);
	}
	if (isspliced(so))
		return;
#endif
	sowakeup(so, &so->so_rcv);
	if (so->so_upcall)
		(*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT);
}

void
sowwakeup(struct socket *so)
{
	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (so->so_snd.sb_flags & SB_SPLICE)
		task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask);
	if (issplicedback(so))
		return;
#endif
	sowakeup(so, &so->so_snd);
}
int
sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
			return (error);
		}
		error = ENOPROTOOPT;
	} else {
		switch (optname) {
		case SO_BINDANY:
			if ((error = suser(curproc)) != 0)	/* XXX */
				return (error);
			break;
		}

		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger) ||
			    mtod(m, struct linger *)->l_linger < 0 ||
			    mtod(m, struct linger *)->l_linger > SHRT_MAX)
				return (EINVAL);
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* FALLTHROUGH */

		case SO_BINDANY:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_DONTROUTE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				error = EOPNOTSUPP;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		    {
			u_long cnt;

			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			cnt = *mtod(m, int *);
			if ((long)cnt <= 0)
				cnt = 1;
			switch (optname) {

			case SO_SNDBUF:
				if (so->so_state & SS_CANTSENDMORE)
					return (EINVAL);
				if (sbcheckreserve(cnt, so->so_snd.sb_wat) ||
				    sbreserve(so, &so->so_snd, cnt))
					return (ENOBUFS);
				so->so_snd.sb_wat = cnt;
				break;

			case SO_RCVBUF:
				if (so->so_state & SS_CANTRCVMORE)
					return (EINVAL);
				if (sbcheckreserve(cnt, so->so_rcv.sb_wat) ||
				    sbreserve(so, &so->so_rcv, cnt))
					return (ENOBUFS);
				so->so_rcv.sb_wat = cnt;
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (cnt > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : cnt;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (cnt > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : cnt;
				break;
			}
			break;
		    }

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			uint64_t nsecs;

			if (m == NULL || m->m_len < sizeof (tv))
				return (EINVAL);
			memcpy(&tv, mtod(m, struct timeval *), sizeof tv);
			if (!timerisvalid(&tv))
				return (EINVAL);
			nsecs = TIMEVAL_TO_NSEC(&tv);
			if (nsecs == UINT64_MAX)
				return (EDOM);
			if (nsecs == 0)
				nsecs = INFSLP;
			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo_nsecs = nsecs;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo_nsecs = nsecs;
				break;
			}
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				const struct domain *dom =
				    so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_SETOPT, so, level, optname, m);
				return (error);
			}
			error = ENOPROTOOPT;
			break;

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
			if (m == NULL) {
				error = sosplice(so, -1, 0, NULL);
			} else if (m->m_len < sizeof(int)) {
				return (EINVAL);
			} else if (m->m_len < sizeof(struct splice)) {
				error = sosplice(so, *mtod(m, int *), 0, NULL);
			} else {
				error = sosplice(so,
				    mtod(m, struct splice *)->sp_fd,
				    mtod(m, struct splice *)->sp_max,
				    &mtod(m, struct splice *)->sp_idle);
			}
			break;
#endif /* SOCKET_SPLICE */

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto->pr_ctloutput) {
			(*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
		}
	}

	return (error);
}
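/*
 * Illustrative sketch of the SO_SNDTIMEO/SO_RCVTIMEO conversion above
 * (hypothetical descriptor s): the timeval is converted to nanoseconds
 * and a zero timeout means "wait forever" (INFSLP):
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *		-> sb_timeo_nsecs = 2500000000
 *	tv.tv_sec = tv.tv_usec = 0;
 *	setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *		-> sb_timeo_nsecs = INFSLP (blocking reads never time out)
 */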
int
sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			m->m_len = 0;

			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
			    level, optname, m);
			if (error)
				return (error);
			return (0);
		} else
			return (ENOPROTOOPT);
	} else {
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
			    so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_DONTROUTE:
			*mtod(m, int *) = 0;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_DOMAIN:
			*mtod(m, int *) = so->so_proto->pr_domain->dom_family;
			break;

		case SO_PROTOCOL:
			*mtod(m, int *) = so->so_proto->pr_protocol;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			uint64_t nsecs = (optname == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo_nsecs :
			    so->so_rcv.sb_timeo_nsecs);

			m->m_len = sizeof(struct timeval);
			memset(&tv, 0, sizeof(tv));
			if (nsecs != INFSLP)
				NSEC_TO_TIMEVAL(nsecs, &tv);
			memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				const struct domain *dom =
				    so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_GETOPT, so, level, optname, m);
				if (error)
					return (error);
				break;
			}
			return (ENOPROTOOPT);

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
		    {
			off_t len;

			m->m_len = sizeof(off_t);
			len = so->so_sp ? so->so_sp->ssp_len : 0;
			memcpy(mtod(m, off_t *), &len, sizeof(off_t));
			break;
		    }
#endif /* SOCKET_SPLICE */

		case SO_PEERCRED:
			if (so->so_proto->pr_protocol == AF_UNIX) {
				struct unpcb *unp = sotounpcb(so);

				if (unp->unp_flags & UNP_FEIDS) {
					m->m_len = sizeof(unp->unp_connid);
					memcpy(mtod(m, caddr_t),
					    &(unp->unp_connid), m->m_len);
					break;
				}
				return (ENOTCONN);
			}
			return (EOPNOTSUPP);

		default:
			return (ENOPROTOOPT);
		}
		return (0);
	}
}

void
sohasoutofband(struct socket *so)
{
	pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	KERNEL_ASSERT_LOCKED();

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	case EVFILT_EXCEPT:
		kn->kn_fop = &soexcept_filtops;
		sb = &so->so_rcv;
		break;
	default:
		return (EINVAL);
	}

	klist_insert_locked(&sb->sb_sel.si_note, kn);

	return (0);
}
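/*
 * Illustrative sketch (hypothetical descriptors kq and s): a kevent(2)
 * registration that ends up in soo_kqfilter() above; EVFILT_READ on a
 * listening socket selects solisten_filtops, otherwise soread_filtops:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */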
int
filt_soread_common(struct knote *kn, struct socket *so)
{
	int rv = 0;

	soassertlocked(so);

	kn->kn_data = so->so_rcv.sb_cc;
#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (kn->kn_sfflags & NOTE_OOB) {
		if (so->so_oobmark ||
		    (so->so_state & SS_RCVATMARK)) {
			kn->kn_fflags |= NOTE_OOB;
			kn->kn_data -= so->so_oobmark;
			rv = 1;
		}
	} else if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (so->so_state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
	}

	return rv;
}

int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	return (filt_soread_common(kn, so));
}

int
filt_soreadmodify(struct kevent *kev, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv, s;

	s = solock(so);
	knote_modify(kev, kn);
	rv = filt_soread_common(kn, so);
	sounlock(so, s);

	return (rv);
}

int
filt_soreadprocess(struct knote *kn, struct kevent *kev)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv, s;

	s = solock(so);
	if (kev != NULL && (kn->kn_flags & EV_ONESHOT))
		rv = 1;
	else
		rv = filt_soread_common(kn, so);
	if (rv != 0)
		knote_submit(kn, kev);
	sounlock(so, s);

	return (rv);
}

void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	KERNEL_ASSERT_LOCKED();

	klist_remove_locked(&so->so_snd.sb_sel.si_note, kn);
}

int
filt_sowrite_common(struct knote *kn, struct socket *so)
{
	int rv;

	soassertlocked(so);

	kn->kn_data = sbspace(so, &so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		if (kn->kn_flags & __EV_POLL) {
			if (so->so_state & SS_ISDISCONNECTED)
				kn->kn_flags |= __EV_HUP;
		}
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		rv = 0;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	}

	return (rv);
}

int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	return (filt_sowrite_common(kn, so));
}

int
filt_sowritemodify(struct kevent *kev, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv, s;

	s = solock(so);
	knote_modify(kev, kn);
	rv = filt_sowrite_common(kn, so);
	sounlock(so, s);

	return (rv);
}

int
filt_sowriteprocess(struct knote *kn, struct kevent *kev)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv, s;

	s = solock(so);
	if (kev != NULL && (kn->kn_flags & EV_ONESHOT))
		rv = 1;
	else
		rv = filt_sowrite_common(kn, so);
	if (rv != 0)
		knote_submit(kn, kev);
	sounlock(so, s);

	return (rv);
}
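
/*
 * Listening sockets reuse the read filter: kn_data is the number of
 * completed connections waiting on so_q, and the knote fires as soon
 * as a connection is ready to be accepted.
 */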
int
filt_solisten_common(struct knote *kn, struct socket *so)
{
	soassertlocked(so);

	kn->kn_data = so->so_qlen;

	return (kn->kn_data != 0);
}

int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	return (filt_solisten_common(kn, so));
}

int
filt_solistenmodify(struct kevent *kev, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv, s;

	s = solock(so);
	knote_modify(kev, kn);
	rv = filt_solisten_common(kn, so);
	sounlock(so, s);

	return (rv);
}

int
filt_solistenprocess(struct knote *kn, struct kevent *kev)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv, s;

	s = solock(so);
	if (kev != NULL && (kn->kn_flags & EV_ONESHOT))
		rv = 1;
	else
		rv = filt_solisten_common(kn, so);
	if (rv != 0)
		knote_submit(kn, kev);
	sounlock(so, s);

	return (rv);
}

#ifdef DDB
void
sobuf_print(struct sockbuf *,
    int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))));

void
sobuf_print(struct sockbuf *sb,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	(*pr)("\tsb_cc: %lu\n", sb->sb_cc);
	(*pr)("\tsb_datacc: %lu\n", sb->sb_datacc);
	(*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat);
	(*pr)("\tsb_wat: %lu\n", sb->sb_wat);
	(*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt);
	(*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax);
	(*pr)("\tsb_lowat: %ld\n", sb->sb_lowat);
	(*pr)("\tsb_mb: %p\n", sb->sb_mb);
	(*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail);
	(*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord);
	(*pr)("\tsb_sel: ...\n");
	(*pr)("\tsb_flags: %i\n", sb->sb_flags);
	(*pr)("\tsb_timeo_nsecs: %llu\n", sb->sb_timeo_nsecs);
}

void
so_print(void *v,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct socket *so = v;

	(*pr)("socket %p\n", so);
	(*pr)("so_type: %i\n", so->so_type);
	(*pr)("so_options: 0x%04x\n", so->so_options); /* %b */
	(*pr)("so_linger: %i\n", so->so_linger);
	(*pr)("so_state: 0x%04x\n", so->so_state);
	(*pr)("so_pcb: %p\n", so->so_pcb);
	(*pr)("so_proto: %p\n", so->so_proto);
	(*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio);

	(*pr)("so_head: %p\n", so->so_head);
	(*pr)("so_onq: %p\n", so->so_onq);
	(*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0));
	(*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q));
	(*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe));
	(*pr)("so_q0len: %i\n", so->so_q0len);
	(*pr)("so_qlen: %i\n", so->so_qlen);
	(*pr)("so_qlimit: %i\n", so->so_qlimit);
	(*pr)("so_timeo: %i\n", so->so_timeo);
	(*pr)("so_oobmark: %lu\n", so->so_oobmark);

	(*pr)("so_sp: %p\n", so->so_sp);
	if (so->so_sp != NULL) {
		(*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket);
		(*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback);
		(*pr)("\tssp_len: %lld\n",
		    (unsigned long long)so->so_sp->ssp_len);
		(*pr)("\tssp_max: %lld\n",
		    (unsigned long long)so->so_sp->ssp_max);
		(*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec,
		    so->so_sp->ssp_idletv.tv_usec);
		(*pr)("\tssp_idleto: %spending (@%i)\n",
		    timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ",
		    so->so_sp->ssp_idleto.to_time);
	}

	(*pr)("so_rcv:\n");
	sobuf_print(&so->so_rcv, pr);
	(*pr)("so_snd:\n");
	sobuf_print(&so->so_snd, pr);

	(*pr)("so_upcall: %p so_upcallarg: %p\n",
	    so->so_upcall, so->so_upcallarg);

	(*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid);
	(*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid);
	(*pr)("so_cpid: %d\n", so->so_cpid);
}
#endif