/*	$OpenBSD: uipc_socket.c,v 1.231 2018/12/17 16:46:59 bluhm Exp $	*/
/*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/unpcb.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <net/if.h>
#include <sys/pool.h>
#include <sys/atomic.h>
#include <sys/rwlock.h>

#ifdef DDB
#include <machine/db_machdep.h>
#endif

void	sbsync(struct sockbuf *, struct mbuf *);

int	sosplice(struct socket *, int, off_t, struct timeval *);
void	sounsplice(struct socket *, struct socket *, int);
void	soidle(void *);
void	sotask(void *);
void	soreaper(void *);
void	soput(void *);
int	somove(struct socket *, int);

void	filt_sordetach(struct knote *kn);
int	filt_soread(struct knote *kn, long hint);
void	filt_sowdetach(struct knote *kn);
int	filt_sowrite(struct knote *kn, long hint);
int	filt_solisten(struct knote *kn, long hint);

struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };


#ifndef SOMINCONN
#define SOMINCONN 80
#endif /* SOMINCONN */

int	somaxconn = SOMAXCONN;
int	sominconn = SOMINCONN;

struct pool socket_pool;
#ifdef SOCKET_SPLICE
struct pool sosplice_pool;
struct taskq *sosplice_taskq;
struct rwlock sosplice_lock = RWLOCK_INITIALIZER("sosplicelk");
#endif

void
soinit(void)
{
	pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0,
	    "sockpl", NULL);
#ifdef SOCKET_SPLICE
	pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0,
	    "sosppl", NULL);
#endif
}

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
int
socreate(int dom, struct socket **aso, int type, int proto)
{
	struct proc *p = curproc;		/* XXX */
	const struct protosw *prp;
	struct socket *so;
	int error, s;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL || prp->pr_attach == NULL)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = pool_get(&socket_pool, PR_WAITOK | PR_ZERO);
	sigio_init(&so->so_sigio);
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
	so->so_type = type;
	if (suser(p) == 0)
		so->so_state = SS_PRIV;
	so->so_ruid = p->p_ucred->cr_ruid;
	so->so_euid = p->p_ucred->cr_uid;
	so->so_rgid = p->p_ucred->cr_rgid;
	so->so_egid = p->p_ucred->cr_gid;
	so->so_cpid = p->p_p->ps_pid;
	so->so_proto = prp;

	s = solock(so);
	error = (*prp->pr_attach)(so, proto);
	if (error) {
		so->so_state |= SS_NOFDREF;
		/* sofree() calls sounlock(). */
		sofree(so, s);
		return (error);
	}
	sounlock(so, s);
	*aso = so;
	return (0);
}
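
/*
 * Illustrative userland sketch (not part of the original file):
 * socreate() is the kernel half of socket(2).  The domain/type/proto
 * triple drives the pffindtype()/pffindproto() lookup above; an
 * unknown protocol yields EPROTONOSUPPORT, a type mismatch EPROTOTYPE.
 * Error handling is omitted; "s" and "t" are placeholders.
 *
 *	int s = socket(AF_INET, SOCK_STREAM, 0);	   // pffindtype()
 *	int t = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); // pffindproto()
 */
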
int
sobind(struct socket *so, struct mbuf *nam, struct proc *p)
{
	int error;

	soassertlocked(so);

	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
	return (error);
}

int
solisten(struct socket *so, int backlog)
{
	int s, error;

	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING))
		return (EINVAL);
#ifdef SOCKET_SPLICE
	if (isspliced(so) || issplicedback(so))
		return (EOPNOTSUPP);
#endif /* SOCKET_SPLICE */
	s = solock(so);
	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
	    curproc);
	if (error) {
		sounlock(so, s);
		return (error);
	}
	if (TAILQ_FIRST(&so->so_q) == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	if (backlog < sominconn)
		backlog = sominconn;
	so->so_qlimit = backlog;
	sounlock(so, s);
	return (0);
}
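
/*
 * Illustrative userland sketch (not part of the original file): the
 * backlog clamping done by solisten() above.  "s" is assumed to be a
 * bound TCP socket.
 *
 *	listen(s, -1);	// negative or oversized -> clamped to somaxconn
 *	listen(s, 1);	// below sominconn (80 above) -> raised to sominconn
 */
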
void
sofree(struct socket *so, int s)
{
	soassertlocked(so);

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) {
		sounlock(so, s);
		return;
	}
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0)) {
			sounlock(so, s);
			return;
		}
	}
	sigio_free(&so->so_sigio);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		if (issplicedback(so))
			sounsplice(so->so_sp->ssp_soback, so,
			    so->so_sp->ssp_soback != so);
		if (isspliced(so))
			sounsplice(so, so->so_sp->ssp_socket, 0);
	}
#endif /* SOCKET_SPLICE */
	sbrelease(so, &so->so_snd);
	sorflush(so);
	sounlock(so, s);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		/* Reuse splice idle, sounsplice() has been called before. */
		timeout_set_proc(&so->so_sp->ssp_idleto, soreaper, so);
		timeout_add(&so->so_sp->ssp_idleto, 0);
	} else
#endif /* SOCKET_SPLICE */
	{
		pool_put(&socket_pool, so);
	}
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so, int flags)
{
	struct socket *so2;
	int s, error = 0;

	s = solock(so);
	/* Revoke async IO early. There is a final revocation in sofree(). */
	sigio_free(&so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) {
			(void) soqremque(so2, 0);
			(void) soabort(so2);
		}
		while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) {
			(void) soqremque(so2, 1);
			(void) soabort(so2);
		}
	}
	if (so->so_pcb == NULL)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (flags & MSG_DONTWAIT))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sosleep(so, &so->so_timeo,
				    PSOCK | PCATCH, "netcls",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;
		KASSERT(so->so_proto->pr_detach);
		error2 = (*so->so_proto->pr_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state |= SS_NOFDREF;
	/* sofree() calls sounlock(). */
	sofree(so, s);
	return (error);
}

int
soabort(struct socket *so)
{
	soassertlocked(so);

	return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL,
	    curproc);
}

int
soaccept(struct socket *so, struct mbuf *nam)
{
	int error = 0;

	soassertlocked(so);

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL,
		    nam, NULL, curproc);
	else
		error = ECONNABORTED;
	return (error);
}

int
soconnect(struct socket *so, struct mbuf *nam)
{
	int error;

	soassertlocked(so);

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    NULL, nam, NULL, curproc);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int s, error;

	s = solock(so1);
	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL,
	    (struct mbuf *)so2, NULL, curproc);
	sounlock(so1, s);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	soassertlocked(so);

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL,
	    NULL, curproc);
	return (error);
}
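
/*
 * Illustrative userland sketch (not part of the original file): how
 * SO_LINGER reaches the soclose() logic above.  With l_onoff set,
 * close(2) may sleep in "netcls" for up to l_linger seconds while the
 * disconnect completes.  "s" is a placeholder for a connected socket.
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);		// may block up to 5 seconds
 */
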
int m_getuio(struct mbuf **, int, long, struct uio *);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
    struct mbuf *control, int flags)
{
	long space, clen = 0;
	size_t resid;
	int error, s;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/* MSG_EOR on a SOCK_STREAM socket is invalid. */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		m_freem(top);
		m_freem(control);
		return (EINVAL);
	}
	if (uio && uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgsnd++;
	if (control) {
		/*
		 * In theory clen should be unsigned (since control->m_len is).
		 * However, space must be signed, as it might be less than 0
		 * if we over-committed, and we must use a signed comparison
		 * of space and clen.
		 */
		clen = control->m_len;
		/* reserve extra space for AF_UNIX's internalize */
		if (so->so_proto->pr_domain->dom_family == AF_UNIX &&
		    clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) &&
		    mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS)
			clen = CMSG_SPACE(
			    (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) *
			    (sizeof(struct fdpass) / sizeof(int)));
	}

#define	snderr(errno)	{ error = errno; goto release; }

	s = solock(so);
restart:
	if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	so->so_state |= SS_ISSENDING;
	do {
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			snderr(error);
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (!(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(so, &so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if (so->so_proto->pr_domain->dom_family == AF_UNIX) {
			if (atomic && resid > so->so_snd.sb_hiwat)
				snderr(EMSGSIZE);
		} else {
			if (clen > so->so_snd.sb_hiwat ||
			    (atomic && resid > so->so_snd.sb_hiwat - clen))
				snderr(EMSGSIZE);
		}
		if (space < clen ||
		    (space - clen < resid &&
		    (atomic || space < so->so_snd.sb_lowat))) {
			if (flags & MSG_DONTWAIT)
				snderr(EWOULDBLOCK);
			sbunlock(so, &so->so_snd);
			error = sbwait(so, &so->so_snd);
			so->so_state &= ~SS_ISSENDING;
			if (error)
				goto out;
			goto restart;
		}
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				sounlock(so, s);
				error = m_getuio(&top, atomic, space, uio);
				s = solock(so);
				if (error)
					goto release;
				space -= top->m_pkthdr.len;
				resid = uio->uio_resid;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			}
			if (resid == 0)
				so->so_state &= ~SS_ISSENDING;
			if (top && so->so_options & SO_ZEROIZE)
				top->m_flags |= M_ZEROIZE;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control, curproc);
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	so->so_state &= ~SS_ISSENDING;
	sbunlock(so, &so->so_snd);
out:
	sounlock(so, s);
	m_freem(top);
	m_freem(control);
	return (error);
}
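
/*
 * Illustrative userland sketch (not part of the original file): the
 * MSG_DONTWAIT branch of sosend() above.  With less room than
 * sb_lowat in the send buffer, a nonblocking send fails with
 * EWOULDBLOCK instead of sleeping in sbwait().
 *
 *	ssize_t n = send(s, buf, len, MSG_DONTWAIT);
 *	if (n == -1 && errno == EWOULDBLOCK)
 *		;	// wait for writability, e.g. via poll(2)
 */
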
494 */ 495 resid = 0; 496 if (flags & MSG_EOR) 497 top->m_flags |= M_EOR; 498 } else { 499 sounlock(so, s); 500 error = m_getuio(&top, atomic, space, uio); 501 s = solock(so); 502 if (error) 503 goto release; 504 space -= top->m_pkthdr.len; 505 resid = uio->uio_resid; 506 if (flags & MSG_EOR) 507 top->m_flags |= M_EOR; 508 } 509 if (resid == 0) 510 so->so_state &= ~SS_ISSENDING; 511 if (top && so->so_options & SO_ZEROIZE) 512 top->m_flags |= M_ZEROIZE; 513 error = (*so->so_proto->pr_usrreq)(so, 514 (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND, 515 top, addr, control, curproc); 516 clen = 0; 517 control = NULL; 518 top = NULL; 519 if (error) 520 goto release; 521 } while (resid && space > 0); 522 } while (resid); 523 524 release: 525 so->so_state &= ~SS_ISSENDING; 526 sbunlock(so, &so->so_snd); 527 out: 528 sounlock(so, s); 529 m_freem(top); 530 m_freem(control); 531 return (error); 532 } 533 534 int 535 m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio) 536 { 537 struct mbuf *m, *top = NULL; 538 struct mbuf **nextp = ⊤ 539 u_long len, mlen; 540 size_t resid = uio->uio_resid; 541 int error; 542 543 do { 544 if (top == NULL) { 545 MGETHDR(m, M_WAIT, MT_DATA); 546 mlen = MHLEN; 547 m->m_pkthdr.len = 0; 548 m->m_pkthdr.ph_ifidx = 0; 549 } else { 550 MGET(m, M_WAIT, MT_DATA); 551 mlen = MLEN; 552 } 553 /* chain mbuf together */ 554 *nextp = m; 555 nextp = &m->m_next; 556 557 resid = ulmin(resid, space); 558 if (resid >= MINCLSIZE) { 559 MCLGETI(m, M_NOWAIT, NULL, ulmin(resid, MAXMCLBYTES)); 560 if ((m->m_flags & M_EXT) == 0) 561 MCLGETI(m, M_NOWAIT, NULL, MCLBYTES); 562 if ((m->m_flags & M_EXT) == 0) 563 goto nopages; 564 mlen = m->m_ext.ext_size; 565 len = ulmin(mlen, resid); 566 /* 567 * For datagram protocols, leave room 568 * for protocol headers in first mbuf. 569 */ 570 if (atomic && m == top && len < mlen - max_hdr) 571 m->m_data += max_hdr; 572 } else { 573 nopages: 574 len = ulmin(mlen, resid); 575 /* 576 * For datagram protocols, leave room 577 * for protocol headers in first mbuf. 578 */ 579 if (atomic && m == top && len < mlen - max_hdr) 580 m_align(m, len); 581 } 582 583 error = uiomove(mtod(m, caddr_t), len, uio); 584 if (error) { 585 m_freem(top); 586 return (error); 587 } 588 589 /* adjust counters */ 590 resid = uio->uio_resid; 591 space -= len; 592 m->m_len = len; 593 top->m_pkthdr.len += len; 594 595 /* Is there more space and more data? */ 596 } while (space > 0 && resid > 0); 597 598 *mp = top; 599 return 0; 600 } 601 602 /* 603 * Following replacement or removal of the first mbuf on the first 604 * mbuf chain of a socket buffer, push necessary state changes back 605 * into the socket buffer so that other consumers see the values 606 * consistently. 'nextrecord' is the callers locally stored value of 607 * the original value of sb->sb_mb->m_nextpkt which must be restored 608 * when the lead mbuf changes. NOTE: 'nextrecord' may be NULL. 609 */ 610 void 611 sbsync(struct sockbuf *sb, struct mbuf *nextrecord) 612 { 613 614 /* 615 * First, update for the new value of nextrecord. If necessary, 616 * make it the first record. 617 */ 618 if (sb->sb_mb != NULL) 619 sb->sb_mb->m_nextpkt = nextrecord; 620 else 621 sb->sb_mb = nextrecord; 622 623 /* 624 * Now update any dependent socket buffer fields to reflect 625 * the new state. This is an inline of SB_EMPTY_FIXUP, with 626 * the addition of a second clause that takes care of the 627 * case where sb_mb has been updated, but remains the last 628 * record. 
629 */ 630 if (sb->sb_mb == NULL) { 631 sb->sb_mbtail = NULL; 632 sb->sb_lastrecord = NULL; 633 } else if (sb->sb_mb->m_nextpkt == NULL) 634 sb->sb_lastrecord = sb->sb_mb; 635 } 636 637 /* 638 * Implement receive operations on a socket. 639 * We depend on the way that records are added to the sockbuf 640 * by sbappend*. In particular, each record (mbufs linked through m_next) 641 * must begin with an address if the protocol so specifies, 642 * followed by an optional mbuf or mbufs containing ancillary data, 643 * and then zero or more mbufs of data. 644 * In order to avoid blocking network for the entire time here, we release 645 * the solock() while doing the actual copy to user space. 646 * Although the sockbuf is locked, new data may still be appended, 647 * and thus we must maintain consistency of the sockbuf during that time. 648 * 649 * The caller may receive the data as a single mbuf chain by supplying 650 * an mbuf **mp0 for use in returning the chain. The uio is then used 651 * only for the count in uio_resid. 652 */ 653 int 654 soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio, 655 struct mbuf **mp0, struct mbuf **controlp, int *flagsp, 656 socklen_t controllen) 657 { 658 struct mbuf *m, **mp; 659 struct mbuf *cm; 660 u_long len, offset, moff; 661 int flags, error, s, type, uio_error = 0; 662 const struct protosw *pr = so->so_proto; 663 struct mbuf *nextrecord; 664 size_t resid, orig_resid = uio->uio_resid; 665 666 mp = mp0; 667 if (paddr) 668 *paddr = NULL; 669 if (controlp) 670 *controlp = NULL; 671 if (flagsp) 672 flags = *flagsp &~ MSG_EOR; 673 else 674 flags = 0; 675 if (flags & MSG_OOB) { 676 m = m_get(M_WAIT, MT_DATA); 677 s = solock(so); 678 error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m, 679 (struct mbuf *)(long)(flags & MSG_PEEK), NULL, curproc); 680 sounlock(so, s); 681 if (error) 682 goto bad; 683 do { 684 error = uiomove(mtod(m, caddr_t), 685 ulmin(uio->uio_resid, m->m_len), uio); 686 m = m_free(m); 687 } while (uio->uio_resid && error == 0 && m); 688 bad: 689 m_freem(m); 690 return (error); 691 } 692 if (mp) 693 *mp = NULL; 694 695 s = solock(so); 696 restart: 697 if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) { 698 sounlock(so, s); 699 return (error); 700 } 701 702 m = so->so_rcv.sb_mb; 703 #ifdef SOCKET_SPLICE 704 if (isspliced(so)) 705 m = NULL; 706 #endif /* SOCKET_SPLICE */ 707 /* 708 * If we have less data than requested, block awaiting more 709 * (subject to any timeout) if: 710 * 1. the current count is less than the low water mark, 711 * 2. MSG_WAITALL is set, and it is possible to do the entire 712 * receive operation at once if we block (resid <= hiwat), or 713 * 3. MSG_DONTWAIT is not set. 714 * If MSG_WAITALL is set but resid is larger than the receive buffer, 715 * we have to do the receive in sections, and thus risk returning 716 * a short count if a timeout or signal occurs after we start. 
717 */ 718 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 && 719 so->so_rcv.sb_cc < uio->uio_resid) && 720 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || 721 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && 722 m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) { 723 #ifdef DIAGNOSTIC 724 if (m == NULL && so->so_rcv.sb_cc) 725 #ifdef SOCKET_SPLICE 726 if (!isspliced(so)) 727 #endif /* SOCKET_SPLICE */ 728 panic("receive 1: so %p, so_type %d, sb_cc %lu", 729 so, so->so_type, so->so_rcv.sb_cc); 730 #endif 731 if (so->so_error) { 732 if (m) 733 goto dontblock; 734 error = so->so_error; 735 if ((flags & MSG_PEEK) == 0) 736 so->so_error = 0; 737 goto release; 738 } 739 if (so->so_state & SS_CANTRCVMORE) { 740 if (m) 741 goto dontblock; 742 else if (so->so_rcv.sb_cc == 0) 743 goto release; 744 } 745 for (; m; m = m->m_next) 746 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { 747 m = so->so_rcv.sb_mb; 748 goto dontblock; 749 } 750 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 751 (so->so_proto->pr_flags & PR_CONNREQUIRED)) { 752 error = ENOTCONN; 753 goto release; 754 } 755 if (uio->uio_resid == 0 && controlp == NULL) 756 goto release; 757 if (flags & MSG_DONTWAIT) { 758 error = EWOULDBLOCK; 759 goto release; 760 } 761 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); 762 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); 763 sbunlock(so, &so->so_rcv); 764 error = sbwait(so, &so->so_rcv); 765 if (error) { 766 sounlock(so, s); 767 return (error); 768 } 769 goto restart; 770 } 771 dontblock: 772 /* 773 * On entry here, m points to the first record of the socket buffer. 774 * From this point onward, we maintain 'nextrecord' as a cache of the 775 * pointer to the next record in the socket buffer. We must keep the 776 * various socket buffer pointers and local stack versions of the 777 * pointers in sync, pushing out modifications before operations that 778 * may sleep, and re-reading them afterwards. 779 * 780 * Otherwise, we will race with the network stack appending new data 781 * or records onto the socket buffer by using inconsistent/stale 782 * versions of the field, possibly resulting in socket buffer 783 * corruption. 
784 */ 785 if (uio->uio_procp) 786 uio->uio_procp->p_ru.ru_msgrcv++; 787 KASSERT(m == so->so_rcv.sb_mb); 788 SBLASTRECORDCHK(&so->so_rcv, "soreceive 1"); 789 SBLASTMBUFCHK(&so->so_rcv, "soreceive 1"); 790 nextrecord = m->m_nextpkt; 791 if (pr->pr_flags & PR_ADDR) { 792 #ifdef DIAGNOSTIC 793 if (m->m_type != MT_SONAME) 794 panic("receive 1a: so %p, so_type %d, m %p, m_type %d", 795 so, so->so_type, m, m->m_type); 796 #endif 797 orig_resid = 0; 798 if (flags & MSG_PEEK) { 799 if (paddr) 800 *paddr = m_copym(m, 0, m->m_len, M_NOWAIT); 801 m = m->m_next; 802 } else { 803 sbfree(&so->so_rcv, m); 804 if (paddr) { 805 *paddr = m; 806 so->so_rcv.sb_mb = m->m_next; 807 m->m_next = 0; 808 m = so->so_rcv.sb_mb; 809 } else { 810 so->so_rcv.sb_mb = m_free(m); 811 m = so->so_rcv.sb_mb; 812 } 813 sbsync(&so->so_rcv, nextrecord); 814 } 815 } 816 while (m && m->m_type == MT_CONTROL && error == 0) { 817 int skip = 0; 818 if (flags & MSG_PEEK) { 819 if (mtod(m, struct cmsghdr *)->cmsg_type == 820 SCM_RIGHTS) { 821 /* don't leak internalized SCM_RIGHTS msgs */ 822 skip = 1; 823 } else if (controlp) 824 *controlp = m_copym(m, 0, m->m_len, M_NOWAIT); 825 m = m->m_next; 826 } else { 827 sbfree(&so->so_rcv, m); 828 so->so_rcv.sb_mb = m->m_next; 829 m->m_nextpkt = m->m_next = NULL; 830 cm = m; 831 m = so->so_rcv.sb_mb; 832 sbsync(&so->so_rcv, nextrecord); 833 if (controlp) { 834 if (pr->pr_domain->dom_externalize) { 835 error = 836 (*pr->pr_domain->dom_externalize) 837 (cm, controllen, flags); 838 } 839 *controlp = cm; 840 } else { 841 /* 842 * Dispose of any SCM_RIGHTS message that went 843 * through the read path rather than recv. 844 */ 845 if (pr->pr_domain->dom_dispose) 846 pr->pr_domain->dom_dispose(cm); 847 m_free(cm); 848 } 849 } 850 if (m != NULL) 851 nextrecord = so->so_rcv.sb_mb->m_nextpkt; 852 else 853 nextrecord = so->so_rcv.sb_mb; 854 if (controlp && !skip) { 855 orig_resid = 0; 856 controlp = &(*controlp)->m_next; 857 } 858 } 859 860 /* If m is non-NULL, we have some data to read. */ 861 if (m) { 862 type = m->m_type; 863 if (type == MT_OOBDATA) 864 flags |= MSG_OOB; 865 if (m->m_flags & M_BCAST) 866 flags |= MSG_BCAST; 867 if (m->m_flags & M_MCAST) 868 flags |= MSG_MCAST; 869 } 870 SBLASTRECORDCHK(&so->so_rcv, "soreceive 2"); 871 SBLASTMBUFCHK(&so->so_rcv, "soreceive 2"); 872 873 moff = 0; 874 offset = 0; 875 while (m && uio->uio_resid > 0 && error == 0) { 876 if (m->m_type == MT_OOBDATA) { 877 if (type != MT_OOBDATA) 878 break; 879 } else if (type == MT_OOBDATA) { 880 break; 881 } else if (m->m_type == MT_CONTROL) { 882 /* 883 * If there is more than one control message in the 884 * stream, we do a short read. Next can be received 885 * or disposed by another system call. 886 */ 887 break; 888 #ifdef DIAGNOSTIC 889 } else if (m->m_type != MT_DATA && m->m_type != MT_HEADER) { 890 panic("receive 3: so %p, so_type %d, m %p, m_type %d", 891 so, so->so_type, m, m->m_type); 892 #endif 893 } 894 so->so_state &= ~SS_RCVATMARK; 895 len = uio->uio_resid; 896 if (so->so_oobmark && len > so->so_oobmark - offset) 897 len = so->so_oobmark - offset; 898 if (len > m->m_len - moff) 899 len = m->m_len - moff; 900 /* 901 * If mp is set, just pass back the mbufs. 902 * Otherwise copy them out via the uio, then free. 903 * Sockbuf must be consistent here (points to current mbuf, 904 * it points to next record) when we drop priority; 905 * we must note any additions to the sockbuf when we 906 * block interrupts again. 
907 */ 908 if (mp == NULL && uio_error == 0) { 909 SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove"); 910 SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove"); 911 resid = uio->uio_resid; 912 sounlock(so, s); 913 uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio); 914 s = solock(so); 915 if (uio_error) 916 uio->uio_resid = resid - len; 917 } else 918 uio->uio_resid -= len; 919 if (len == m->m_len - moff) { 920 if (m->m_flags & M_EOR) 921 flags |= MSG_EOR; 922 if (flags & MSG_PEEK) { 923 m = m->m_next; 924 moff = 0; 925 } else { 926 nextrecord = m->m_nextpkt; 927 sbfree(&so->so_rcv, m); 928 if (mp) { 929 *mp = m; 930 mp = &m->m_next; 931 so->so_rcv.sb_mb = m = m->m_next; 932 *mp = NULL; 933 } else { 934 so->so_rcv.sb_mb = m_free(m); 935 m = so->so_rcv.sb_mb; 936 } 937 /* 938 * If m != NULL, we also know that 939 * so->so_rcv.sb_mb != NULL. 940 */ 941 KASSERT(so->so_rcv.sb_mb == m); 942 if (m) { 943 m->m_nextpkt = nextrecord; 944 if (nextrecord == NULL) 945 so->so_rcv.sb_lastrecord = m; 946 } else { 947 so->so_rcv.sb_mb = nextrecord; 948 SB_EMPTY_FIXUP(&so->so_rcv); 949 } 950 SBLASTRECORDCHK(&so->so_rcv, "soreceive 3"); 951 SBLASTMBUFCHK(&so->so_rcv, "soreceive 3"); 952 } 953 } else { 954 if (flags & MSG_PEEK) 955 moff += len; 956 else { 957 if (mp) 958 *mp = m_copym(m, 0, len, M_WAIT); 959 m->m_data += len; 960 m->m_len -= len; 961 so->so_rcv.sb_cc -= len; 962 so->so_rcv.sb_datacc -= len; 963 } 964 } 965 if (so->so_oobmark) { 966 if ((flags & MSG_PEEK) == 0) { 967 so->so_oobmark -= len; 968 if (so->so_oobmark == 0) { 969 so->so_state |= SS_RCVATMARK; 970 break; 971 } 972 } else { 973 offset += len; 974 if (offset == so->so_oobmark) 975 break; 976 } 977 } 978 if (flags & MSG_EOR) 979 break; 980 /* 981 * If the MSG_WAITALL flag is set (for non-atomic socket), 982 * we must not quit until "uio->uio_resid == 0" or an error 983 * termination. If a signal/timeout occurs, return 984 * with a short count but without error. 985 * Keep sockbuf locked against other readers. 986 */ 987 while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 && 988 !sosendallatonce(so) && !nextrecord) { 989 if (so->so_error || so->so_state & SS_CANTRCVMORE) 990 break; 991 SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); 992 SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); 993 error = sbwait(so, &so->so_rcv); 994 if (error) { 995 sbunlock(so, &so->so_rcv); 996 sounlock(so, s); 997 return (0); 998 } 999 if ((m = so->so_rcv.sb_mb) != NULL) 1000 nextrecord = m->m_nextpkt; 1001 } 1002 } 1003 1004 if (m && pr->pr_flags & PR_ATOMIC) { 1005 flags |= MSG_TRUNC; 1006 if ((flags & MSG_PEEK) == 0) 1007 (void) sbdroprecord(&so->so_rcv); 1008 } 1009 if ((flags & MSG_PEEK) == 0) { 1010 if (m == NULL) { 1011 /* 1012 * First part is an inline SB_EMPTY_FIXUP(). Second 1013 * part makes sure sb_lastrecord is up-to-date if 1014 * there is still data in the socket buffer. 
1015 */ 1016 so->so_rcv.sb_mb = nextrecord; 1017 if (so->so_rcv.sb_mb == NULL) { 1018 so->so_rcv.sb_mbtail = NULL; 1019 so->so_rcv.sb_lastrecord = NULL; 1020 } else if (nextrecord->m_nextpkt == NULL) 1021 so->so_rcv.sb_lastrecord = nextrecord; 1022 } 1023 SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); 1024 SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); 1025 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) 1026 (*pr->pr_usrreq)(so, PRU_RCVD, NULL, 1027 (struct mbuf *)(long)flags, NULL, curproc); 1028 } 1029 if (orig_resid == uio->uio_resid && orig_resid && 1030 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { 1031 sbunlock(so, &so->so_rcv); 1032 goto restart; 1033 } 1034 1035 if (uio_error) 1036 error = uio_error; 1037 1038 if (flagsp) 1039 *flagsp |= flags; 1040 release: 1041 sbunlock(so, &so->so_rcv); 1042 sounlock(so, s); 1043 return (error); 1044 } 1045 1046 int 1047 soshutdown(struct socket *so, int how) 1048 { 1049 const struct protosw *pr = so->so_proto; 1050 int s, error = 0; 1051 1052 s = solock(so); 1053 switch (how) { 1054 case SHUT_RD: 1055 sorflush(so); 1056 break; 1057 case SHUT_RDWR: 1058 sorflush(so); 1059 /* FALLTHROUGH */ 1060 case SHUT_WR: 1061 error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL, 1062 curproc); 1063 break; 1064 default: 1065 error = EINVAL; 1066 break; 1067 } 1068 sounlock(so, s); 1069 1070 return (error); 1071 } 1072 1073 void 1074 sorflush(struct socket *so) 1075 { 1076 struct sockbuf *sb = &so->so_rcv; 1077 const struct protosw *pr = so->so_proto; 1078 struct socket aso; 1079 int error; 1080 1081 sb->sb_flags |= SB_NOINTR; 1082 error = sblock(so, sb, M_WAITOK); 1083 /* with SB_NOINTR and M_WAITOK sblock() must not fail */ 1084 KASSERT(error == 0); 1085 socantrcvmore(so); 1086 sbunlock(so, sb); 1087 aso.so_proto = pr; 1088 aso.so_rcv = *sb; 1089 memset(&sb->sb_startzero, 0, 1090 (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero); 1091 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) 1092 (*pr->pr_domain->dom_dispose)(aso.so_rcv.sb_mb); 1093 sbrelease(&aso, &aso.so_rcv); 1094 } 1095 1096 #ifdef SOCKET_SPLICE 1097 1098 #define so_splicelen so_sp->ssp_len 1099 #define so_splicemax so_sp->ssp_max 1100 #define so_idletv so_sp->ssp_idletv 1101 #define so_idleto so_sp->ssp_idleto 1102 #define so_splicetask so_sp->ssp_task 1103 1104 int 1105 sosplice(struct socket *so, int fd, off_t max, struct timeval *tv) 1106 { 1107 struct file *fp; 1108 struct socket *sosp; 1109 struct sosplice *sp; 1110 struct taskq *tq; 1111 int error = 0; 1112 1113 soassertlocked(so); 1114 1115 if (sosplice_taskq == NULL) { 1116 rw_enter_write(&sosplice_lock); 1117 if (sosplice_taskq == NULL) { 1118 tq = taskq_create("sosplice", 1, IPL_SOFTNET, 1119 TASKQ_MPSAFE); 1120 /* Ensure the taskq is fully visible to other CPUs. */ 1121 membar_producer(); 1122 sosplice_taskq = tq; 1123 } 1124 rw_exit_write(&sosplice_lock); 1125 } 1126 if (sosplice_taskq == NULL) 1127 return (ENOMEM); 1128 1129 if ((so->so_proto->pr_flags & PR_SPLICE) == 0) 1130 return (EPROTONOSUPPORT); 1131 if (so->so_options & SO_ACCEPTCONN) 1132 return (EOPNOTSUPP); 1133 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && 1134 (so->so_proto->pr_flags & PR_CONNREQUIRED)) 1135 return (ENOTCONN); 1136 if (so->so_sp == NULL) { 1137 sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO); 1138 if (so->so_sp == NULL) 1139 so->so_sp = sp; 1140 else 1141 pool_put(&sosplice_pool, sp); 1142 } 1143 1144 /* If no fd is given, unsplice by removing existing link. 
	if (fd < 0) {
		/* Lock receive buffer. */
		if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
			return (error);
		}
		if (so->so_sp->ssp_socket)
			sounsplice(so, so->so_sp->ssp_socket, 1);
		sbunlock(so, &so->so_rcv);
		return (0);
	}

	if (max && max < 0)
		return (EINVAL);

	if (tv && (tv->tv_sec < 0 || tv->tv_usec < 0))
		return (EINVAL);

	/* Find sosp, the drain socket where data will be spliced into. */
	if ((error = getsock(curproc, fd, &fp)) != 0)
		return (error);
	sosp = fp->f_data;
	if (sosp->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (sosp->so_sp == NULL)
			sosp->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* Lock both receive and send buffer. */
	if ((error = sblock(so, &so->so_rcv, M_WAITOK)) != 0) {
		goto frele;
	}
	if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) {
		sbunlock(so, &so->so_rcv);
		goto frele;
	}

	if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) {
		error = EBUSY;
		goto release;
	}
	if (sosp->so_proto->pr_usrreq != so->so_proto->pr_usrreq) {
		error = EPROTONOSUPPORT;
		goto release;
	}
	if (sosp->so_options & SO_ACCEPTCONN) {
		error = EOPNOTSUPP;
		goto release;
	}
	if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) {
		error = ENOTCONN;
		goto release;
	}

	/* Splice so and sosp together. */
	so->so_sp->ssp_socket = sosp;
	sosp->so_sp->ssp_soback = so;
	so->so_splicelen = 0;
	so->so_splicemax = max;
	if (tv)
		so->so_idletv = *tv;
	else
		timerclear(&so->so_idletv);
	timeout_set_proc(&so->so_idleto, soidle, so);
	task_set(&so->so_splicetask, sotask, so);

	/*
	 * To prevent softnet interrupt from calling somove() while
	 * we sleep, the socket buffers are not marked as spliced yet.
	 */
	if (somove(so, M_WAIT)) {
		so->so_rcv.sb_flags |= SB_SPLICE;
		sosp->so_snd.sb_flags |= SB_SPLICE;
	}

release:
	sbunlock(sosp, &sosp->so_snd);
	sbunlock(so, &so->so_rcv);
frele:
	FRELE(fp, curproc);
	return (error);
}
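
/*
 * Illustrative userland sketch (not part of the original file):
 * driving sosplice() above via the SO_SPLICE option.  A struct splice
 * names the drain fd plus an optional byte maximum and idle timeout;
 * a plain int fd of -1 unsplices.  "from" and "to" are placeholder
 * descriptors for two connected TCP sockets.
 *
 *	struct splice sp = { .sp_fd = to, .sp_max = 0 };   // no maximum
 *	sp.sp_idle.tv_sec = 30;		// ETIMEDOUT after 30s idle
 *	setsockopt(from, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp));
 *	...
 *	int off = -1;
 *	setsockopt(from, SOL_SOCKET, SO_SPLICE, &off, sizeof(off));
 */
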
void
sounsplice(struct socket *so, struct socket *sosp, int wakeup)
{
	soassertlocked(so);

	task_del(sosplice_taskq, &so->so_splicetask);
	timeout_del(&so->so_idleto);
	sosp->so_snd.sb_flags &= ~SB_SPLICE;
	so->so_rcv.sb_flags &= ~SB_SPLICE;
	so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
	if (wakeup && soreadable(so))
		sorwakeup(so);
}

void
soidle(void *arg)
{
	struct socket *so = arg;
	int s;

	s = solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		so->so_error = ETIMEDOUT;
		sounsplice(so, so->so_sp->ssp_socket, 1);
	}
	sounlock(so, s);
}

void
sotask(void *arg)
{
	struct socket *so = arg;
	int s;

	s = solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * We may not sleep here as sofree() and unsplice() may be
		 * called from softnet interrupt context.  This would remove
		 * the socket during somove().
		 */
		somove(so, M_DONTWAIT);
	}
	sounlock(so, s);

	/* Avoid user land starvation. */
	yield();
}

/*
 * The socket splicing task or idle timeout may sleep while grabbing the net
 * lock.  As sofree() can be called anytime, sotask() or soidle() could access
 * the socket memory of a freed socket after wakeup.  So delay the pool_put()
 * after all pending socket splicing tasks or timeouts have finished.  Do this
 * by scheduling it on the same threads.
 */
void
soreaper(void *arg)
{
	struct socket *so = arg;

	/* Reuse splice task, sounsplice() has been called before. */
	task_set(&so->so_sp->ssp_task, soput, so);
	task_add(sosplice_taskq, &so->so_sp->ssp_task);
}

void
soput(void *arg)
{
	struct socket *so = arg;

	pool_put(&sosplice_pool, so->so_sp);
	pool_put(&socket_pool, so);
}

/*
 * Move data from receive buffer of spliced source socket to send
 * buffer of drain socket.  Try to move as much as possible in one
 * big chunk.  It is a TCP only implementation.
 * Return value 0 means splicing has been finished, 1 continue.
 */
int
somove(struct socket *so, int wait)
{
	struct socket *sosp = so->so_sp->ssp_socket;
	struct mbuf *m, **mp, *nextrecord;
	u_long len, off, oobmark;
	long space;
	int error = 0, maxreached = 0;
	unsigned int state;

	soassertlocked(so);

nextpkt:
	if (so->so_error) {
		error = so->so_error;
		goto release;
	}
	if (sosp->so_state & SS_CANTSENDMORE) {
		error = EPIPE;
		goto release;
	}
	if (sosp->so_error && sosp->so_error != ETIMEDOUT &&
	    sosp->so_error != EFBIG && sosp->so_error != ELOOP) {
		error = sosp->so_error;
		goto release;
	}
	if ((sosp->so_state & SS_ISCONNECTED) == 0)
		goto release;

	/* Calculate how many bytes can be copied now. */
	len = so->so_rcv.sb_datacc;
	if (so->so_splicemax) {
		KASSERT(so->so_splicelen < so->so_splicemax);
		if (so->so_splicemax <= so->so_splicelen + len) {
			len = so->so_splicemax - so->so_splicelen;
			maxreached = 1;
		}
	}
	space = sbspace(sosp, &sosp->so_snd);
	if (so->so_oobmark && so->so_oobmark < len &&
	    so->so_oobmark < space + 1024)
		space += 1024;
	if (space <= 0) {
		maxreached = 0;
		goto release;
	}
	if (space < len) {
		maxreached = 0;
		if (space < sosp->so_snd.sb_lowat)
			goto release;
		len = space;
	}
	sosp->so_state |= SS_ISSENDING;

	SBLASTRECORDCHK(&so->so_rcv, "somove 1");
	SBLASTMBUFCHK(&so->so_rcv, "somove 1");
	m = so->so_rcv.sb_mb;
	if (m == NULL)
		goto release;
	nextrecord = m->m_nextpkt;

	/* Drop address and control information not used with splicing. */
	if (so->so_proto->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("somove soname: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
#endif
		m = m->m_next;
	}
	while (m && m->m_type == MT_CONTROL)
		m = m->m_next;
	if (m == NULL) {
		sbdroprecord(&so->so_rcv);
		if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
			(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
			    NULL, NULL, NULL);
		goto nextpkt;
	}

	/*
	 * By splicing sockets connected to localhost, userland might create a
	 * loop.  Dissolve splicing with error if loop is detected by counter.
1393 */ 1394 if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) { 1395 error = ELOOP; 1396 goto release; 1397 } 1398 1399 if (so->so_proto->pr_flags & PR_ATOMIC) { 1400 if ((m->m_flags & M_PKTHDR) == 0) 1401 panic("somove !PKTHDR: so %p, so_type %d, m %p, " 1402 "m_type %d", so, so->so_type, m, m->m_type); 1403 if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) { 1404 error = EMSGSIZE; 1405 goto release; 1406 } 1407 if (len < m->m_pkthdr.len) 1408 goto release; 1409 if (m->m_pkthdr.len < len) { 1410 maxreached = 0; 1411 len = m->m_pkthdr.len; 1412 } 1413 /* 1414 * Throw away the name mbuf after it has been assured 1415 * that the whole first record can be processed. 1416 */ 1417 m = so->so_rcv.sb_mb; 1418 sbfree(&so->so_rcv, m); 1419 so->so_rcv.sb_mb = m_free(m); 1420 sbsync(&so->so_rcv, nextrecord); 1421 } 1422 /* 1423 * Throw away the control mbufs after it has been assured 1424 * that the whole first record can be processed. 1425 */ 1426 m = so->so_rcv.sb_mb; 1427 while (m && m->m_type == MT_CONTROL) { 1428 sbfree(&so->so_rcv, m); 1429 so->so_rcv.sb_mb = m_free(m); 1430 m = so->so_rcv.sb_mb; 1431 sbsync(&so->so_rcv, nextrecord); 1432 } 1433 1434 SBLASTRECORDCHK(&so->so_rcv, "somove 2"); 1435 SBLASTMBUFCHK(&so->so_rcv, "somove 2"); 1436 1437 /* Take at most len mbufs out of receive buffer. */ 1438 for (off = 0, mp = &m; off <= len && *mp; 1439 off += (*mp)->m_len, mp = &(*mp)->m_next) { 1440 u_long size = len - off; 1441 1442 #ifdef DIAGNOSTIC 1443 if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER) 1444 panic("somove type: so %p, so_type %d, m %p, " 1445 "m_type %d", so, so->so_type, *mp, (*mp)->m_type); 1446 #endif 1447 if ((*mp)->m_len > size) { 1448 /* 1449 * Move only a partial mbuf at maximum splice length or 1450 * if the drain buffer is too small for this large mbuf. 1451 */ 1452 if (!maxreached && so->so_snd.sb_datacc > 0) { 1453 len -= size; 1454 break; 1455 } 1456 *mp = m_copym(so->so_rcv.sb_mb, 0, size, wait); 1457 if (*mp == NULL) { 1458 len -= size; 1459 break; 1460 } 1461 so->so_rcv.sb_mb->m_data += size; 1462 so->so_rcv.sb_mb->m_len -= size; 1463 so->so_rcv.sb_cc -= size; 1464 so->so_rcv.sb_datacc -= size; 1465 } else { 1466 *mp = so->so_rcv.sb_mb; 1467 sbfree(&so->so_rcv, *mp); 1468 so->so_rcv.sb_mb = (*mp)->m_next; 1469 sbsync(&so->so_rcv, nextrecord); 1470 } 1471 } 1472 *mp = NULL; 1473 1474 SBLASTRECORDCHK(&so->so_rcv, "somove 3"); 1475 SBLASTMBUFCHK(&so->so_rcv, "somove 3"); 1476 SBCHECK(&so->so_rcv); 1477 if (m == NULL) 1478 goto release; 1479 m->m_nextpkt = NULL; 1480 if (m->m_flags & M_PKTHDR) { 1481 m_resethdr(m); 1482 m->m_pkthdr.len = len; 1483 } 1484 1485 /* Send window update to source peer as receive buffer has changed. */ 1486 if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb) 1487 (so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL, 1488 NULL, NULL, NULL); 1489 1490 /* Receive buffer did shrink by len bytes, adjust oob. */ 1491 state = so->so_state; 1492 so->so_state &= ~SS_RCVATMARK; 1493 oobmark = so->so_oobmark; 1494 so->so_oobmark = oobmark > len ? oobmark - len : 0; 1495 if (oobmark) { 1496 if (oobmark == len) 1497 so->so_state |= SS_RCVATMARK; 1498 if (oobmark >= len) 1499 oobmark = 0; 1500 } 1501 1502 /* 1503 * Handle oob data. If any malloc fails, ignore error. 1504 * TCP urgent data is not very reliable anyway. 
1505 */ 1506 while (((state & SS_RCVATMARK) || oobmark) && 1507 (so->so_options & SO_OOBINLINE)) { 1508 struct mbuf *o = NULL; 1509 1510 if (state & SS_RCVATMARK) { 1511 o = m_get(wait, MT_DATA); 1512 state &= ~SS_RCVATMARK; 1513 } else if (oobmark) { 1514 o = m_split(m, oobmark, wait); 1515 if (o) { 1516 error = (*sosp->so_proto->pr_usrreq)(sosp, 1517 PRU_SEND, m, NULL, NULL, NULL); 1518 if (error) { 1519 if (sosp->so_state & SS_CANTSENDMORE) 1520 error = EPIPE; 1521 m_freem(o); 1522 goto release; 1523 } 1524 len -= oobmark; 1525 so->so_splicelen += oobmark; 1526 m = o; 1527 o = m_get(wait, MT_DATA); 1528 } 1529 oobmark = 0; 1530 } 1531 if (o) { 1532 o->m_len = 1; 1533 *mtod(o, caddr_t) = *mtod(m, caddr_t); 1534 error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SENDOOB, 1535 o, NULL, NULL, NULL); 1536 if (error) { 1537 if (sosp->so_state & SS_CANTSENDMORE) 1538 error = EPIPE; 1539 m_freem(m); 1540 goto release; 1541 } 1542 len -= 1; 1543 so->so_splicelen += 1; 1544 if (oobmark) { 1545 oobmark -= 1; 1546 if (oobmark == 0) 1547 state |= SS_RCVATMARK; 1548 } 1549 m_adj(m, 1); 1550 } 1551 } 1552 1553 /* Append all remaining data to drain socket. */ 1554 if (so->so_rcv.sb_cc == 0 || maxreached) 1555 sosp->so_state &= ~SS_ISSENDING; 1556 error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SEND, m, NULL, NULL, 1557 NULL); 1558 if (error) { 1559 if (sosp->so_state & SS_CANTSENDMORE) 1560 error = EPIPE; 1561 goto release; 1562 } 1563 so->so_splicelen += len; 1564 1565 /* Move several packets if possible. */ 1566 if (!maxreached && nextrecord) 1567 goto nextpkt; 1568 1569 release: 1570 sosp->so_state &= ~SS_ISSENDING; 1571 if (!error && maxreached && so->so_splicemax == so->so_splicelen) 1572 error = EFBIG; 1573 if (error) 1574 so->so_error = error; 1575 if (((so->so_state & SS_CANTRCVMORE) && so->so_rcv.sb_cc == 0) || 1576 (sosp->so_state & SS_CANTSENDMORE) || maxreached || error) { 1577 sounsplice(so, sosp, 1); 1578 return (0); 1579 } 1580 if (timerisset(&so->so_idletv)) 1581 timeout_add_tv(&so->so_idleto, &so->so_idletv); 1582 return (1); 1583 } 1584 1585 #endif /* SOCKET_SPLICE */ 1586 1587 void 1588 sorwakeup(struct socket *so) 1589 { 1590 soassertlocked(so); 1591 1592 #ifdef SOCKET_SPLICE 1593 if (so->so_rcv.sb_flags & SB_SPLICE) { 1594 /* 1595 * TCP has a sendbuffer that can handle multiple packets 1596 * at once. So queue the stream a bit to accumulate data. 1597 * The sosplice thread will call somove() later and send 1598 * the packets calling tcp_output() only once. 1599 * In the UDP case, send out the packets immediately. 1600 * Using a thread would make things slower. 
1601 */ 1602 if (so->so_proto->pr_flags & PR_WANTRCVD) 1603 task_add(sosplice_taskq, &so->so_splicetask); 1604 else 1605 somove(so, M_DONTWAIT); 1606 } 1607 if (isspliced(so)) 1608 return; 1609 #endif 1610 sowakeup(so, &so->so_rcv); 1611 if (so->so_upcall) 1612 (*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT); 1613 } 1614 1615 void 1616 sowwakeup(struct socket *so) 1617 { 1618 soassertlocked(so); 1619 1620 #ifdef SOCKET_SPLICE 1621 if (so->so_snd.sb_flags & SB_SPLICE) 1622 task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask); 1623 #endif 1624 sowakeup(so, &so->so_snd); 1625 } 1626 1627 int 1628 sosetopt(struct socket *so, int level, int optname, struct mbuf *m) 1629 { 1630 int error = 0; 1631 1632 soassertlocked(so); 1633 1634 if (level != SOL_SOCKET) { 1635 if (so->so_proto->pr_ctloutput) { 1636 error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so, 1637 level, optname, m); 1638 return (error); 1639 } 1640 error = ENOPROTOOPT; 1641 } else { 1642 switch (optname) { 1643 case SO_BINDANY: 1644 if ((error = suser(curproc)) != 0) /* XXX */ 1645 return (error); 1646 break; 1647 } 1648 1649 switch (optname) { 1650 1651 case SO_LINGER: 1652 if (m == NULL || m->m_len != sizeof (struct linger) || 1653 mtod(m, struct linger *)->l_linger < 0 || 1654 mtod(m, struct linger *)->l_linger > SHRT_MAX) 1655 return (EINVAL); 1656 so->so_linger = mtod(m, struct linger *)->l_linger; 1657 /* FALLTHROUGH */ 1658 1659 case SO_BINDANY: 1660 case SO_DEBUG: 1661 case SO_KEEPALIVE: 1662 case SO_USELOOPBACK: 1663 case SO_BROADCAST: 1664 case SO_REUSEADDR: 1665 case SO_REUSEPORT: 1666 case SO_OOBINLINE: 1667 case SO_TIMESTAMP: 1668 case SO_ZEROIZE: 1669 if (m == NULL || m->m_len < sizeof (int)) 1670 return (EINVAL); 1671 if (*mtod(m, int *)) 1672 so->so_options |= optname; 1673 else 1674 so->so_options &= ~optname; 1675 break; 1676 1677 case SO_DONTROUTE: 1678 if (m == NULL || m->m_len < sizeof (int)) 1679 return (EINVAL); 1680 if (*mtod(m, int *)) 1681 error = EOPNOTSUPP; 1682 break; 1683 1684 case SO_SNDBUF: 1685 case SO_RCVBUF: 1686 case SO_SNDLOWAT: 1687 case SO_RCVLOWAT: 1688 { 1689 u_long cnt; 1690 1691 if (m == NULL || m->m_len < sizeof (int)) 1692 return (EINVAL); 1693 cnt = *mtod(m, int *); 1694 if ((long)cnt <= 0) 1695 cnt = 1; 1696 switch (optname) { 1697 1698 case SO_SNDBUF: 1699 if (so->so_state & SS_CANTSENDMORE) 1700 return (EINVAL); 1701 if (sbcheckreserve(cnt, so->so_snd.sb_wat) || 1702 sbreserve(so, &so->so_snd, cnt)) 1703 return (ENOBUFS); 1704 so->so_snd.sb_wat = cnt; 1705 break; 1706 1707 case SO_RCVBUF: 1708 if (so->so_state & SS_CANTRCVMORE) 1709 return (EINVAL); 1710 if (sbcheckreserve(cnt, so->so_rcv.sb_wat) || 1711 sbreserve(so, &so->so_rcv, cnt)) 1712 return (ENOBUFS); 1713 so->so_rcv.sb_wat = cnt; 1714 break; 1715 1716 case SO_SNDLOWAT: 1717 so->so_snd.sb_lowat = 1718 (cnt > so->so_snd.sb_hiwat) ? 1719 so->so_snd.sb_hiwat : cnt; 1720 break; 1721 case SO_RCVLOWAT: 1722 so->so_rcv.sb_lowat = 1723 (cnt > so->so_rcv.sb_hiwat) ? 
				    so->so_rcv.sb_hiwat : cnt;
				break;
			}
			break;
		    }

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			int val;

			if (m == NULL || m->m_len < sizeof (tv))
				return (EINVAL);
			memcpy(&tv, mtod(m, struct timeval *), sizeof tv);
			val = tvtohz(&tv);
			if (val > USHRT_MAX)
				return (EDOM);

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				struct domain *dom = so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_SETOPT, so, level, optname, m);
				return (error);
			}
			error = ENOPROTOOPT;
			break;

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
			if (m == NULL) {
				error = sosplice(so, -1, 0, NULL);
			} else if (m->m_len < sizeof(int)) {
				return (EINVAL);
			} else if (m->m_len < sizeof(struct splice)) {
				error = sosplice(so, *mtod(m, int *), 0, NULL);
			} else {
				error = sosplice(so,
				    mtod(m, struct splice *)->sp_fd,
				    mtod(m, struct splice *)->sp_max,
				    &mtod(m, struct splice *)->sp_idle);
			}
			break;
#endif /* SOCKET_SPLICE */

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto->pr_ctloutput) {
			(*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
		}
	}

	return (error);
}
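
/*
 * Illustrative userland sketch (not part of the original file):
 * SO_RCVTIMEO as handled by sosetopt() above.  The timeval is
 * converted to ticks with tvtohz(); results above USHRT_MAX are
 * rejected with EDOM.  "s" is a placeholder socket.
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *	// a later blocking recv(2) fails with EWOULDBLOCK after 5s
 */
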
int
sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			m->m_len = 0;

			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
			    level, optname, m);
			if (error)
				return (error);
			return (0);
		} else
			return (ENOPROTOOPT);
	} else {
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
			    so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_DONTROUTE:
			*mtod(m, int *) = 0;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			int val = (optname == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			memset(&tv, 0, sizeof(tv));
			tv.tv_sec = val / hz;
			tv.tv_usec = (val % hz) * tick;
			memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				struct domain *dom = so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_GETOPT, so, level, optname, m);
				if (error)
					return (error);
				break;
			}
			return (ENOPROTOOPT);

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
		    {
			off_t len;

			m->m_len = sizeof(off_t);
			len = so->so_sp ? so->so_sp->ssp_len : 0;
			memcpy(mtod(m, off_t *), &len, sizeof(off_t));
			break;
		    }
#endif /* SOCKET_SPLICE */

		case SO_PEERCRED:
			if (so->so_proto->pr_protocol == AF_UNIX) {
				struct unpcb *unp = sotounpcb(so);

				if (unp->unp_flags & UNP_FEIDS) {
					m->m_len = sizeof(unp->unp_connid);
					memcpy(mtod(m, caddr_t),
					    &(unp->unp_connid), m->m_len);
					break;
				}
				return (ENOTCONN);
			}
			return (EOPNOTSUPP);

		default:
			return (ENOPROTOOPT);
		}
		return (0);
	}
}

void
sohasoutofband(struct socket *so)
{
	KERNEL_LOCK();
	pgsigio(&so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
	KERNEL_UNLOCK();
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	KERNEL_ASSERT_LOCKED();

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flagsintr |= SB_KNOTE;

	return (0);
}

void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	KERNEL_ASSERT_LOCKED();

	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flagsintr &= ~SB_KNOTE;
}

int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	kn->kn_data = so->so_rcv.sb_cc;
#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
	}

	return rv;
}
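
/*
 * Illustrative userland sketch (not part of the original file): the
 * knote filters above as seen through kevent(2).  NOTE_LOWAT makes
 * filt_soread()/filt_sowrite() compare against the caller-supplied
 * threshold in kn_sdata instead of the buffer's sb_lowat.  "kq" and
 * "s" are placeholders.
 *
 *	struct kevent kev;
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 16, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// filt_soread() reports the event once >= 16 bytes sit in so_rcv
 */
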
void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	KERNEL_ASSERT_LOCKED();

	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flagsintr &= ~SB_KNOTE;
}

int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	kn->kn_data = sbspace(so, &so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		rv = 0;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	}

	return (rv);
}

int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;

	return (kn->kn_data != 0);
}

#ifdef DDB
void
sobuf_print(struct sockbuf *,
    int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))));

void
sobuf_print(struct sockbuf *sb,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	(*pr)("\tsb_cc: %lu\n", sb->sb_cc);
	(*pr)("\tsb_datacc: %lu\n", sb->sb_datacc);
	(*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat);
	(*pr)("\tsb_wat: %lu\n", sb->sb_wat);
	(*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt);
	(*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax);
	(*pr)("\tsb_lowat: %ld\n", sb->sb_lowat);
	(*pr)("\tsb_mb: %p\n", sb->sb_mb);
	(*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail);
	(*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord);
	(*pr)("\tsb_sel: ...\n");
	(*pr)("\tsb_flagsintr: %d\n", sb->sb_flagsintr);
	(*pr)("\tsb_flags: %i\n", sb->sb_flags);
	(*pr)("\tsb_timeo: %i\n", sb->sb_timeo);
}

void
so_print(void *v,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct socket *so = v;

	(*pr)("socket %p\n", so);
	(*pr)("so_type: %i\n", so->so_type);
	(*pr)("so_options: 0x%04x\n", so->so_options); /* %b */
	(*pr)("so_linger: %i\n", so->so_linger);
	(*pr)("so_state: 0x%04x\n", so->so_state);
	(*pr)("so_pcb: %p\n", so->so_pcb);
	(*pr)("so_proto: %p\n", so->so_proto);
	(*pr)("so_sigio: %p\n", so->so_sigio.sir_sigio);

	(*pr)("so_head: %p\n", so->so_head);
	(*pr)("so_onq: %p\n", so->so_onq);
	(*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0));
	(*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q));
	(*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe));
	(*pr)("so_q0len: %i\n", so->so_q0len);
	(*pr)("so_qlen: %i\n", so->so_qlen);
	(*pr)("so_qlimit: %i\n", so->so_qlimit);
	(*pr)("so_timeo: %i\n", so->so_timeo);
	(*pr)("so_oobmark: %lu\n", so->so_oobmark);

	(*pr)("so_sp: %p\n", so->so_sp);
	if (so->so_sp != NULL) {
		(*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket);
		(*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback);
		(*pr)("\tssp_len: %lld\n",
		    (unsigned long long)so->so_sp->ssp_len);
		(*pr)("\tssp_max: %lld\n",
		    (unsigned long long)so->so_sp->ssp_max);
		(*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec,
		    so->so_sp->ssp_idletv.tv_usec);
		(*pr)("\tssp_idleto: %spending (@%i)\n",
		    timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ",
		    so->so_sp->ssp_idleto.to_time);
	}

	(*pr)("so_rcv:\n");
	sobuf_print(&so->so_rcv, pr);
	(*pr)("so_snd:\n");
	sobuf_print(&so->so_snd, pr);

	(*pr)("so_upcall: %p so_upcallarg: %p\n",
	    so->so_upcall, so->so_upcallarg);

	(*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid);
	(*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid);
	(*pr)("so_cpid: %d\n", so->so_cpid);
}
#endif