/*	$OpenBSD: uipc_socket.c,v 1.218 2018/03/01 14:11:11 bluhm Exp $	*/
/*	$NetBSD: uipc_socket.c,v 1.21 1996/02/04 02:17:52 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/unpcb.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <net/if.h>
#include <sys/pool.h>

#ifdef DDB
#include <machine/db_machdep.h>
#endif

void	sbsync(struct sockbuf *, struct mbuf *);

int	sosplice(struct socket *, int, off_t, struct timeval *);
void	sounsplice(struct socket *, struct socket *, int);
void	soidle(void *);
void	sotask(void *);
void	soput(void *);
int	somove(struct socket *, int);

void	filt_sordetach(struct knote *kn);
int	filt_soread(struct knote *kn, long hint);
void	filt_sowdetach(struct knote *kn);
int	filt_sowrite(struct knote *kn, long hint);
int	filt_solisten(struct knote *kn, long hint);

struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };


#ifndef SOMINCONN
#define SOMINCONN 80
#endif /* SOMINCONN */

int	somaxconn = SOMAXCONN;
int	sominconn = SOMINCONN;

struct pool socket_pool;
#ifdef SOCKET_SPLICE
struct pool sosplice_pool;
struct taskq *sosplice_taskq;
#endif

void
soinit(void)
{
	pool_init(&socket_pool, sizeof(struct socket), 0, IPL_SOFTNET, 0,
	    "sockpl", NULL);
#ifdef SOCKET_SPLICE
	pool_init(&sosplice_pool, sizeof(struct sosplice), 0, IPL_SOFTNET, 0,
	    "sosppl", NULL);
#endif
}

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
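/*
 * A minimal sketch of the dispatch pattern used throughout this file:
 * with the socket locked, an operation is handed to the protocol through
 * its user-request hook, e.g.
 *
 *	soassertlocked(so);
 *	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
 *
 * The PRU_* constant selects the operation; the protocol's usrreq
 * handler interprets the remaining mbuf arguments.
 */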
int
socreate(int dom, struct socket **aso, int type, int proto)
{
	struct proc *p = curproc;		/* XXX */
	const struct protosw *prp;
	struct socket *so;
	int error, s;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);
	if (prp == NULL || prp->pr_attach == NULL)
		return (EPROTONOSUPPORT);
	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = pool_get(&socket_pool, PR_WAITOK | PR_ZERO);
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
	so->so_type = type;
	if (suser(p) == 0)
		so->so_state = SS_PRIV;
	so->so_ruid = p->p_ucred->cr_ruid;
	so->so_euid = p->p_ucred->cr_uid;
	so->so_rgid = p->p_ucred->cr_rgid;
	so->so_egid = p->p_ucred->cr_gid;
	so->so_cpid = p->p_p->ps_pid;
	so->so_proto = prp;

	s = solock(so);
	error = (*prp->pr_attach)(so, proto);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		sounlock(s);
		return (error);
	}
	sounlock(s);
	*aso = so;
	return (0);
}

int
sobind(struct socket *so, struct mbuf *nam, struct proc *p)
{
	int error;

	soassertlocked(so);

	error = (*so->so_proto->pr_usrreq)(so, PRU_BIND, NULL, nam, NULL, p);
	return (error);
}

int
solisten(struct socket *so, int backlog)
{
	int s, error;

	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING))
		return (EOPNOTSUPP);
#ifdef SOCKET_SPLICE
	if (isspliced(so) || issplicedback(so))
		return (EOPNOTSUPP);
#endif /* SOCKET_SPLICE */
	s = solock(so);
	error = (*so->so_proto->pr_usrreq)(so, PRU_LISTEN, NULL, NULL, NULL,
	    curproc);
	if (error) {
		sounlock(s);
		return (error);
	}
	if (TAILQ_FIRST(&so->so_q) == NULL)
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	if (backlog < sominconn)
		backlog = sominconn;
	so->so_qlimit = backlog;
	sounlock(s);
	return (0);
}

void
sofree(struct socket *so)
{
	soassertlocked(so);

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (so->so_head) {
		/*
		 * We must not decommission a socket that's on the accept(2)
		 * queue.  If we do, then accept(2) may hang after select(2)
		 * indicated that the listening socket was ready.
		 */
		if (!soqremque(so, 0))
			return;
	}
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		if (issplicedback(so))
			sounsplice(so->so_sp->ssp_soback, so,
			    so->so_sp->ssp_soback != so);
		if (isspliced(so))
			sounsplice(so, so->so_sp->ssp_socket, 0);
	}
#endif /* SOCKET_SPLICE */
	sbrelease(so, &so->so_snd);
	sorflush(so);
#ifdef SOCKET_SPLICE
	if (so->so_sp) {
		/* Reuse splice task, sounsplice() has been called before. */
		task_set(&so->so_sp->ssp_task, soput, so);
		task_add(sosplice_taskq, &so->so_sp->ssp_task);
	} else
#endif /* SOCKET_SPLICE */
	{
		pool_put(&socket_pool, so);
	}
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(struct socket *so)
{
	struct socket *so2;
	int s, error = 0;

	s = solock(so);
	if (so->so_options & SO_ACCEPTCONN) {
		while ((so2 = TAILQ_FIRST(&so->so_q0)) != NULL) {
			(void) soqremque(so2, 0);
			(void) soabort(so2);
		}
		while ((so2 = TAILQ_FIRST(&so->so_q)) != NULL) {
			(void) soqremque(so2, 1);
			(void) soabort(so2);
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = sosleep(so, &so->so_timeo,
				    PSOCK | PCATCH, "netcls",
				    so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2;
		KASSERT(so->so_proto->pr_detach);
		error2 = (*so->so_proto->pr_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state |= SS_NOFDREF;
	sofree(so);
	sounlock(s);
	return (error);
}
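/*
 * The SO_LINGER path above makes close(2) synchronous.  A hedged userland
 * sketch (illustrative only) of how the "netcls" sleep is triggered:
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(s);	-- soclose() now waits up to 5 seconds
 *			   (so_linger * hz ticks) for the disconnect
 *
 * On a non-blocking socket (SS_NBIO) the wait is skipped entirely.
 */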
int
soabort(struct socket *so)
{
	soassertlocked(so);

	return (*so->so_proto->pr_usrreq)(so, PRU_ABORT, NULL, NULL, NULL,
	    curproc);
}

int
soaccept(struct socket *so, struct mbuf *nam)
{
	int error = 0;

	soassertlocked(so);

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept !NOFDREF: so %p, so_type %d", so, so->so_type);
	so->so_state &= ~SS_NOFDREF;
	if ((so->so_state & SS_ISDISCONNECTED) == 0 ||
	    (so->so_proto->pr_flags & PR_ABRTACPTDIS) == 0)
		error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT, NULL,
		    nam, NULL, curproc);
	else
		error = ECONNABORTED;
	return (error);
}

int
soconnect(struct socket *so, struct mbuf *nam)
{
	int error;

	soassertlocked(so);

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
		    NULL, nam, NULL, curproc);
	return (error);
}

int
soconnect2(struct socket *so1, struct socket *so2)
{
	int s, error;

	s = solock(so1);
	error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2, NULL,
	    (struct mbuf *)so2, NULL, curproc);
	sounlock(s);
	return (error);
}

int
sodisconnect(struct socket *so)
{
	int error;

	soassertlocked(so);

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);
	if (so->so_state & SS_ISDISCONNECTING)
		return (EALREADY);
	error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT, NULL, NULL,
	    NULL, curproc);
	return (error);
}

int m_getuio(struct mbuf **, int, long, struct uio *);

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct mbuf *addr, struct uio *uio, struct mbuf *top,
    struct mbuf *control, int flags)
{
	long space, clen = 0;
	size_t resid;
	int error, s;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/* MSG_EOR on a SOCK_STREAM socket is invalid. */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		m_freem(top);
		m_freem(control);
		return (EINVAL);
	}
	if (uio && uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgsnd++;
	if (control) {
		/*
		 * In theory clen should be unsigned (since control->m_len is).
		 * However, space must be signed, as it might be less than 0
		 * if we over-committed, and we must use a signed comparison
		 * of space and clen.
		 */
		clen = control->m_len;
		/* reserve extra space for AF_LOCAL's internalize */
		if (so->so_proto->pr_domain->dom_family == AF_LOCAL &&
		    clen >= CMSG_ALIGN(sizeof(struct cmsghdr)) &&
		    mtod(control, struct cmsghdr *)->cmsg_type == SCM_RIGHTS)
			clen = CMSG_SPACE(
			    (clen - CMSG_ALIGN(sizeof(struct cmsghdr))) *
			    (sizeof(struct fdpass) / sizeof(int)));
	}

#define	snderr(errno)	{ error = errno; goto release; }

	s = solock(so);
restart:
	if ((error = sblock(so, &so->so_snd, SBLOCKWAIT(flags))) != 0)
		goto out;
	so->so_state |= SS_ISSENDING;
	do {
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			snderr(error);
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
				if (!(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(EDESTADDRREQ);
		}
		space = sbspace(so, &so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    (so->so_proto->pr_domain->dom_family != AF_LOCAL &&
		    clen > so->so_snd.sb_hiwat))
			snderr(EMSGSIZE);
		if (space < clen ||
		    (space - clen < resid &&
		    (atomic || space < so->so_snd.sb_lowat))) {
			if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT))
				snderr(EWOULDBLOCK);
			sbunlock(so, &so->so_snd);
			error = sbwait(so, &so->so_snd);
			so->so_state &= ~SS_ISSENDING;
			if (error)
				goto out;
			goto restart;
		}
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else {
				sounlock(s);
				error = m_getuio(&top, atomic, space, uio);
				s = solock(so);
				if (error)
					goto release;
				space -= top->m_pkthdr.len;
				resid = uio->uio_resid;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			}
			if (resid == 0)
				so->so_state &= ~SS_ISSENDING;
			if (top && so->so_options & SO_ZEROIZE)
				top->m_flags |= M_ZEROIZE;
			error = (*so->so_proto->pr_usrreq)(so,
			    (flags & MSG_OOB) ? PRU_SENDOOB : PRU_SEND,
			    top, addr, control, curproc);
			clen = 0;
			control = NULL;
			top = NULL;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	so->so_state &= ~SS_ISSENDING;
	sbunlock(so, &so->so_snd);
out:
	sounlock(s);
	m_freem(top);
	m_freem(control);
	return (error);
}
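/*
 * As the comment above sosend() warns, an interrupted send may leave a
 * short count behind.  A hedged sketch of a userland write loop that
 * copes with short counts and EINTR (names are illustrative, not from
 * this file):
 *
 *	size_t off = 0;
 *	while (off < len) {
 *		ssize_t n = send(s, buf + off, len - off, 0);
 *		if (n == -1) {
 *			if (errno == EINTR)
 *				continue;	-- retry what is left
 *			break;			-- real error
 *		}
 *		off += n;
 *	}
 */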
int
m_getuio(struct mbuf **mp, int atomic, long space, struct uio *uio)
{
	struct mbuf *m, *top = NULL;
	struct mbuf **nextp = &top;
	u_long len, mlen;
	size_t resid = uio->uio_resid;
	int error;

	do {
		if (top == NULL) {
			MGETHDR(m, M_WAIT, MT_DATA);
			mlen = MHLEN;
			m->m_pkthdr.len = 0;
			m->m_pkthdr.ph_ifidx = 0;
		} else {
			MGET(m, M_WAIT, MT_DATA);
			mlen = MLEN;
		}
		/* chain mbuf together */
		*nextp = m;
		nextp = &m->m_next;

		resid = ulmin(resid, space);
		if (resid >= MINCLSIZE) {
			MCLGETI(m, M_NOWAIT, NULL, ulmin(resid, MAXMCLBYTES));
			if ((m->m_flags & M_EXT) == 0)
				MCLGETI(m, M_NOWAIT, NULL, MCLBYTES);
			if ((m->m_flags & M_EXT) == 0)
				goto nopages;
			mlen = m->m_ext.ext_size;
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				m->m_data += max_hdr;
		} else {
nopages:
			len = ulmin(mlen, resid);
			/*
			 * For datagram protocols, leave room
			 * for protocol headers in first mbuf.
			 */
			if (atomic && m == top && len < mlen - max_hdr)
				MH_ALIGN(m, len);
		}

		error = uiomove(mtod(m, caddr_t), len, uio);
		if (error) {
			m_freem(top);
			return (error);
		}

		/* adjust counters */
		resid = uio->uio_resid;
		space -= len;
		m->m_len = len;
		top->m_pkthdr.len += len;

		/* Is there more space and more data? */
	} while (space > 0 && resid > 0);

	*mp = top;
	return 0;
}

/*
 * Following replacement or removal of the first mbuf on the first
 * mbuf chain of a socket buffer, push necessary state changes back
 * into the socket buffer so that other consumers see the values
 * consistently.  'nextrecord' is the caller's locally stored value of
 * the original value of sb->sb_mb->m_nextpkt which must be restored
 * when the lead mbuf changes.  NOTE: 'nextrecord' may be NULL.
 */
void
sbsync(struct sockbuf *sb, struct mbuf *nextrecord)
{

	/*
	 * First, update for the new value of nextrecord.  If necessary,
	 * make it the first record.
	 */
	if (sb->sb_mb != NULL)
		sb->sb_mb->m_nextpkt = nextrecord;
	else
		sb->sb_mb = nextrecord;

	/*
	 * Now update any dependent socket buffer fields to reflect
	 * the new state.  This is an inline of SB_EMPTY_FIXUP, with
	 * the addition of a second clause that takes care of the
	 * case where sb_mb has been updated, but remains the last
	 * record.
	 */
	if (sb->sb_mb == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (sb->sb_mb->m_nextpkt == NULL)
		sb->sb_lastrecord = sb->sb_mb;
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking the network stack for the entire time here,
 * we release the solock() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
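/*
 * A sketch of the record layout soreceive() relies on; the MT_SONAME
 * and MT_CONTROL mbufs are only present if the protocol uses them:
 *
 *	sb_mb -> MT_SONAME -> MT_CONTROL -> MT_DATA -> MT_DATA    (m_next)
 *	             |
 *	         m_nextpkt
 *	             |
 *	             v
 *	         MT_SONAME -> MT_DATA -> ...               (next record)
 */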
int
soreceive(struct socket *so, struct mbuf **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp,
    socklen_t controllen)
{
	struct mbuf *m, **mp;
	struct mbuf *cm;
	u_long len, offset, moff;
	int flags, error, s, type, uio_error = 0;
	const struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	size_t resid, orig_resid = uio->uio_resid;

	mp = mp0;
	if (paddr)
		*paddr = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (so->so_state & SS_NBIO)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		s = solock(so);
		error = (*pr->pr_usrreq)(so, PRU_RCVOOB, m,
		    (struct mbuf *)(long)(flags & MSG_PEEK), NULL, curproc);
		sounlock(s);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    ulmin(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		m_freem(m);
		return (error);
	}
	if (mp)
		*mp = NULL;

	s = solock(so);
restart:
	if ((error = sblock(so, &so->so_rcv, SBLOCKWAIT(flags))) != 0) {
		sounlock(s);
		return (error);
	}

	m = so->so_rcv.sb_mb;
#ifdef SOCKET_SPLICE
	if (isspliced(so))
		m = NULL;
#endif /* SOCKET_SPLICE */
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark,
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
#ifdef DIAGNOSTIC
		if (m == NULL && so->so_rcv.sb_cc)
#ifdef SOCKET_SPLICE
		    if (!isspliced(so))
#endif /* SOCKET_SPLICE */
			panic("receive 1: so %p, so_type %d, sb_cc %lu",
			    so, so->so_type, so->so_rcv.sb_cc);
#endif
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else if (so->so_rcv.sb_cc == 0)
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0 && controlp == NULL)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1");
		sbunlock(so, &so->so_rcv);
		error = sbwait(so, &so->so_rcv);
		if (error) {
			sounlock(s);
			return (error);
		}
		goto restart;
	}
dontblock:
	/*
	 * On entry here, m points to the first record of the socket buffer.
	 * From this point onward, we maintain 'nextrecord' as a cache of the
	 * pointer to the next record in the socket buffer.  We must keep the
	 * various socket buffer pointers and local stack versions of the
	 * pointers in sync, pushing out modifications before operations that
	 * may sleep, and re-reading them afterwards.
	 *
	 * Otherwise, we will race with the network stack appending new data
	 * or records onto the socket buffer by using inconsistent/stale
	 * versions of the field, possibly resulting in socket buffer
	 * corruption.
	 */
	if (uio->uio_procp)
		uio->uio_procp->p_ru.ru_msgrcv++;
	KASSERT(m == so->so_rcv.sb_mb);
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 1");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 1");
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("receive 1a: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		orig_resid = 0;
		if (flags & MSG_PEEK) {
			if (paddr)
				*paddr = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (paddr) {
				*paddr = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copym(m, 0, m->m_len, M_NOWAIT);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m->m_next;
			m->m_nextpkt = m->m_next = NULL;
			cm = m;
			m = so->so_rcv.sb_mb;
			sbsync(&so->so_rcv, nextrecord);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(cm, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS) {
					error =
					    (*pr->pr_domain->dom_externalize)
					    (cm, controllen, flags);
				}
				*controlp = cm;
			} else {
				/*
				 * Dispose of any SCM_RIGHTS message that went
				 * through the read path rather than recv.
				 */
				if (pr->pr_domain->dom_dispose &&
				    mtod(cm, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					pr->pr_domain->dom_dispose(cm);
				m_free(cm);
			}
		}
		if (m != NULL)
			nextrecord = so->so_rcv.sb_mb->m_nextpkt;
		else
			nextrecord = so->so_rcv.sb_mb;
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}

	/* If m is non-NULL, we have some data to read. */
	if (m) {
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
		if (m->m_flags & M_BCAST)
			flags |= MSG_BCAST;
		if (m->m_flags & M_MCAST)
			flags |= MSG_MCAST;
	}
	SBLASTRECORDCHK(&so->so_rcv, "soreceive 2");
	SBLASTMBUFCHK(&so->so_rcv, "soreceive 2");
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
#ifdef DIAGNOSTIC
		else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
			panic("receive 3: so %p, so_type %d, m %p, m_type %d",
			    so, so->so_type, m, m->m_type);
#endif
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == NULL && uio_error == 0) {
			SBLASTRECORDCHK(&so->so_rcv, "soreceive uiomove");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive uiomove");
			resid = uio->uio_resid;
			sounlock(s);
			uio_error = uiomove(mtod(m, caddr_t) + moff, len, uio);
			s = solock(so);
			if (uio_error)
				uio->uio_resid = resid - len;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = NULL;
				} else {
					so->so_rcv.sb_mb = m_free(m);
					m = so->so_rcv.sb_mb;
				}
				/*
				 * If m != NULL, we also know that
				 * so->so_rcv.sb_mb != NULL.
				 */
				KASSERT(so->so_rcv.sb_mb == m);
				if (m) {
					m->m_nextpkt = nextrecord;
					if (nextrecord == NULL)
						so->so_rcv.sb_lastrecord = m;
				} else {
					so->so_rcv.sb_mb = nextrecord;
					SB_EMPTY_FIXUP(&so->so_rcv);
				}
				SBLASTRECORDCHK(&so->so_rcv, "soreceive 3");
				SBLASTMBUFCHK(&so->so_rcv, "soreceive 3");
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
				so->so_rcv.sb_datacc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == NULL && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2");
			SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2");
			error = sbwait(so, &so->so_rcv);
			if (error) {
				sbunlock(so, &so->so_rcv);
				sounlock(s);
				return (0);
			}
			if ((m = so->so_rcv.sb_mb) != NULL)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == NULL) {
			/*
			 * First part is an inline SB_EMPTY_FIXUP().  Second
			 * part makes sure sb_lastrecord is up-to-date if
			 * there is still data in the socket buffer.
			 */
			so->so_rcv.sb_mb = nextrecord;
			if (so->so_rcv.sb_mb == NULL) {
				so->so_rcv.sb_mbtail = NULL;
				so->so_rcv.sb_lastrecord = NULL;
			} else if (nextrecord->m_nextpkt == NULL)
				so->so_rcv.sb_lastrecord = nextrecord;
		}
		SBLASTRECORDCHK(&so->so_rcv, "soreceive 4");
		SBLASTMBUFCHK(&so->so_rcv, "soreceive 4");
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreq)(so, PRU_RCVD, NULL,
			    (struct mbuf *)(long)flags, NULL, curproc);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(so, &so->so_rcv);
		goto restart;
	}

	if (uio_error)
		error = uio_error;

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(so, &so->so_rcv);
	sounlock(s);
	return (error);
}

int
soshutdown(struct socket *so, int how)
{
	const struct protosw *pr = so->so_proto;
	int s, error = 0;

	s = solock(so);
	switch (how) {
	case SHUT_RD:
		sorflush(so);
		break;
	case SHUT_RDWR:
		sorflush(so);
		/* FALLTHROUGH */
	case SHUT_WR:
		error = (*pr->pr_usrreq)(so, PRU_SHUTDOWN, NULL, NULL, NULL,
		    curproc);
		break;
	default:
		error = EINVAL;
		break;
	}
	sounlock(s);

	return (error);
}

void
sorflush(struct socket *so)
{
	struct sockbuf *sb = &so->so_rcv;
	const struct protosw *pr = so->so_proto;
	struct socket aso;
	int error;

	sb->sb_flags |= SB_NOINTR;
	error = sblock(so, sb, M_WAITOK);
	/* with SB_NOINTR and M_WAITOK sblock() must not fail */
	KASSERT(error == 0);
	socantrcvmore(so);
	sbunlock(so, sb);
	aso.so_proto = pr;
	aso.so_rcv = *sb;
	memset(&sb->sb_startzero, 0,
	    (caddr_t)&sb->sb_endzero - (caddr_t)&sb->sb_startzero);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(aso.so_rcv.sb_mb);
	sbrelease(&aso, &aso.so_rcv);
}

#ifdef SOCKET_SPLICE

#define so_splicelen	so_sp->ssp_len
#define so_splicemax	so_sp->ssp_max
#define so_idletv	so_sp->ssp_idletv
#define so_idleto	so_sp->ssp_idleto
#define so_splicetask	so_sp->ssp_task
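/*
 * sosplice() below implements the kernel side of the SO_SPLICE socket
 * option.  A hedged userland sketch of how it is reached through
 * setsockopt(2) (illustrative; see the SO_SPLICE case in sosetopt() for
 * the exact argument parsing):
 *
 *	int drain = ...;		-- move data from s into drain
 *	setsockopt(s, SOL_SOCKET, SO_SPLICE, &drain, sizeof(drain));
 *
 *	struct splice sp = {		-- same, with a 4096 byte maximum
 *		.sp_fd = drain,		   and a 30 second idle timeout
 *		.sp_max = 4096,
 *		.sp_idle = { 30, 0 },
 *	};
 *	setsockopt(s, SOL_SOCKET, SO_SPLICE, &sp, sizeof(sp));
 *
 *	int off = -1;			-- dissolve an existing splice
 *	setsockopt(s, SOL_SOCKET, SO_SPLICE, &off, sizeof(off));
 */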
int
sosplice(struct socket *so, int fd, off_t max, struct timeval *tv)
{
	struct file *fp;
	struct socket *sosp;
	struct sosplice *sp;
	int error = 0;

	soassertlocked(so);

	if (sosplice_taskq == NULL)
		sosplice_taskq = taskq_create("sosplice", 1, IPL_SOFTNET,
		    TASKQ_MPSAFE);
	if (sosplice_taskq == NULL)
		return (ENOMEM);

	if ((so->so_proto->pr_flags & PR_SPLICE) == 0)
		return (EPROTONOSUPPORT);
	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (ENOTCONN);
	if (so->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (so->so_sp == NULL)
			so->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* If no fd is given, unsplice by removing existing link. */
	if (fd < 0) {
		/* Lock receive buffer. */
		if ((error = sblock(so, &so->so_rcv,
		    (so->so_state & SS_NBIO) ? M_NOWAIT : M_WAITOK)) != 0) {
			return (error);
		}
		if (so->so_sp->ssp_socket)
			sounsplice(so, so->so_sp->ssp_socket, 1);
		sbunlock(so, &so->so_rcv);
		return (0);
	}

	if (max && max < 0)
		return (EINVAL);

	if (tv && (tv->tv_sec < 0 || tv->tv_usec < 0))
		return (EINVAL);

	/* Find sosp, the drain socket into which data will be spliced. */
	if ((error = getsock(curproc, fd, &fp)) != 0)
		return (error);
	sosp = fp->f_data;
	if (sosp->so_sp == NULL) {
		sp = pool_get(&sosplice_pool, PR_WAITOK | PR_ZERO);
		if (sosp->so_sp == NULL)
			sosp->so_sp = sp;
		else
			pool_put(&sosplice_pool, sp);
	}

	/* Lock both receive and send buffer. */
	if ((error = sblock(so, &so->so_rcv,
	    (so->so_state & SS_NBIO) ? M_NOWAIT : M_WAITOK)) != 0) {
		FRELE(fp, curproc);
		return (error);
	}
	if ((error = sblock(so, &sosp->so_snd, M_WAITOK)) != 0) {
		sbunlock(so, &so->so_rcv);
		FRELE(fp, curproc);
		return (error);
	}

	if (so->so_sp->ssp_socket || sosp->so_sp->ssp_soback) {
		error = EBUSY;
		goto release;
	}
	if (sosp->so_proto->pr_usrreq != so->so_proto->pr_usrreq) {
		error = EPROTONOSUPPORT;
		goto release;
	}
	if (sosp->so_options & SO_ACCEPTCONN) {
		error = EOPNOTSUPP;
		goto release;
	}
	if ((sosp->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) {
		error = ENOTCONN;
		goto release;
	}

	/* Splice so and sosp together. */
	so->so_sp->ssp_socket = sosp;
	sosp->so_sp->ssp_soback = so;
	so->so_splicelen = 0;
	so->so_splicemax = max;
	if (tv)
		so->so_idletv = *tv;
	else
		timerclear(&so->so_idletv);
	timeout_set_proc(&so->so_idleto, soidle, so);
	task_set(&so->so_splicetask, sotask, so);

	/*
	 * To prevent softnet interrupt from calling somove() while
	 * we sleep, the socket buffers are not marked as spliced yet.
	 */
	if (somove(so, M_WAIT)) {
		so->so_rcv.sb_flags |= SB_SPLICE;
		sosp->so_snd.sb_flags |= SB_SPLICE;
	}

release:
	sbunlock(sosp, &sosp->so_snd);
	sbunlock(so, &so->so_rcv);
	FRELE(fp, curproc);
	return (error);
}
void
sounsplice(struct socket *so, struct socket *sosp, int wakeup)
{
	soassertlocked(so);

	task_del(sosplice_taskq, &so->so_splicetask);
	timeout_del(&so->so_idleto);
	sosp->so_snd.sb_flags &= ~SB_SPLICE;
	so->so_rcv.sb_flags &= ~SB_SPLICE;
	so->so_sp->ssp_socket = sosp->so_sp->ssp_soback = NULL;
	if (wakeup && soreadable(so))
		sorwakeup(so);
}

void
soidle(void *arg)
{
	struct socket *so = arg;
	int s;

	s = solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		so->so_error = ETIMEDOUT;
		sounsplice(so, so->so_sp->ssp_socket, 1);
	}
	sounlock(s);
}

void
sotask(void *arg)
{
	struct socket *so = arg;
	int s;

	s = solock(so);
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * We may not sleep here as sofree() and unsplice() may be
		 * called from softnet interrupt context.  This would remove
		 * the socket during somove().
		 */
		somove(so, M_DONTWAIT);
	}
	sounlock(s);

	/* Avoid user land starvation. */
	yield();
}

/*
 * The socket splicing task may sleep while grabbing the net lock.  As
 * sofree() can be called anytime, sotask() could access the memory of a
 * freed socket after wakeup.  So delay the pool_put() until all pending
 * socket splicing tasks have finished, by scheduling it on the same
 * taskq thread.
 */
void
soput(void *arg)
{
	struct socket *so = arg;

	pool_put(&sosplice_pool, so->so_sp);
	pool_put(&socket_pool, so);
}
/*
 * Move data from the receive buffer of the spliced source socket to
 * the send buffer of the drain socket.  Try to move as much as possible
 * in one big chunk.  This is a TCP-only implementation.
 * A return value of 0 means splicing has finished, 1 means it should
 * continue.
 */
int
somove(struct socket *so, int wait)
{
	struct socket *sosp = so->so_sp->ssp_socket;
	struct mbuf *m, **mp, *nextrecord;
	u_long len, off, oobmark;
	long space;
	int error = 0, maxreached = 0;
	unsigned int state;

	soassertlocked(so);

nextpkt:
	if (so->so_error) {
		error = so->so_error;
		goto release;
	}
	if (sosp->so_state & SS_CANTSENDMORE) {
		error = EPIPE;
		goto release;
	}
	if (sosp->so_error && sosp->so_error != ETIMEDOUT &&
	    sosp->so_error != EFBIG && sosp->so_error != ELOOP) {
		error = sosp->so_error;
		goto release;
	}
	if ((sosp->so_state & SS_ISCONNECTED) == 0)
		goto release;

	/* Calculate how many bytes can be copied now. */
	len = so->so_rcv.sb_datacc;
	if (so->so_splicemax) {
		KASSERT(so->so_splicelen < so->so_splicemax);
		if (so->so_splicemax <= so->so_splicelen + len) {
			len = so->so_splicemax - so->so_splicelen;
			maxreached = 1;
		}
	}
	space = sbspace(sosp, &sosp->so_snd);
	if (so->so_oobmark && so->so_oobmark < len &&
	    so->so_oobmark < space + 1024)
		space += 1024;
	if (space <= 0) {
		maxreached = 0;
		goto release;
	}
	if (space < len) {
		maxreached = 0;
		if (space < sosp->so_snd.sb_lowat)
			goto release;
		len = space;
	}
	sosp->so_state |= SS_ISSENDING;

	SBLASTRECORDCHK(&so->so_rcv, "somove 1");
	SBLASTMBUFCHK(&so->so_rcv, "somove 1");
	m = so->so_rcv.sb_mb;
	if (m == NULL)
		goto release;
	nextrecord = m->m_nextpkt;

	/* Drop address and control information not used with splicing. */
	if (so->so_proto->pr_flags & PR_ADDR) {
#ifdef DIAGNOSTIC
		if (m->m_type != MT_SONAME)
			panic("somove soname: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
#endif
		m = m->m_next;
	}
	while (m && m->m_type == MT_CONTROL)
		m = m->m_next;
	if (m == NULL) {
		sbdroprecord(&so->so_rcv);
		if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
			(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
			    NULL, NULL, NULL);
		goto nextpkt;
	}

	/*
	 * By splicing sockets connected to localhost, userland might create
	 * a loop.  Dissolve the splicing with an error if a loop is detected
	 * by the counter.
	 */
	if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.ph_loopcnt++ >= M_MAXLOOP) {
		error = ELOOP;
		goto release;
	}

	if (so->so_proto->pr_flags & PR_ATOMIC) {
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("somove !PKTHDR: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, m, m->m_type);
		if (sosp->so_snd.sb_hiwat < m->m_pkthdr.len) {
			error = EMSGSIZE;
			goto release;
		}
		if (len < m->m_pkthdr.len)
			goto release;
		if (m->m_pkthdr.len < len) {
			maxreached = 0;
			len = m->m_pkthdr.len;
		}
		/*
		 * Throw away the name mbuf after it has been assured
		 * that the whole first record can be processed.
		 */
		m = so->so_rcv.sb_mb;
		sbfree(&so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		sbsync(&so->so_rcv, nextrecord);
	}
	/*
	 * Throw away the control mbufs once it is assured
	 * that the whole first record can be processed.
	 */
	m = so->so_rcv.sb_mb;
	while (m && m->m_type == MT_CONTROL) {
		sbfree(&so->so_rcv, m);
		so->so_rcv.sb_mb = m_free(m);
		m = so->so_rcv.sb_mb;
		sbsync(&so->so_rcv, nextrecord);
	}

	SBLASTRECORDCHK(&so->so_rcv, "somove 2");
	SBLASTMBUFCHK(&so->so_rcv, "somove 2");

	/* Take at most len mbufs out of receive buffer. */
	for (off = 0, mp = &m; off <= len && *mp;
	    off += (*mp)->m_len, mp = &(*mp)->m_next) {
		u_long size = len - off;

#ifdef DIAGNOSTIC
		if ((*mp)->m_type != MT_DATA && (*mp)->m_type != MT_HEADER)
			panic("somove type: so %p, so_type %d, m %p, "
			    "m_type %d", so, so->so_type, *mp, (*mp)->m_type);
#endif
		if ((*mp)->m_len > size) {
			/*
			 * Move only a partial mbuf at maximum splice length or
			 * if the drain buffer is too small for this large mbuf.
			 */
			if (!maxreached && so->so_snd.sb_datacc > 0) {
				len -= size;
				break;
			}
			*mp = m_copym(so->so_rcv.sb_mb, 0, size, wait);
			if (*mp == NULL) {
				len -= size;
				break;
			}
			so->so_rcv.sb_mb->m_data += size;
			so->so_rcv.sb_mb->m_len -= size;
			so->so_rcv.sb_cc -= size;
			so->so_rcv.sb_datacc -= size;
		} else {
			*mp = so->so_rcv.sb_mb;
			sbfree(&so->so_rcv, *mp);
			so->so_rcv.sb_mb = (*mp)->m_next;
			sbsync(&so->so_rcv, nextrecord);
		}
	}
	*mp = NULL;

	SBLASTRECORDCHK(&so->so_rcv, "somove 3");
	SBLASTMBUFCHK(&so->so_rcv, "somove 3");
	SBCHECK(&so->so_rcv);
	if (m == NULL)
		goto release;
	m->m_nextpkt = NULL;
	if (m->m_flags & M_PKTHDR) {
		m_resethdr(m);
		m->m_pkthdr.len = len;
	}

	/* Send window update to source peer as receive buffer has changed. */
	if (so->so_proto->pr_flags & PR_WANTRCVD && so->so_pcb)
		(so->so_proto->pr_usrreq)(so, PRU_RCVD, NULL,
		    NULL, NULL, NULL);

	/* The receive buffer has shrunk by len bytes, adjust the oob mark. */
	state = so->so_state;
	so->so_state &= ~SS_RCVATMARK;
	oobmark = so->so_oobmark;
	so->so_oobmark = oobmark > len ? oobmark - len : 0;
	if (oobmark) {
		if (oobmark == len)
			so->so_state |= SS_RCVATMARK;
		if (oobmark >= len)
			oobmark = 0;
	}
	/*
	 * Handle oob data.  If any malloc fails, ignore error.
	 * TCP urgent data is not very reliable anyway.
	 */
	while (((state & SS_RCVATMARK) || oobmark) &&
	    (so->so_options & SO_OOBINLINE)) {
		struct mbuf *o = NULL;

		if (state & SS_RCVATMARK) {
			o = m_get(wait, MT_DATA);
			state &= ~SS_RCVATMARK;
		} else if (oobmark) {
			o = m_split(m, oobmark, wait);
			if (o) {
				error = (*sosp->so_proto->pr_usrreq)(sosp,
				    PRU_SEND, m, NULL, NULL, NULL);
				if (error) {
					if (sosp->so_state & SS_CANTSENDMORE)
						error = EPIPE;
					m_freem(o);
					goto release;
				}
				len -= oobmark;
				so->so_splicelen += oobmark;
				m = o;
				o = m_get(wait, MT_DATA);
			}
			oobmark = 0;
		}
		if (o) {
			o->m_len = 1;
			*mtod(o, caddr_t) = *mtod(m, caddr_t);
			error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SENDOOB,
			    o, NULL, NULL, NULL);
			if (error) {
				if (sosp->so_state & SS_CANTSENDMORE)
					error = EPIPE;
				m_freem(m);
				goto release;
			}
			len -= 1;
			so->so_splicelen += 1;
			if (oobmark) {
				oobmark -= 1;
				if (oobmark == 0)
					state |= SS_RCVATMARK;
			}
			m_adj(m, 1);
		}
	}

	/* Append all remaining data to drain socket. */
	if (so->so_rcv.sb_cc == 0 || maxreached)
		sosp->so_state &= ~SS_ISSENDING;
	error = (*sosp->so_proto->pr_usrreq)(sosp, PRU_SEND, m, NULL, NULL,
	    NULL);
	if (error) {
		if (sosp->so_state & SS_CANTSENDMORE)
			error = EPIPE;
		goto release;
	}
	so->so_splicelen += len;

	/* Move several packets if possible. */
	if (!maxreached && nextrecord)
		goto nextpkt;

release:
	sosp->so_state &= ~SS_ISSENDING;
	if (!error && maxreached && so->so_splicemax == so->so_splicelen)
		error = EFBIG;
	if (error)
		so->so_error = error;
	if (((so->so_state & SS_CANTRCVMORE) && so->so_rcv.sb_cc == 0) ||
	    (sosp->so_state & SS_CANTSENDMORE) || maxreached || error) {
		sounsplice(so, sosp, 1);
		return (0);
	}
	if (timerisset(&so->so_idletv))
		timeout_add_tv(&so->so_idleto, &so->so_idletv);
	return (1);
}

#endif /* SOCKET_SPLICE */
void
sorwakeup(struct socket *so)
{
	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (so->so_rcv.sb_flags & SB_SPLICE) {
		/*
		 * TCP has a send buffer that can handle multiple packets
		 * at once.  So queue the stream a bit to accumulate data.
		 * The sosplice thread will call somove() later and send
		 * the packets calling tcp_output() only once.
		 * In the UDP case, send out the packets immediately.
		 * Using a thread would make things slower.
		 */
		if (so->so_proto->pr_flags & PR_WANTRCVD)
			task_add(sosplice_taskq, &so->so_splicetask);
		else
			somove(so, M_DONTWAIT);
	}
	if (isspliced(so))
		return;
#endif
	sowakeup(so, &so->so_rcv);
	if (so->so_upcall)
		(*(so->so_upcall))(so, so->so_upcallarg, M_DONTWAIT);
}

void
sowwakeup(struct socket *so)
{
	soassertlocked(so);

#ifdef SOCKET_SPLICE
	if (so->so_snd.sb_flags & SB_SPLICE)
		task_add(sosplice_taskq, &so->so_sp->ssp_soback->so_splicetask);
#endif
	sowakeup(so, &so->so_snd);
}
int
sosetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			error = (*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
			return (error);
		}
		error = ENOPROTOOPT;
	} else {
		switch (optname) {
		case SO_BINDANY:
			if ((error = suser(curproc)) != 0)	/* XXX */
				return (error);
			break;
		}

		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger) ||
			    mtod(m, struct linger *)->l_linger < 0 ||
			    mtod(m, struct linger *)->l_linger > SHRT_MAX)
				return (EINVAL);
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* FALLTHROUGH */

		case SO_BINDANY:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_DONTROUTE:
			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			if (*mtod(m, int *))
				error = EOPNOTSUPP;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
		    {
			u_long cnt;

			if (m == NULL || m->m_len < sizeof (int))
				return (EINVAL);
			cnt = *mtod(m, int *);
			if ((long)cnt <= 0)
				cnt = 1;
			switch (optname) {

			case SO_SNDBUF:
				if (so->so_state & SS_CANTSENDMORE)
					return (EINVAL);
				if (sbcheckreserve(cnt, so->so_snd.sb_wat) ||
				    sbreserve(so, &so->so_snd, cnt))
					return (ENOBUFS);
				so->so_snd.sb_wat = cnt;
				break;

			case SO_RCVBUF:
				if (so->so_state & SS_CANTRCVMORE)
					return (EINVAL);
				if (sbcheckreserve(cnt, so->so_rcv.sb_wat) ||
				    sbreserve(so, &so->so_rcv, cnt))
					return (ENOBUFS);
				so->so_rcv.sb_wat = cnt;
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (cnt > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : cnt;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (cnt > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : cnt;
				break;
			}
			break;
		    }

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			int val;

			if (m == NULL || m->m_len < sizeof (tv))
				return (EINVAL);
			memcpy(&tv, mtod(m, struct timeval *), sizeof tv);
			val = tvtohz(&tv);
			if (val > USHRT_MAX)
				return (EDOM);

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				struct domain *dom = so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_SETOPT, so, level, optname, m);
				return (error);
			}
			error = ENOPROTOOPT;
			break;

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
			if (m == NULL) {
				error = sosplice(so, -1, 0, NULL);
			} else if (m->m_len < sizeof(int)) {
				return (EINVAL);
			} else if (m->m_len < sizeof(struct splice)) {
				error = sosplice(so, *mtod(m, int *), 0, NULL);
			} else {
				error = sosplice(so,
				    mtod(m, struct splice *)->sp_fd,
				    mtod(m, struct splice *)->sp_max,
				    &mtod(m, struct splice *)->sp_idle);
			}
			break;
#endif /* SOCKET_SPLICE */

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto->pr_ctloutput) {
			(*so->so_proto->pr_ctloutput)(PRCO_SETOPT, so,
			    level, optname, m);
		}
	}

	return (error);
}
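/*
 * The SO_SNDTIMEO/SO_RCVTIMEO cases above store the timeout in clock
 * ticks, so the usable range depends on hz.  A worked example, assuming
 * the traditional hz = 100: tv = { 2, 500000 } gives tvtohz() about 250
 * ticks, while anything above USHRT_MAX ticks (roughly 655 seconds at
 * hz = 100) is rejected with EDOM.
 */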
int
sogetopt(struct socket *so, int level, int optname, struct mbuf *m)
{
	int error = 0;

	soassertlocked(so);

	if (level != SOL_SOCKET) {
		if (so->so_proto->pr_ctloutput) {
			m->m_len = 0;

			error = (*so->so_proto->pr_ctloutput)(PRCO_GETOPT, so,
			    level, optname, m);
			if (error)
				return (error);
			return (0);
		} else
			return (ENOPROTOOPT);
	} else {
		m->m_len = sizeof (int);

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
			    so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_BINDANY:
		case SO_USELOOPBACK:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
		case SO_ZEROIZE:
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_DONTROUTE:
			*mtod(m, int *) = 0;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval tv;
			int val = (optname == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			m->m_len = sizeof(struct timeval);
			memset(&tv, 0, sizeof(tv));
			tv.tv_sec = val / hz;
			tv.tv_usec = (val % hz) * tick;
			memcpy(mtod(m, struct timeval *), &tv, sizeof tv);
			break;
		    }

		case SO_RTABLE:
			if (so->so_proto->pr_domain &&
			    so->so_proto->pr_domain->dom_protosw &&
			    so->so_proto->pr_ctloutput) {
				struct domain *dom = so->so_proto->pr_domain;

				level = dom->dom_protosw->pr_protocol;
				error = (*so->so_proto->pr_ctloutput)
				    (PRCO_GETOPT, so, level, optname, m);
				if (error)
					return (error);
				break;
			}
			return (ENOPROTOOPT);

#ifdef SOCKET_SPLICE
		case SO_SPLICE:
		    {
			off_t len;

			m->m_len = sizeof(off_t);
			len = so->so_sp ? so->so_sp->ssp_len : 0;
			memcpy(mtod(m, off_t *), &len, sizeof(off_t));
			break;
		    }
#endif /* SOCKET_SPLICE */

		case SO_PEERCRED:
			if (so->so_proto->pr_protocol == AF_UNIX) {
				struct unpcb *unp = sotounpcb(so);

				if (unp->unp_flags & UNP_FEIDS) {
					m->m_len = sizeof(unp->unp_connid);
					memcpy(mtod(m, caddr_t),
					    &(unp->unp_connid), m->m_len);
					break;
				}
				return (ENOTCONN);
			}
			return (EOPNOTSUPP);

		default:
			return (ENOPROTOOPT);
		}
		return (0);
	}
}

void
sohasoutofband(struct socket *so)
{
	KERNEL_LOCK();
	csignal(so->so_pgid, SIGURG, so->so_siguid, so->so_sigeuid);
	selwakeup(&so->so_rcv.sb_sel);
	KERNEL_UNLOCK();
}

int
soo_kqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;
	struct sockbuf *sb;

	KERNEL_ASSERT_LOCKED();

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (EINVAL);
	}

	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flagsintr |= SB_KNOTE;

	return (0);
}
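/*
 * The filters registered by soo_kqfilter() back kqueue(2) on sockets.
 * A hedged userland sketch of how filt_soread()'s NOTE_LOWAT branch is
 * exercised (illustrative only):
 *
 *	struct kevent kev;
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, NOTE_LOWAT, 512, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	-- the read filter now fires only once at least 512 bytes
 *	   (kn_sdata) are buffered, instead of using sb_lowat
 */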
void
filt_sordetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	KERNEL_ASSERT_LOCKED();

	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flagsintr &= ~SB_KNOTE;
}

int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	kn->kn_data = so->so_rcv.sb_cc;
#ifdef SOCKET_SPLICE
	if (isspliced(so)) {
		rv = 0;
	} else
#endif /* SOCKET_SPLICE */
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_rcv.sb_lowat);
	}

	return rv;
}

void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = kn->kn_fp->f_data;

	KERNEL_ASSERT_LOCKED();

	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flagsintr &= ~SB_KNOTE;
}

int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;
	int rv;

	kn->kn_data = sbspace(so, &so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		rv = 1;
	} else if (so->so_error) {	/* temporary udp error */
		rv = 1;
	} else if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
		rv = 0;
	} else if (kn->kn_sfflags & NOTE_LOWAT) {
		rv = (kn->kn_data >= kn->kn_sdata);
	} else {
		rv = (kn->kn_data >= so->so_snd.sb_lowat);
	}

	return (rv);
}

int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;

	return (kn->kn_data != 0);
}

#ifdef DDB
void	sobuf_print(struct sockbuf *,
	    int (*)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))));

void
sobuf_print(struct sockbuf *sb,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	(*pr)("\tsb_cc: %lu\n", sb->sb_cc);
	(*pr)("\tsb_datacc: %lu\n", sb->sb_datacc);
	(*pr)("\tsb_hiwat: %lu\n", sb->sb_hiwat);
	(*pr)("\tsb_wat: %lu\n", sb->sb_wat);
	(*pr)("\tsb_mbcnt: %lu\n", sb->sb_mbcnt);
	(*pr)("\tsb_mbmax: %lu\n", sb->sb_mbmax);
	(*pr)("\tsb_lowat: %ld\n", sb->sb_lowat);
	(*pr)("\tsb_mb: %p\n", sb->sb_mb);
	(*pr)("\tsb_mbtail: %p\n", sb->sb_mbtail);
	(*pr)("\tsb_lastrecord: %p\n", sb->sb_lastrecord);
	(*pr)("\tsb_sel: ...\n");
	(*pr)("\tsb_flagsintr: %d\n", sb->sb_flagsintr);
	(*pr)("\tsb_flags: %i\n", sb->sb_flags);
	(*pr)("\tsb_timeo: %i\n", sb->sb_timeo);
}

void
so_print(void *v,
    int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
{
	struct socket *so = v;

	(*pr)("socket %p\n", so);
	(*pr)("so_type: %i\n", so->so_type);
	(*pr)("so_options: 0x%04x\n", so->so_options); /* %b */
	(*pr)("so_linger: %i\n", so->so_linger);
	(*pr)("so_state: 0x%04x\n", so->so_state);
	(*pr)("so_pcb: %p\n", so->so_pcb);
	(*pr)("so_proto: %p\n", so->so_proto);

	(*pr)("so_head: %p\n", so->so_head);
	(*pr)("so_onq: %p\n", so->so_onq);
	(*pr)("so_q0: @%p first: %p\n", &so->so_q0, TAILQ_FIRST(&so->so_q0));
	(*pr)("so_q: @%p first: %p\n", &so->so_q, TAILQ_FIRST(&so->so_q));
	(*pr)("so_eq: next: %p\n", TAILQ_NEXT(so, so_qe));
	(*pr)("so_q0len: %i\n", so->so_q0len);
	(*pr)("so_qlen: %i\n", so->so_qlen);
	(*pr)("so_qlimit: %i\n", so->so_qlimit);
	(*pr)("so_timeo: %i\n", so->so_timeo);
	(*pr)("so_pgid: %i\n", so->so_pgid);
	(*pr)("so_siguid: %i\n", so->so_siguid);
	(*pr)("so_sigeuid: %i\n", so->so_sigeuid);
	(*pr)("so_oobmark: %lu\n", so->so_oobmark);

	(*pr)("so_sp: %p\n", so->so_sp);
	if (so->so_sp != NULL) {
		(*pr)("\tssp_socket: %p\n", so->so_sp->ssp_socket);
		(*pr)("\tssp_soback: %p\n", so->so_sp->ssp_soback);
		(*pr)("\tssp_len: %lld\n",
		    (unsigned long long)so->so_sp->ssp_len);
		(*pr)("\tssp_max: %lld\n",
		    (unsigned long long)so->so_sp->ssp_max);
		(*pr)("\tssp_idletv: %lld %ld\n", so->so_sp->ssp_idletv.tv_sec,
		    so->so_sp->ssp_idletv.tv_usec);
		(*pr)("\tssp_idleto: %spending (@%i)\n",
		    timeout_pending(&so->so_sp->ssp_idleto) ? "" : "not ",
		    so->so_sp->ssp_idleto.to_time);
	}

	(*pr)("so_rcv:\n");
	sobuf_print(&so->so_rcv, pr);
	(*pr)("so_snd:\n");
	sobuf_print(&so->so_snd, pr);

	(*pr)("so_upcall: %p so_upcallarg: %p\n",
	    so->so_upcall, so->so_upcallarg);

	(*pr)("so_euid: %d so_ruid: %d\n", so->so_euid, so->so_ruid);
	(*pr)("so_egid: %d so_rgid: %d\n", so->so_egid, so->so_rgid);
	(*pr)("so_cpid: %d\n", so->so_cpid);
}
#endif