1 /* $OpenBSD: uipc_usrreq.c,v 1.208 2024/06/28 21:30:24 mvs Exp $ */ 2 /* $NetBSD: uipc_usrreq.c,v 1.18 1996/02/09 19:00:50 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1989, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/proc.h> 38 #include <sys/filedesc.h> 39 #include <sys/domain.h> 40 #include <sys/protosw.h> 41 #include <sys/queue.h> 42 #include <sys/socket.h> 43 #include <sys/socketvar.h> 44 #include <sys/unpcb.h> 45 #include <sys/un.h> 46 #include <sys/namei.h> 47 #include <sys/vnode.h> 48 #include <sys/file.h> 49 #include <sys/stat.h> 50 #include <sys/mbuf.h> 51 #include <sys/task.h> 52 #include <sys/pledge.h> 53 #include <sys/pool.h> 54 #include <sys/rwlock.h> 55 #include <sys/mutex.h> 56 #include <sys/sysctl.h> 57 #include <sys/lock.h> 58 #include <sys/refcnt.h> 59 60 #include "kcov.h" 61 #if NKCOV > 0 62 #include <sys/kcov.h> 63 #endif 64 65 /* 66 * Locks used to protect global data and struct members: 67 * I immutable after creation 68 * D unp_df_lock 69 * G unp_gc_lock 70 * M unp_ino_mtx 71 * R unp_rights_mtx 72 * a atomic 73 * s socket lock 74 */ 75 76 struct rwlock unp_df_lock = RWLOCK_INITIALIZER("unpdflk"); 77 struct rwlock unp_gc_lock = RWLOCK_INITIALIZER("unpgclk"); 78 79 struct mutex unp_rights_mtx = MUTEX_INITIALIZER(IPL_SOFTNET); 80 struct mutex unp_ino_mtx = MUTEX_INITIALIZER(IPL_SOFTNET); 81 82 /* 83 * Stack of sets of files that were passed over a socket but were 84 * not received and need to be closed. 85 */ 86 struct unp_deferral { 87 SLIST_ENTRY(unp_deferral) ud_link; /* [D] */ 88 int ud_n; /* [I] */ 89 /* followed by ud_n struct fdpass */ 90 struct fdpass ud_fp[]; /* [I] */ 91 }; 92 93 void uipc_setaddr(const struct unpcb *, struct mbuf *); 94 void unp_discard(struct fdpass *, int); 95 void unp_remove_gcrefs(struct fdpass *, int); 96 void unp_restore_gcrefs(struct fdpass *, int); 97 void unp_scan(struct mbuf *, void (*)(struct fdpass *, int)); 98 int unp_nam2sun(struct mbuf *, struct sockaddr_un **, size_t *); 99 static inline void unp_ref(struct unpcb *); 100 static inline void unp_rele(struct unpcb *); 101 struct socket *unp_solock_peer(struct socket *); 102 103 struct pool unpcb_pool; 104 struct task unp_gc_task = TASK_INITIALIZER(unp_gc, NULL); 105 106 /* 107 * Unix communications domain. 108 * 109 * TODO: 110 * RDM 111 * rethink name space problems 112 * need a proper out-of-band 113 */ 114 const struct sockaddr sun_noname = { sizeof(sun_noname), AF_UNIX }; 115 116 /* [G] list of all UNIX domain sockets, for unp_gc() */ 117 LIST_HEAD(unp_head, unpcb) unp_head = 118 LIST_HEAD_INITIALIZER(unp_head); 119 /* [D] list of sets of files that were sent over sockets that are now closed */ 120 SLIST_HEAD(,unp_deferral) unp_deferred = 121 SLIST_HEAD_INITIALIZER(unp_deferred); 122 123 ino_t unp_ino; /* [U] prototype for fake inode numbers */ 124 int unp_rights; /* [R] file descriptors in flight */ 125 int unp_defer; /* [G] number of deferred fp to close by the GC task */ 126 int unp_gcing; /* [G] GC task currently running */ 127 128 const struct pr_usrreqs uipc_usrreqs = { 129 .pru_attach = uipc_attach, 130 .pru_detach = uipc_detach, 131 .pru_bind = uipc_bind, 132 .pru_listen = uipc_listen, 133 .pru_connect = uipc_connect, 134 .pru_accept = uipc_accept, 135 .pru_disconnect = uipc_disconnect, 136 .pru_shutdown = uipc_shutdown, 137 .pru_rcvd = uipc_rcvd, 138 .pru_send = uipc_send, 139 .pru_abort = uipc_abort, 140 .pru_sense = uipc_sense, 141 .pru_sockaddr = uipc_sockaddr, 142 .pru_peeraddr = uipc_peeraddr, 143 .pru_connect2 = uipc_connect2, 144 }; 145 146 const struct pr_usrreqs uipc_dgram_usrreqs = { 147 .pru_attach = uipc_attach, 148 .pru_detach = uipc_detach, 149 .pru_bind = uipc_bind, 150 .pru_listen = uipc_listen, 151 .pru_connect = uipc_connect, 152 .pru_disconnect = uipc_disconnect, 153 .pru_shutdown = uipc_dgram_shutdown, 154 .pru_send = uipc_dgram_send, 155 .pru_sense = uipc_sense, 156 .pru_sockaddr = uipc_sockaddr, 157 .pru_peeraddr = uipc_peeraddr, 158 .pru_connect2 = uipc_connect2, 159 }; 160 161 void 162 unp_init(void) 163 { 164 pool_init(&unpcb_pool, sizeof(struct unpcb), 0, 165 IPL_SOFTNET, 0, "unpcb", NULL); 166 } 167 168 static inline void 169 unp_ref(struct unpcb *unp) 170 { 171 refcnt_take(&unp->unp_refcnt); 172 } 173 174 static inline void 175 unp_rele(struct unpcb *unp) 176 { 177 refcnt_rele_wake(&unp->unp_refcnt); 178 } 179 180 struct socket * 181 unp_solock_peer(struct socket *so) 182 { 183 struct unpcb *unp, *unp2; 184 struct socket *so2; 185 186 unp = so->so_pcb; 187 188 again: 189 if ((unp2 = unp->unp_conn) == NULL) 190 return NULL; 191 192 so2 = unp2->unp_socket; 193 194 if (so < so2) 195 solock(so2); 196 else if (so > so2) { 197 unp_ref(unp2); 198 sounlock(so); 199 solock(so2); 200 solock(so); 201 202 /* Datagram socket could be reconnected due to re-lock. */ 203 if (unp->unp_conn != unp2) { 204 sounlock(so2); 205 unp_rele(unp2); 206 goto again; 207 } 208 209 unp_rele(unp2); 210 } 211 212 return so2; 213 } 214 215 void 216 uipc_setaddr(const struct unpcb *unp, struct mbuf *nam) 217 { 218 if (unp != NULL && unp->unp_addr != NULL) { 219 nam->m_len = unp->unp_addr->m_len; 220 memcpy(mtod(nam, caddr_t), mtod(unp->unp_addr, caddr_t), 221 nam->m_len); 222 } else { 223 nam->m_len = sizeof(sun_noname); 224 memcpy(mtod(nam, struct sockaddr *), &sun_noname, 225 nam->m_len); 226 } 227 } 228 229 /* 230 * Both send and receive buffers are allocated PIPSIZ bytes of buffering 231 * for stream sockets, although the total for sender and receiver is 232 * actually only PIPSIZ. 233 * Datagram sockets really use the sendspace as the maximum datagram size, 234 * and don't really want to reserve the sendspace. Their recvspace should 235 * be large enough for at least one max-size datagram plus address. 236 */ 237 #define PIPSIZ 8192 238 u_int unpst_sendspace = PIPSIZ; 239 u_int unpst_recvspace = PIPSIZ; 240 u_int unpsq_sendspace = PIPSIZ; 241 u_int unpsq_recvspace = PIPSIZ; 242 u_int unpdg_sendspace = 2*1024; /* really max datagram size */ 243 u_int unpdg_recvspace = 16*1024; 244 245 const struct sysctl_bounded_args unpstctl_vars[] = { 246 { UNPCTL_RECVSPACE, &unpst_recvspace, 0, SB_MAX }, 247 { UNPCTL_SENDSPACE, &unpst_sendspace, 0, SB_MAX }, 248 }; 249 const struct sysctl_bounded_args unpsqctl_vars[] = { 250 { UNPCTL_RECVSPACE, &unpsq_recvspace, 0, SB_MAX }, 251 { UNPCTL_SENDSPACE, &unpsq_sendspace, 0, SB_MAX }, 252 }; 253 const struct sysctl_bounded_args unpdgctl_vars[] = { 254 { UNPCTL_RECVSPACE, &unpdg_recvspace, 0, SB_MAX }, 255 { UNPCTL_SENDSPACE, &unpdg_sendspace, 0, SB_MAX }, 256 }; 257 258 int 259 uipc_attach(struct socket *so, int proto, int wait) 260 { 261 struct unpcb *unp; 262 int error; 263 264 if (so->so_pcb) 265 return EISCONN; 266 if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { 267 switch (so->so_type) { 268 269 case SOCK_STREAM: 270 error = soreserve(so, unpst_sendspace, unpst_recvspace); 271 break; 272 273 case SOCK_SEQPACKET: 274 error = soreserve(so, unpsq_sendspace, unpsq_recvspace); 275 break; 276 277 case SOCK_DGRAM: 278 error = soreserve(so, unpdg_sendspace, unpdg_recvspace); 279 break; 280 281 default: 282 panic("unp_attach"); 283 } 284 if (error) 285 return (error); 286 } 287 unp = pool_get(&unpcb_pool, (wait == M_WAIT ? PR_WAITOK : PR_NOWAIT) | 288 PR_ZERO); 289 if (unp == NULL) 290 return (ENOBUFS); 291 refcnt_init(&unp->unp_refcnt); 292 unp->unp_socket = so; 293 so->so_pcb = unp; 294 getnanotime(&unp->unp_ctime); 295 296 rw_enter_write(&unp_gc_lock); 297 LIST_INSERT_HEAD(&unp_head, unp, unp_link); 298 rw_exit_write(&unp_gc_lock); 299 300 return (0); 301 } 302 303 int 304 uipc_detach(struct socket *so) 305 { 306 struct unpcb *unp = sotounpcb(so); 307 308 if (unp == NULL) 309 return (EINVAL); 310 311 unp_detach(unp); 312 313 return (0); 314 } 315 316 int 317 uipc_bind(struct socket *so, struct mbuf *nam, struct proc *p) 318 { 319 struct unpcb *unp = sotounpcb(so); 320 struct sockaddr_un *soun; 321 struct mbuf *nam2; 322 struct vnode *vp; 323 struct vattr vattr; 324 int error; 325 struct nameidata nd; 326 size_t pathlen; 327 328 if (unp->unp_flags & (UNP_BINDING | UNP_CONNECTING)) 329 return (EINVAL); 330 if (unp->unp_vnode != NULL) 331 return (EINVAL); 332 if ((error = unp_nam2sun(nam, &soun, &pathlen))) 333 return (error); 334 335 unp->unp_flags |= UNP_BINDING; 336 337 /* 338 * Enforce `i_lock' -> `solock' because fifo subsystem 339 * requires it. The socket can't be closed concurrently 340 * because the file descriptor reference is still held. 341 */ 342 343 sounlock(unp->unp_socket); 344 345 nam2 = m_getclr(M_WAITOK, MT_SONAME); 346 nam2->m_len = sizeof(struct sockaddr_un); 347 memcpy(mtod(nam2, struct sockaddr_un *), soun, 348 offsetof(struct sockaddr_un, sun_path) + pathlen); 349 /* No need to NUL terminate: m_getclr() returns zero'd mbufs. */ 350 351 soun = mtod(nam2, struct sockaddr_un *); 352 353 /* Fixup sun_len to keep it in sync with m_len. */ 354 soun->sun_len = nam2->m_len; 355 356 NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE, 357 soun->sun_path, p); 358 nd.ni_pledge = PLEDGE_UNIX; 359 nd.ni_unveil = UNVEIL_CREATE; 360 361 KERNEL_LOCK(); 362 /* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */ 363 error = namei(&nd); 364 if (error != 0) { 365 m_freem(nam2); 366 solock(unp->unp_socket); 367 goto out; 368 } 369 vp = nd.ni_vp; 370 if (vp != NULL) { 371 VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); 372 if (nd.ni_dvp == vp) 373 vrele(nd.ni_dvp); 374 else 375 vput(nd.ni_dvp); 376 vrele(vp); 377 m_freem(nam2); 378 error = EADDRINUSE; 379 solock(unp->unp_socket); 380 goto out; 381 } 382 VATTR_NULL(&vattr); 383 vattr.va_type = VSOCK; 384 vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask; 385 error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); 386 vput(nd.ni_dvp); 387 if (error) { 388 m_freem(nam2); 389 solock(unp->unp_socket); 390 goto out; 391 } 392 solock(unp->unp_socket); 393 unp->unp_addr = nam2; 394 vp = nd.ni_vp; 395 vp->v_socket = unp->unp_socket; 396 unp->unp_vnode = vp; 397 unp->unp_connid.uid = p->p_ucred->cr_uid; 398 unp->unp_connid.gid = p->p_ucred->cr_gid; 399 unp->unp_connid.pid = p->p_p->ps_pid; 400 unp->unp_flags |= UNP_FEIDSBIND; 401 VOP_UNLOCK(vp); 402 out: 403 KERNEL_UNLOCK(); 404 unp->unp_flags &= ~UNP_BINDING; 405 406 return (error); 407 } 408 409 int 410 uipc_listen(struct socket *so) 411 { 412 struct unpcb *unp = sotounpcb(so); 413 414 if (unp->unp_flags & (UNP_BINDING | UNP_CONNECTING)) 415 return (EINVAL); 416 if (unp->unp_vnode == NULL) 417 return (EINVAL); 418 return (0); 419 } 420 421 int 422 uipc_connect(struct socket *so, struct mbuf *nam) 423 { 424 return unp_connect(so, nam, curproc); 425 } 426 427 int 428 uipc_accept(struct socket *so, struct mbuf *nam) 429 { 430 struct socket *so2; 431 struct unpcb *unp = sotounpcb(so); 432 433 /* 434 * Pass back name of connected socket, if it was bound and 435 * we are still connected (our peer may have closed already!). 436 */ 437 so2 = unp_solock_peer(so); 438 uipc_setaddr(unp->unp_conn, nam); 439 440 if (so2 != NULL && so2 != so) 441 sounlock(so2); 442 return (0); 443 } 444 445 int 446 uipc_disconnect(struct socket *so) 447 { 448 struct unpcb *unp = sotounpcb(so); 449 450 unp_disconnect(unp); 451 return (0); 452 } 453 454 int 455 uipc_shutdown(struct socket *so) 456 { 457 struct unpcb *unp = sotounpcb(so); 458 struct socket *so2; 459 460 socantsendmore(so); 461 462 if (unp->unp_conn != NULL) { 463 so2 = unp->unp_conn->unp_socket; 464 socantrcvmore(so2); 465 } 466 467 return (0); 468 } 469 470 int 471 uipc_dgram_shutdown(struct socket *so) 472 { 473 socantsendmore(so); 474 return (0); 475 } 476 477 void 478 uipc_rcvd(struct socket *so) 479 { 480 struct unpcb *unp = sotounpcb(so); 481 struct socket *so2; 482 483 if (unp->unp_conn == NULL) 484 return; 485 so2 = unp->unp_conn->unp_socket; 486 487 /* 488 * Adjust backpressure on sender 489 * and wakeup any waiting to write. 490 */ 491 mtx_enter(&so->so_rcv.sb_mtx); 492 mtx_enter(&so2->so_snd.sb_mtx); 493 so2->so_snd.sb_mbcnt = so->so_rcv.sb_mbcnt; 494 so2->so_snd.sb_cc = so->so_rcv.sb_cc; 495 mtx_leave(&so2->so_snd.sb_mtx); 496 mtx_leave(&so->so_rcv.sb_mtx); 497 sowwakeup(so2); 498 } 499 500 int 501 uipc_send(struct socket *so, struct mbuf *m, struct mbuf *nam, 502 struct mbuf *control) 503 { 504 struct unpcb *unp = sotounpcb(so); 505 struct socket *so2; 506 int error = 0, dowakeup = 0; 507 508 if (control) { 509 sounlock(so); 510 error = unp_internalize(control, curproc); 511 solock(so); 512 if (error) 513 goto out; 514 } 515 516 /* 517 * We hold both solock() and `sb_mtx' mutex while modifying 518 * SS_CANTSENDMORE flag. solock() is enough to check it. 519 */ 520 if (so->so_snd.sb_state & SS_CANTSENDMORE) { 521 error = EPIPE; 522 goto dispose; 523 } 524 if (unp->unp_conn == NULL) { 525 error = ENOTCONN; 526 goto dispose; 527 } 528 529 so2 = unp->unp_conn->unp_socket; 530 531 /* 532 * Send to paired receive port, and then raise 533 * send buffer counts to maintain backpressure. 534 * Wake up readers. 535 */ 536 /* 537 * sbappend*() should be serialized together 538 * with so_snd modification. 539 */ 540 mtx_enter(&so2->so_rcv.sb_mtx); 541 mtx_enter(&so->so_snd.sb_mtx); 542 if (control) { 543 if (sbappendcontrol(so2, &so2->so_rcv, m, control)) { 544 control = NULL; 545 } else { 546 mtx_leave(&so->so_snd.sb_mtx); 547 mtx_leave(&so2->so_rcv.sb_mtx); 548 error = ENOBUFS; 549 goto dispose; 550 } 551 } else if (so->so_type == SOCK_SEQPACKET) 552 sbappendrecord(so2, &so2->so_rcv, m); 553 else 554 sbappend(so2, &so2->so_rcv, m); 555 so->so_snd.sb_mbcnt = so2->so_rcv.sb_mbcnt; 556 so->so_snd.sb_cc = so2->so_rcv.sb_cc; 557 if (so2->so_rcv.sb_cc > 0) 558 dowakeup = 1; 559 mtx_leave(&so->so_snd.sb_mtx); 560 mtx_leave(&so2->so_rcv.sb_mtx); 561 562 if (dowakeup) 563 sorwakeup(so2); 564 565 m = NULL; 566 567 dispose: 568 /* we need to undo unp_internalize in case of errors */ 569 if (control && error) 570 unp_dispose(control); 571 572 out: 573 m_freem(control); 574 m_freem(m); 575 576 return (error); 577 } 578 579 int 580 uipc_dgram_send(struct socket *so, struct mbuf *m, struct mbuf *nam, 581 struct mbuf *control) 582 { 583 struct unpcb *unp = sotounpcb(so); 584 struct socket *so2; 585 const struct sockaddr *from; 586 int error = 0, dowakeup = 0; 587 588 if (control) { 589 sounlock(so); 590 error = unp_internalize(control, curproc); 591 solock(so); 592 if (error) 593 goto out; 594 } 595 596 if (nam) { 597 if (unp->unp_conn) { 598 error = EISCONN; 599 goto dispose; 600 } 601 error = unp_connect(so, nam, curproc); 602 if (error) 603 goto dispose; 604 } 605 606 if (unp->unp_conn == NULL) { 607 if (nam != NULL) 608 error = ECONNREFUSED; 609 else 610 error = ENOTCONN; 611 goto dispose; 612 } 613 614 so2 = unp->unp_conn->unp_socket; 615 616 if (unp->unp_addr) 617 from = mtod(unp->unp_addr, struct sockaddr *); 618 else 619 from = &sun_noname; 620 621 mtx_enter(&so2->so_rcv.sb_mtx); 622 if (sbappendaddr(so2, &so2->so_rcv, from, m, control)) { 623 dowakeup = 1; 624 m = NULL; 625 control = NULL; 626 } else 627 error = ENOBUFS; 628 mtx_leave(&so2->so_rcv.sb_mtx); 629 630 if (dowakeup) 631 sorwakeup(so2); 632 if (nam) 633 unp_disconnect(unp); 634 635 dispose: 636 /* we need to undo unp_internalize in case of errors */ 637 if (control && error) 638 unp_dispose(control); 639 640 out: 641 m_freem(control); 642 m_freem(m); 643 644 return (error); 645 } 646 647 void 648 uipc_abort(struct socket *so) 649 { 650 struct unpcb *unp = sotounpcb(so); 651 652 unp_detach(unp); 653 sofree(so, 0); 654 } 655 656 int 657 uipc_sense(struct socket *so, struct stat *sb) 658 { 659 struct unpcb *unp = sotounpcb(so); 660 661 sb->st_blksize = so->so_snd.sb_hiwat; 662 sb->st_dev = NODEV; 663 mtx_enter(&unp_ino_mtx); 664 if (unp->unp_ino == 0) 665 unp->unp_ino = unp_ino++; 666 mtx_leave(&unp_ino_mtx); 667 sb->st_atim.tv_sec = 668 sb->st_mtim.tv_sec = 669 sb->st_ctim.tv_sec = unp->unp_ctime.tv_sec; 670 sb->st_atim.tv_nsec = 671 sb->st_mtim.tv_nsec = 672 sb->st_ctim.tv_nsec = unp->unp_ctime.tv_nsec; 673 sb->st_ino = unp->unp_ino; 674 675 return (0); 676 } 677 678 int 679 uipc_sockaddr(struct socket *so, struct mbuf *nam) 680 { 681 struct unpcb *unp = sotounpcb(so); 682 683 uipc_setaddr(unp, nam); 684 return (0); 685 } 686 687 int 688 uipc_peeraddr(struct socket *so, struct mbuf *nam) 689 { 690 struct unpcb *unp = sotounpcb(so); 691 struct socket *so2; 692 693 so2 = unp_solock_peer(so); 694 uipc_setaddr(unp->unp_conn, nam); 695 if (so2 != NULL && so2 != so) 696 sounlock(so2); 697 return (0); 698 } 699 700 int 701 uipc_connect2(struct socket *so, struct socket *so2) 702 { 703 struct unpcb *unp = sotounpcb(so), *unp2; 704 int error; 705 706 if ((error = unp_connect2(so, so2))) 707 return (error); 708 709 unp->unp_connid.uid = curproc->p_ucred->cr_uid; 710 unp->unp_connid.gid = curproc->p_ucred->cr_gid; 711 unp->unp_connid.pid = curproc->p_p->ps_pid; 712 unp->unp_flags |= UNP_FEIDS; 713 unp2 = sotounpcb(so2); 714 unp2->unp_connid.uid = curproc->p_ucred->cr_uid; 715 unp2->unp_connid.gid = curproc->p_ucred->cr_gid; 716 unp2->unp_connid.pid = curproc->p_p->ps_pid; 717 unp2->unp_flags |= UNP_FEIDS; 718 719 return (0); 720 } 721 722 int 723 uipc_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 724 size_t newlen) 725 { 726 int *valp = &unp_defer; 727 728 /* All sysctl names at this level are terminal. */ 729 switch (name[0]) { 730 case SOCK_STREAM: 731 if (namelen != 2) 732 return (ENOTDIR); 733 return sysctl_bounded_arr(unpstctl_vars, nitems(unpstctl_vars), 734 name + 1, namelen - 1, oldp, oldlenp, newp, newlen); 735 case SOCK_SEQPACKET: 736 if (namelen != 2) 737 return (ENOTDIR); 738 return sysctl_bounded_arr(unpsqctl_vars, nitems(unpsqctl_vars), 739 name + 1, namelen - 1, oldp, oldlenp, newp, newlen); 740 case SOCK_DGRAM: 741 if (namelen != 2) 742 return (ENOTDIR); 743 return sysctl_bounded_arr(unpdgctl_vars, nitems(unpdgctl_vars), 744 name + 1, namelen - 1, oldp, oldlenp, newp, newlen); 745 case NET_UNIX_INFLIGHT: 746 valp = &unp_rights; 747 /* FALLTHROUGH */ 748 case NET_UNIX_DEFERRED: 749 if (namelen != 1) 750 return (ENOTDIR); 751 return sysctl_rdint(oldp, oldlenp, newp, *valp); 752 default: 753 return (ENOPROTOOPT); 754 } 755 } 756 757 void 758 unp_detach(struct unpcb *unp) 759 { 760 struct socket *so = unp->unp_socket; 761 struct vnode *vp = unp->unp_vnode; 762 struct unpcb *unp2; 763 764 unp->unp_vnode = NULL; 765 766 rw_enter_write(&unp_gc_lock); 767 LIST_REMOVE(unp, unp_link); 768 rw_exit_write(&unp_gc_lock); 769 770 if (vp != NULL) { 771 /* Enforce `i_lock' -> solock() lock order. */ 772 sounlock(so); 773 VOP_LOCK(vp, LK_EXCLUSIVE); 774 vp->v_socket = NULL; 775 776 KERNEL_LOCK(); 777 vput(vp); 778 KERNEL_UNLOCK(); 779 solock(so); 780 } 781 782 if (unp->unp_conn != NULL) { 783 /* 784 * Datagram socket could be connected to itself. 785 * Such socket will be disconnected here. 786 */ 787 unp_disconnect(unp); 788 } 789 790 while ((unp2 = SLIST_FIRST(&unp->unp_refs)) != NULL) { 791 struct socket *so2 = unp2->unp_socket; 792 793 if (so < so2) 794 solock(so2); 795 else { 796 unp_ref(unp2); 797 sounlock(so); 798 solock(so2); 799 solock(so); 800 801 if (unp2->unp_conn != unp) { 802 /* `unp2' was disconnected due to re-lock. */ 803 sounlock(so2); 804 unp_rele(unp2); 805 continue; 806 } 807 808 unp_rele(unp2); 809 } 810 811 unp2->unp_conn = NULL; 812 SLIST_REMOVE(&unp->unp_refs, unp2, unpcb, unp_nextref); 813 so2->so_error = ECONNRESET; 814 so2->so_state &= ~SS_ISCONNECTED; 815 816 sounlock(so2); 817 } 818 819 sounlock(so); 820 refcnt_finalize(&unp->unp_refcnt, "unpfinal"); 821 solock(so); 822 823 soisdisconnected(so); 824 so->so_pcb = NULL; 825 m_freem(unp->unp_addr); 826 pool_put(&unpcb_pool, unp); 827 if (unp_rights) 828 task_add(systqmp, &unp_gc_task); 829 } 830 831 int 832 unp_connect(struct socket *so, struct mbuf *nam, struct proc *p) 833 { 834 struct sockaddr_un *soun; 835 struct vnode *vp; 836 struct socket *so2, *so3; 837 struct unpcb *unp, *unp2, *unp3; 838 struct nameidata nd; 839 int error; 840 841 unp = sotounpcb(so); 842 if (unp->unp_flags & (UNP_BINDING | UNP_CONNECTING)) 843 return (EISCONN); 844 if ((error = unp_nam2sun(nam, &soun, NULL))) 845 return (error); 846 847 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, soun->sun_path, p); 848 nd.ni_pledge = PLEDGE_UNIX; 849 nd.ni_unveil = UNVEIL_WRITE; 850 851 unp->unp_flags |= UNP_CONNECTING; 852 853 /* 854 * Enforce `i_lock' -> `solock' because fifo subsystem 855 * requires it. The socket can't be closed concurrently 856 * because the file descriptor reference is still held. 857 */ 858 859 sounlock(so); 860 861 KERNEL_LOCK(); 862 error = namei(&nd); 863 if (error != 0) 864 goto unlock; 865 vp = nd.ni_vp; 866 if (vp->v_type != VSOCK) { 867 error = ENOTSOCK; 868 goto put; 869 } 870 if ((error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) != 0) 871 goto put; 872 so2 = vp->v_socket; 873 if (so2 == NULL) { 874 error = ECONNREFUSED; 875 goto put; 876 } 877 if (so->so_type != so2->so_type) { 878 error = EPROTOTYPE; 879 goto put; 880 } 881 882 if (so->so_proto->pr_flags & PR_CONNREQUIRED) { 883 solock(so2); 884 885 if ((so2->so_options & SO_ACCEPTCONN) == 0 || 886 (so3 = sonewconn(so2, 0, M_WAIT)) == NULL) { 887 error = ECONNREFUSED; 888 } 889 890 sounlock(so2); 891 892 if (error != 0) 893 goto put; 894 895 /* 896 * Since `so2' is protected by vnode(9) lock, `so3' 897 * can't be PRU_ABORT'ed here. 898 */ 899 solock_pair(so, so3); 900 901 unp2 = sotounpcb(so2); 902 unp3 = sotounpcb(so3); 903 904 /* 905 * `unp_addr', `unp_connid' and 'UNP_FEIDSBIND' flag 906 * are immutable since we set them in uipc_bind(). 907 */ 908 if (unp2->unp_addr) 909 unp3->unp_addr = 910 m_copym(unp2->unp_addr, 0, M_COPYALL, M_NOWAIT); 911 unp3->unp_connid.uid = p->p_ucred->cr_uid; 912 unp3->unp_connid.gid = p->p_ucred->cr_gid; 913 unp3->unp_connid.pid = p->p_p->ps_pid; 914 unp3->unp_flags |= UNP_FEIDS; 915 916 if (unp2->unp_flags & UNP_FEIDSBIND) { 917 unp->unp_connid = unp2->unp_connid; 918 unp->unp_flags |= UNP_FEIDS; 919 } 920 921 so2 = so3; 922 } else { 923 if (so2 != so) 924 solock_pair(so, so2); 925 else 926 solock(so); 927 } 928 929 error = unp_connect2(so, so2); 930 931 sounlock(so); 932 933 /* 934 * `so2' can't be PRU_ABORT'ed concurrently 935 */ 936 if (so2 != so) 937 sounlock(so2); 938 put: 939 vput(vp); 940 unlock: 941 KERNEL_UNLOCK(); 942 solock(so); 943 unp->unp_flags &= ~UNP_CONNECTING; 944 945 /* 946 * The peer socket could be closed by concurrent thread 947 * when `so' and `vp' are unlocked. 948 */ 949 if (error == 0 && unp->unp_conn == NULL) 950 error = ECONNREFUSED; 951 952 return (error); 953 } 954 955 int 956 unp_connect2(struct socket *so, struct socket *so2) 957 { 958 struct unpcb *unp = sotounpcb(so); 959 struct unpcb *unp2; 960 961 soassertlocked(so); 962 soassertlocked(so2); 963 964 if (so2->so_type != so->so_type) 965 return (EPROTOTYPE); 966 unp2 = sotounpcb(so2); 967 unp->unp_conn = unp2; 968 switch (so->so_type) { 969 970 case SOCK_DGRAM: 971 SLIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_nextref); 972 soisconnected(so); 973 break; 974 975 case SOCK_STREAM: 976 case SOCK_SEQPACKET: 977 unp2->unp_conn = unp; 978 soisconnected(so); 979 soisconnected(so2); 980 break; 981 982 default: 983 panic("unp_connect2"); 984 } 985 return (0); 986 } 987 988 void 989 unp_disconnect(struct unpcb *unp) 990 { 991 struct socket *so2; 992 struct unpcb *unp2; 993 994 if ((so2 = unp_solock_peer(unp->unp_socket)) == NULL) 995 return; 996 997 unp2 = unp->unp_conn; 998 unp->unp_conn = NULL; 999 1000 switch (unp->unp_socket->so_type) { 1001 1002 case SOCK_DGRAM: 1003 SLIST_REMOVE(&unp2->unp_refs, unp, unpcb, unp_nextref); 1004 unp->unp_socket->so_state &= ~SS_ISCONNECTED; 1005 break; 1006 1007 case SOCK_STREAM: 1008 case SOCK_SEQPACKET: 1009 unp->unp_socket->so_snd.sb_mbcnt = 0; 1010 unp->unp_socket->so_snd.sb_cc = 0; 1011 soisdisconnected(unp->unp_socket); 1012 unp2->unp_conn = NULL; 1013 unp2->unp_socket->so_snd.sb_mbcnt = 0; 1014 unp2->unp_socket->so_snd.sb_cc = 0; 1015 soisdisconnected(unp2->unp_socket); 1016 break; 1017 } 1018 1019 if (so2 != unp->unp_socket) 1020 sounlock(so2); 1021 } 1022 1023 static struct unpcb * 1024 fptounp(struct file *fp) 1025 { 1026 struct socket *so; 1027 1028 if (fp->f_type != DTYPE_SOCKET) 1029 return (NULL); 1030 if ((so = fp->f_data) == NULL) 1031 return (NULL); 1032 if (so->so_proto->pr_domain != &unixdomain) 1033 return (NULL); 1034 return (sotounpcb(so)); 1035 } 1036 1037 int 1038 unp_externalize(struct mbuf *rights, socklen_t controllen, int flags) 1039 { 1040 struct proc *p = curproc; /* XXX */ 1041 struct cmsghdr *cm = mtod(rights, struct cmsghdr *); 1042 struct filedesc *fdp = p->p_fd; 1043 int i, *fds = NULL; 1044 struct fdpass *rp; 1045 struct file *fp; 1046 int nfds, error = 0; 1047 1048 /* 1049 * This code only works because SCM_RIGHTS is the only supported 1050 * control message type on unix sockets. Enforce this here. 1051 */ 1052 if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET) 1053 return EINVAL; 1054 1055 nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / 1056 sizeof(struct fdpass); 1057 if (controllen < CMSG_ALIGN(sizeof(struct cmsghdr))) 1058 controllen = 0; 1059 else 1060 controllen -= CMSG_ALIGN(sizeof(struct cmsghdr)); 1061 if (nfds > controllen / sizeof(int)) { 1062 error = EMSGSIZE; 1063 goto out; 1064 } 1065 1066 /* Make sure the recipient should be able to see the descriptors.. */ 1067 rp = (struct fdpass *)CMSG_DATA(cm); 1068 1069 /* fdp->fd_rdir requires KERNEL_LOCK() */ 1070 KERNEL_LOCK(); 1071 1072 for (i = 0; i < nfds; i++) { 1073 fp = rp->fp; 1074 rp++; 1075 error = pledge_recvfd(p, fp); 1076 if (error) 1077 break; 1078 1079 /* 1080 * No to block devices. If passing a directory, 1081 * make sure that it is underneath the root. 1082 */ 1083 if (fdp->fd_rdir != NULL && fp->f_type == DTYPE_VNODE) { 1084 struct vnode *vp = (struct vnode *)fp->f_data; 1085 1086 if (vp->v_type == VBLK || 1087 (vp->v_type == VDIR && 1088 !vn_isunder(vp, fdp->fd_rdir, p))) { 1089 error = EPERM; 1090 break; 1091 } 1092 } 1093 } 1094 1095 KERNEL_UNLOCK(); 1096 1097 if (error) 1098 goto out; 1099 1100 fds = mallocarray(nfds, sizeof(int), M_TEMP, M_WAITOK); 1101 1102 fdplock(fdp); 1103 restart: 1104 /* 1105 * First loop -- allocate file descriptor table slots for the 1106 * new descriptors. 1107 */ 1108 rp = ((struct fdpass *)CMSG_DATA(cm)); 1109 for (i = 0; i < nfds; i++) { 1110 if ((error = fdalloc(p, 0, &fds[i])) != 0) { 1111 /* 1112 * Back out what we've done so far. 1113 */ 1114 for (--i; i >= 0; i--) 1115 fdremove(fdp, fds[i]); 1116 1117 if (error == ENOSPC) { 1118 fdexpand(p); 1119 goto restart; 1120 } 1121 1122 fdpunlock(fdp); 1123 1124 /* 1125 * This is the error that has historically 1126 * been returned, and some callers may 1127 * expect it. 1128 */ 1129 1130 error = EMSGSIZE; 1131 goto out; 1132 } 1133 1134 /* 1135 * Make the slot reference the descriptor so that 1136 * fdalloc() works properly.. We finalize it all 1137 * in the loop below. 1138 */ 1139 mtx_enter(&fdp->fd_fplock); 1140 KASSERT(fdp->fd_ofiles[fds[i]] == NULL); 1141 fdp->fd_ofiles[fds[i]] = rp->fp; 1142 mtx_leave(&fdp->fd_fplock); 1143 1144 fdp->fd_ofileflags[fds[i]] = (rp->flags & UF_PLEDGED); 1145 if (flags & MSG_CMSG_CLOEXEC) 1146 fdp->fd_ofileflags[fds[i]] |= UF_EXCLOSE; 1147 1148 rp++; 1149 } 1150 1151 /* 1152 * Keep `fdp' locked to prevent concurrent close() of just 1153 * inserted descriptors. Such descriptors could have the only 1154 * `f_count' reference which is now shared between control 1155 * message and `fdp'. 1156 */ 1157 1158 /* 1159 * Now that adding them has succeeded, update all of the 1160 * descriptor passing state. 1161 */ 1162 rp = (struct fdpass *)CMSG_DATA(cm); 1163 1164 for (i = 0; i < nfds; i++) { 1165 struct unpcb *unp; 1166 1167 fp = rp->fp; 1168 rp++; 1169 if ((unp = fptounp(fp)) != NULL) { 1170 rw_enter_write(&unp_gc_lock); 1171 unp->unp_msgcount--; 1172 rw_exit_write(&unp_gc_lock); 1173 } 1174 } 1175 fdpunlock(fdp); 1176 1177 mtx_enter(&unp_rights_mtx); 1178 unp_rights -= nfds; 1179 mtx_leave(&unp_rights_mtx); 1180 1181 /* 1182 * Copy temporary array to message and adjust length, in case of 1183 * transition from large struct file pointers to ints. 1184 */ 1185 memcpy(CMSG_DATA(cm), fds, nfds * sizeof(int)); 1186 cm->cmsg_len = CMSG_LEN(nfds * sizeof(int)); 1187 rights->m_len = CMSG_LEN(nfds * sizeof(int)); 1188 out: 1189 if (fds != NULL) 1190 free(fds, M_TEMP, nfds * sizeof(int)); 1191 1192 if (error) { 1193 if (nfds > 0) { 1194 /* 1195 * No lock required. We are the only `cm' holder. 1196 */ 1197 rp = ((struct fdpass *)CMSG_DATA(cm)); 1198 unp_discard(rp, nfds); 1199 } 1200 } 1201 1202 return (error); 1203 } 1204 1205 int 1206 unp_internalize(struct mbuf *control, struct proc *p) 1207 { 1208 struct filedesc *fdp = p->p_fd; 1209 struct cmsghdr *cm = mtod(control, struct cmsghdr *); 1210 struct fdpass *rp; 1211 struct file *fp; 1212 struct unpcb *unp; 1213 int i, error; 1214 int nfds, *ip, fd, neededspace; 1215 1216 /* 1217 * Check for two potential msg_controllen values because 1218 * IETF stuck their nose in a place it does not belong. 1219 */ 1220 if (control->m_len < CMSG_LEN(0) || cm->cmsg_len < CMSG_LEN(0)) 1221 return (EINVAL); 1222 if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET || 1223 !(cm->cmsg_len == control->m_len || 1224 control->m_len == CMSG_ALIGN(cm->cmsg_len))) 1225 return (EINVAL); 1226 nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof (int); 1227 1228 mtx_enter(&unp_rights_mtx); 1229 if (unp_rights + nfds > maxfiles / 10) { 1230 mtx_leave(&unp_rights_mtx); 1231 return (EMFILE); 1232 } 1233 unp_rights += nfds; 1234 mtx_leave(&unp_rights_mtx); 1235 1236 /* Make sure we have room for the struct file pointers */ 1237 morespace: 1238 neededspace = CMSG_SPACE(nfds * sizeof(struct fdpass)) - 1239 control->m_len; 1240 if (neededspace > m_trailingspace(control)) { 1241 char *tmp; 1242 /* if we already have a cluster, the message is just too big */ 1243 if (control->m_flags & M_EXT) { 1244 error = E2BIG; 1245 goto nospace; 1246 } 1247 1248 /* copy cmsg data temporarily out of the mbuf */ 1249 tmp = malloc(control->m_len, M_TEMP, M_WAITOK); 1250 memcpy(tmp, mtod(control, caddr_t), control->m_len); 1251 1252 /* allocate a cluster and try again */ 1253 MCLGET(control, M_WAIT); 1254 if ((control->m_flags & M_EXT) == 0) { 1255 free(tmp, M_TEMP, control->m_len); 1256 error = ENOBUFS; /* allocation failed */ 1257 goto nospace; 1258 } 1259 1260 /* copy the data back into the cluster */ 1261 cm = mtod(control, struct cmsghdr *); 1262 memcpy(cm, tmp, control->m_len); 1263 free(tmp, M_TEMP, control->m_len); 1264 goto morespace; 1265 } 1266 1267 /* adjust message & mbuf to note amount of space actually used. */ 1268 cm->cmsg_len = CMSG_LEN(nfds * sizeof(struct fdpass)); 1269 control->m_len = CMSG_SPACE(nfds * sizeof(struct fdpass)); 1270 1271 ip = ((int *)CMSG_DATA(cm)) + nfds - 1; 1272 rp = ((struct fdpass *)CMSG_DATA(cm)) + nfds - 1; 1273 fdplock(fdp); 1274 for (i = 0; i < nfds; i++) { 1275 memcpy(&fd, ip, sizeof fd); 1276 ip--; 1277 if ((fp = fd_getfile(fdp, fd)) == NULL) { 1278 error = EBADF; 1279 goto fail; 1280 } 1281 if (fp->f_count >= FDUP_MAX_COUNT) { 1282 error = EDEADLK; 1283 goto fail; 1284 } 1285 error = pledge_sendfd(p, fp); 1286 if (error) 1287 goto fail; 1288 1289 /* kqueue descriptors cannot be copied */ 1290 if (fp->f_type == DTYPE_KQUEUE) { 1291 error = EINVAL; 1292 goto fail; 1293 } 1294 #if NKCOV > 0 1295 /* kcov descriptors cannot be copied */ 1296 if (fp->f_type == DTYPE_VNODE && kcov_vnode(fp->f_data)) { 1297 error = EINVAL; 1298 goto fail; 1299 } 1300 #endif 1301 rp->fp = fp; 1302 rp->flags = fdp->fd_ofileflags[fd] & UF_PLEDGED; 1303 rp--; 1304 if ((unp = fptounp(fp)) != NULL) { 1305 rw_enter_write(&unp_gc_lock); 1306 unp->unp_msgcount++; 1307 unp->unp_file = fp; 1308 rw_exit_write(&unp_gc_lock); 1309 } 1310 } 1311 fdpunlock(fdp); 1312 return (0); 1313 fail: 1314 fdpunlock(fdp); 1315 if (fp != NULL) 1316 FRELE(fp, p); 1317 /* Back out what we just did. */ 1318 for ( ; i > 0; i--) { 1319 rp++; 1320 fp = rp->fp; 1321 if ((unp = fptounp(fp)) != NULL) { 1322 rw_enter_write(&unp_gc_lock); 1323 unp->unp_msgcount--; 1324 rw_exit_write(&unp_gc_lock); 1325 } 1326 FRELE(fp, p); 1327 } 1328 1329 nospace: 1330 mtx_enter(&unp_rights_mtx); 1331 unp_rights -= nfds; 1332 mtx_leave(&unp_rights_mtx); 1333 1334 return (error); 1335 } 1336 1337 void 1338 unp_gc(void *arg __unused) 1339 { 1340 struct unp_deferral *defer; 1341 struct file *fp; 1342 struct socket *so; 1343 struct unpcb *unp; 1344 int nunref, i; 1345 1346 rw_enter_write(&unp_gc_lock); 1347 if (unp_gcing) 1348 goto unlock; 1349 unp_gcing = 1; 1350 rw_exit_write(&unp_gc_lock); 1351 1352 rw_enter_write(&unp_df_lock); 1353 /* close any fds on the deferred list */ 1354 while ((defer = SLIST_FIRST(&unp_deferred)) != NULL) { 1355 SLIST_REMOVE_HEAD(&unp_deferred, ud_link); 1356 rw_exit_write(&unp_df_lock); 1357 for (i = 0; i < defer->ud_n; i++) { 1358 fp = defer->ud_fp[i].fp; 1359 if (fp == NULL) 1360 continue; 1361 if ((unp = fptounp(fp)) != NULL) { 1362 rw_enter_write(&unp_gc_lock); 1363 unp->unp_msgcount--; 1364 rw_exit_write(&unp_gc_lock); 1365 } 1366 mtx_enter(&unp_rights_mtx); 1367 unp_rights--; 1368 mtx_leave(&unp_rights_mtx); 1369 /* closef() expects a refcount of 2 */ 1370 FREF(fp); 1371 (void) closef(fp, NULL); 1372 } 1373 free(defer, M_TEMP, sizeof(*defer) + 1374 sizeof(struct fdpass) * defer->ud_n); 1375 rw_enter_write(&unp_df_lock); 1376 } 1377 rw_exit_write(&unp_df_lock); 1378 1379 nunref = 0; 1380 1381 rw_enter_write(&unp_gc_lock); 1382 1383 /* 1384 * Determine sockets which may be prospectively dead. Such 1385 * sockets have their `unp_msgcount' equal to the `f_count'. 1386 * If `unp_msgcount' is 0, the socket has not been passed 1387 * and can't be unreferenced. 1388 */ 1389 LIST_FOREACH(unp, &unp_head, unp_link) { 1390 unp->unp_gcflags = 0; 1391 1392 if (unp->unp_msgcount == 0) 1393 continue; 1394 if ((fp = unp->unp_file) == NULL) 1395 continue; 1396 if (fp->f_count == unp->unp_msgcount) { 1397 unp->unp_gcflags |= UNP_GCDEAD; 1398 unp->unp_gcrefs = unp->unp_msgcount; 1399 nunref++; 1400 } 1401 } 1402 1403 /* 1404 * Scan all sockets previously marked as dead. Remove 1405 * the `unp_gcrefs' reference each socket holds on any 1406 * dead socket in its buffer. 1407 */ 1408 LIST_FOREACH(unp, &unp_head, unp_link) { 1409 if ((unp->unp_gcflags & UNP_GCDEAD) == 0) 1410 continue; 1411 so = unp->unp_socket; 1412 mtx_enter(&so->so_rcv.sb_mtx); 1413 unp_scan(so->so_rcv.sb_mb, unp_remove_gcrefs); 1414 mtx_leave(&so->so_rcv.sb_mtx); 1415 } 1416 1417 /* 1418 * If the dead socket has `unp_gcrefs' reference counter 1419 * greater than 0, it can't be unreferenced. Mark it as 1420 * alive and increment the `unp_gcrefs' reference for each 1421 * dead socket within its buffer. Repeat this until we 1422 * have no new alive sockets found. 1423 */ 1424 do { 1425 unp_defer = 0; 1426 1427 LIST_FOREACH(unp, &unp_head, unp_link) { 1428 if ((unp->unp_gcflags & UNP_GCDEAD) == 0) 1429 continue; 1430 if (unp->unp_gcrefs == 0) 1431 continue; 1432 1433 unp->unp_gcflags &= ~UNP_GCDEAD; 1434 1435 so = unp->unp_socket; 1436 mtx_enter(&so->so_rcv.sb_mtx); 1437 unp_scan(so->so_rcv.sb_mb, unp_restore_gcrefs); 1438 mtx_leave(&so->so_rcv.sb_mtx); 1439 1440 KASSERT(nunref > 0); 1441 nunref--; 1442 } 1443 } while (unp_defer > 0); 1444 1445 /* 1446 * If there are any unreferenced sockets, then for each dispose 1447 * of files in its receive buffer and then close it. 1448 */ 1449 if (nunref) { 1450 LIST_FOREACH(unp, &unp_head, unp_link) { 1451 if (unp->unp_gcflags & UNP_GCDEAD) { 1452 struct sockbuf *sb = &unp->unp_socket->so_rcv; 1453 struct mbuf *m; 1454 1455 /* 1456 * This socket could still be connected 1457 * and if so it's `so_rcv' is still 1458 * accessible by concurrent PRU_SEND 1459 * thread. 1460 */ 1461 1462 mtx_enter(&sb->sb_mtx); 1463 m = sb->sb_mb; 1464 memset(&sb->sb_startzero, 0, 1465 (caddr_t)&sb->sb_endzero - 1466 (caddr_t)&sb->sb_startzero); 1467 sb->sb_timeo_nsecs = INFSLP; 1468 mtx_leave(&sb->sb_mtx); 1469 1470 unp_scan(m, unp_discard); 1471 m_purge(m); 1472 } 1473 } 1474 } 1475 1476 unp_gcing = 0; 1477 unlock: 1478 rw_exit_write(&unp_gc_lock); 1479 } 1480 1481 void 1482 unp_dispose(struct mbuf *m) 1483 { 1484 1485 if (m) 1486 unp_scan(m, unp_discard); 1487 } 1488 1489 void 1490 unp_scan(struct mbuf *m0, void (*op)(struct fdpass *, int)) 1491 { 1492 struct mbuf *m; 1493 struct fdpass *rp; 1494 struct cmsghdr *cm; 1495 int qfds; 1496 1497 while (m0) { 1498 for (m = m0; m; m = m->m_next) { 1499 if (m->m_type == MT_CONTROL && 1500 m->m_len >= sizeof(*cm)) { 1501 cm = mtod(m, struct cmsghdr *); 1502 if (cm->cmsg_level != SOL_SOCKET || 1503 cm->cmsg_type != SCM_RIGHTS) 1504 continue; 1505 qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof *cm)) 1506 / sizeof(struct fdpass); 1507 if (qfds > 0) { 1508 rp = (struct fdpass *)CMSG_DATA(cm); 1509 op(rp, qfds); 1510 } 1511 break; /* XXX, but saves time */ 1512 } 1513 } 1514 m0 = m0->m_nextpkt; 1515 } 1516 } 1517 1518 void 1519 unp_discard(struct fdpass *rp, int nfds) 1520 { 1521 struct unp_deferral *defer; 1522 1523 /* copy the file pointers to a deferral structure */ 1524 defer = malloc(sizeof(*defer) + sizeof(*rp) * nfds, M_TEMP, M_WAITOK); 1525 defer->ud_n = nfds; 1526 memcpy(&defer->ud_fp[0], rp, sizeof(*rp) * nfds); 1527 memset(rp, 0, sizeof(*rp) * nfds); 1528 1529 rw_enter_write(&unp_df_lock); 1530 SLIST_INSERT_HEAD(&unp_deferred, defer, ud_link); 1531 rw_exit_write(&unp_df_lock); 1532 1533 task_add(systqmp, &unp_gc_task); 1534 } 1535 1536 void 1537 unp_remove_gcrefs(struct fdpass *rp, int nfds) 1538 { 1539 struct unpcb *unp; 1540 int i; 1541 1542 rw_assert_wrlock(&unp_gc_lock); 1543 1544 for (i = 0; i < nfds; i++) { 1545 if (rp[i].fp == NULL) 1546 continue; 1547 if ((unp = fptounp(rp[i].fp)) == NULL) 1548 continue; 1549 if (unp->unp_gcflags & UNP_GCDEAD) { 1550 KASSERT(unp->unp_gcrefs > 0); 1551 unp->unp_gcrefs--; 1552 } 1553 } 1554 } 1555 1556 void 1557 unp_restore_gcrefs(struct fdpass *rp, int nfds) 1558 { 1559 struct unpcb *unp; 1560 int i; 1561 1562 rw_assert_wrlock(&unp_gc_lock); 1563 1564 for (i = 0; i < nfds; i++) { 1565 if (rp[i].fp == NULL) 1566 continue; 1567 if ((unp = fptounp(rp[i].fp)) == NULL) 1568 continue; 1569 if (unp->unp_gcflags & UNP_GCDEAD) { 1570 unp->unp_gcrefs++; 1571 unp_defer++; 1572 } 1573 } 1574 } 1575 1576 int 1577 unp_nam2sun(struct mbuf *nam, struct sockaddr_un **sun, size_t *pathlen) 1578 { 1579 struct sockaddr *sa = mtod(nam, struct sockaddr *); 1580 size_t size, len; 1581 1582 if (nam->m_len < offsetof(struct sockaddr, sa_data)) 1583 return EINVAL; 1584 if (sa->sa_family != AF_UNIX) 1585 return EAFNOSUPPORT; 1586 if (sa->sa_len != nam->m_len) 1587 return EINVAL; 1588 if (sa->sa_len > sizeof(struct sockaddr_un)) 1589 return EINVAL; 1590 *sun = (struct sockaddr_un *)sa; 1591 1592 /* ensure that sun_path is NUL terminated and fits */ 1593 size = (*sun)->sun_len - offsetof(struct sockaddr_un, sun_path); 1594 len = strnlen((*sun)->sun_path, size); 1595 if (len == sizeof((*sun)->sun_path)) 1596 return EINVAL; 1597 if (len == size) { 1598 if (m_trailingspace(nam) == 0) 1599 return EINVAL; 1600 nam->m_len++; 1601 (*sun)->sun_len++; 1602 (*sun)->sun_path[len] = '\0'; 1603 } 1604 if (pathlen != NULL) 1605 *pathlen = len; 1606 1607 return 0; 1608 } 1609