/*	$OpenBSD: uipc_usrreq.c,v 1.189 2022/09/20 10:10:11 mvs Exp $	*/
/*	$NetBSD: uipc_usrreq.c,v 1.18 1996/02/09 19:00:50 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/unpcb.h>
#include <sys/un.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/mbuf.h>
#include <sys/task.h>
#include <sys/pledge.h>
#include <sys/pool.h>
#include <sys/rwlock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/refcnt.h>

#include "kcov.h"
#if NKCOV > 0
#include <sys/kcov.h>
#endif

/*
 * Locks used to protect global data and struct members:
 *	I	immutable after creation
 *	D	unp_df_lock
 *	G	unp_gc_lock
 *	M	unp_ino_mtx
 *	R	unp_rights_mtx
 *	a	atomic
 *	s	socket lock
 */

struct rwlock unp_lock = RWLOCK_INITIALIZER("unplock");
struct rwlock unp_df_lock = RWLOCK_INITIALIZER("unpdflk");
struct rwlock unp_gc_lock = RWLOCK_INITIALIZER("unpgclk");

struct mutex unp_rights_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
struct mutex unp_ino_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);

/*
 * Stack of sets of files that were passed over a socket but were
 * not received and need to be closed.
 */
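/*
 * Each entry is allocated with room for `ud_n' trailing struct fdpass
 * slots (a flexible array member); see unp_discard() for the matching
 * malloc() and unp_gc() for the consumer that closes them.
 */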
struct unp_deferral {
	SLIST_ENTRY(unp_deferral)	ud_link;	/* [D] */
	int				ud_n;		/* [I] */
	/* followed by ud_n struct fdpass */
	struct fdpass			ud_fp[];	/* [I] */
};

void	uipc_setaddr(const struct unpcb *, struct mbuf *);
void	unp_discard(struct fdpass *, int);
void	unp_remove_gcrefs(struct fdpass *, int);
void	unp_restore_gcrefs(struct fdpass *, int);
void	unp_scan(struct mbuf *, void (*)(struct fdpass *, int));
int	unp_nam2sun(struct mbuf *, struct sockaddr_un **, size_t *);
static inline void unp_ref(struct unpcb *);
static inline void unp_rele(struct unpcb *);
struct socket *unp_solock_peer(struct socket *);

struct pool unpcb_pool;
struct task unp_gc_task = TASK_INITIALIZER(unp_gc, NULL);

/*
 * Unix communications domain.
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */
const struct sockaddr sun_noname = { sizeof(sun_noname), AF_UNIX };

/* [G] list of all UNIX domain sockets, for unp_gc() */
LIST_HEAD(unp_head, unpcb)	unp_head =
    LIST_HEAD_INITIALIZER(unp_head);
/* [D] list of sets of files that were sent over sockets that are now closed */
SLIST_HEAD(,unp_deferral)	unp_deferred =
    SLIST_HEAD_INITIALIZER(unp_deferred);

ino_t	unp_ino;	/* [M] prototype for fake inode numbers */
int	unp_rights;	/* [R] file descriptors in flight */
int	unp_defer;	/* [G] number of deferred fp to close by the GC task */
int	unp_gcing;	/* [G] GC task currently running */

const struct pr_usrreqs uipc_usrreqs = {
	.pru_attach	= uipc_attach,
	.pru_detach	= uipc_detach,
	.pru_bind	= uipc_bind,
	.pru_listen	= uipc_listen,
	.pru_connect	= uipc_connect,
	.pru_accept	= uipc_accept,
	.pru_disconnect	= uipc_disconnect,
	.pru_shutdown	= uipc_shutdown,
	.pru_rcvd	= uipc_rcvd,
	.pru_send	= uipc_send,
	.pru_abort	= uipc_abort,
	.pru_sense	= uipc_sense,
	.pru_sockaddr	= uipc_sockaddr,
	.pru_peeraddr	= uipc_peeraddr,
	.pru_connect2	= uipc_connect2,
};

void
unp_init(void)
{
	pool_init(&unpcb_pool, sizeof(struct unpcb), 0,
	    IPL_SOFTNET, 0, "unpcb", NULL);
}

static inline void
unp_ref(struct unpcb *unp)
{
	refcnt_take(&unp->unp_refcnt);
}

static inline void
unp_rele(struct unpcb *unp)
{
	refcnt_rele_wake(&unp->unp_refcnt);
}

struct socket *
unp_solock_peer(struct socket *so)
{
	struct unpcb *unp, *unp2;
	struct socket *so2;

	unp = so->so_pcb;

again:
	if ((unp2 = unp->unp_conn) == NULL)
		return NULL;

	so2 = unp2->unp_socket;

	if (so < so2)
		solock(so2);
	else if (so > so2) {
		unp_ref(unp2);
		sounlock(so);
		solock(so2);
		solock(so);

		/*
		 * Datagram socket could be reconnected due to re-lock.
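		 * While `so' was unlocked a concurrent connect(2) may
		 * have pointed `unp_conn' at a different peer, so the
		 * connection must be re-checked before `so2' is trusted.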
		 */
		if (unp->unp_conn != unp2) {
			sounlock(so2);
			unp_rele(unp2);
			goto again;
		}

		unp_rele(unp2);
	}

	return so2;
}

void
uipc_setaddr(const struct unpcb *unp, struct mbuf *nam)
{
	if (unp != NULL && unp->unp_addr != NULL) {
		nam->m_len = unp->unp_addr->m_len;
		memcpy(mtod(nam, caddr_t), mtod(unp->unp_addr, caddr_t),
		    nam->m_len);
	} else {
		nam->m_len = sizeof(sun_noname);
		memcpy(mtod(nam, struct sockaddr *), &sun_noname,
		    nam->m_len);
	}
}

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#define	PIPSIZ	8192
u_int	unpst_sendspace = PIPSIZ;
u_int	unpst_recvspace = PIPSIZ;
u_int	unpsq_sendspace = PIPSIZ;
u_int	unpsq_recvspace = PIPSIZ;
u_int	unpdg_sendspace = 2*1024;	/* really max datagram size */
u_int	unpdg_recvspace = 16*1024;

const struct sysctl_bounded_args unpstctl_vars[] = {
	{ UNPCTL_RECVSPACE, &unpst_recvspace, 0, SB_MAX },
	{ UNPCTL_SENDSPACE, &unpst_sendspace, 0, SB_MAX },
};
const struct sysctl_bounded_args unpsqctl_vars[] = {
	{ UNPCTL_RECVSPACE, &unpsq_recvspace, 0, SB_MAX },
	{ UNPCTL_SENDSPACE, &unpsq_sendspace, 0, SB_MAX },
};
const struct sysctl_bounded_args unpdgctl_vars[] = {
	{ UNPCTL_RECVSPACE, &unpdg_recvspace, 0, SB_MAX },
	{ UNPCTL_SENDSPACE, &unpdg_sendspace, 0, SB_MAX },
};

int
uipc_attach(struct socket *so, int proto)
{
	struct unpcb *unp;
	int error;

	if (so->so_pcb)
		return EISCONN;
	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		switch (so->so_type) {

		case SOCK_STREAM:
			error = soreserve(so, unpst_sendspace, unpst_recvspace);
			break;

		case SOCK_SEQPACKET:
			error = soreserve(so, unpsq_sendspace, unpsq_recvspace);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			return (error);
	}
	unp = pool_get(&unpcb_pool, PR_NOWAIT|PR_ZERO);
	if (unp == NULL)
		return (ENOBUFS);
	refcnt_init(&unp->unp_refcnt);
	unp->unp_socket = so;
	so->so_pcb = unp;
	getnanotime(&unp->unp_ctime);

	/*
	 * Enforce `unp_gc_lock' -> `solock()' lock order.
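	 * unp_gc() takes `unp_gc_lock' and then locks the sockets it
	 * scans, so the socket lock may not be held while `unp_gc_lock'
	 * is taken here.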
283 */ 284 sounlock(so); 285 rw_enter_write(&unp_gc_lock); 286 LIST_INSERT_HEAD(&unp_head, unp, unp_link); 287 rw_exit_write(&unp_gc_lock); 288 solock(so); 289 return (0); 290 } 291 292 int 293 uipc_detach(struct socket *so) 294 { 295 struct unpcb *unp = sotounpcb(so); 296 297 if (unp == NULL) 298 return (EINVAL); 299 300 unp_detach(unp); 301 302 return (0); 303 } 304 305 int 306 uipc_bind(struct socket *so, struct mbuf *nam, struct proc *p) 307 { 308 struct unpcb *unp = sotounpcb(so); 309 310 return unp_bind(unp, nam, p); 311 } 312 313 int 314 uipc_listen(struct socket *so) 315 { 316 struct unpcb *unp = sotounpcb(so); 317 318 if (unp->unp_vnode == NULL) 319 return (EINVAL); 320 return (0); 321 } 322 323 int 324 uipc_connect(struct socket *so, struct mbuf *nam) 325 { 326 return unp_connect(so, nam, curproc); 327 } 328 329 int 330 uipc_accept(struct socket *so, struct mbuf *nam) 331 { 332 struct socket *so2; 333 struct unpcb *unp = sotounpcb(so); 334 335 /* 336 * Pass back name of connected socket, if it was bound and 337 * we are still connected (our peer may have closed already!). 338 */ 339 so2 = unp_solock_peer(so); 340 uipc_setaddr(unp->unp_conn, nam); 341 342 if (so2 != NULL && so2 != so) 343 sounlock(so2); 344 return (0); 345 } 346 347 int 348 uipc_disconnect(struct socket *so) 349 { 350 struct unpcb *unp = sotounpcb(so); 351 352 unp_disconnect(unp); 353 return (0); 354 } 355 356 int 357 uipc_shutdown(struct socket *so) 358 { 359 struct unpcb *unp = sotounpcb(so); 360 361 socantsendmore(so); 362 unp_shutdown(unp); 363 return (0); 364 } 365 366 void 367 uipc_rcvd(struct socket *so) 368 { 369 struct socket *so2; 370 371 switch (so->so_type) { 372 case SOCK_DGRAM: 373 panic("uipc 1"); 374 /*NOTREACHED*/ 375 376 case SOCK_STREAM: 377 case SOCK_SEQPACKET: 378 if ((so2 = unp_solock_peer(so)) == NULL) 379 break; 380 /* 381 * Adjust backpressure on sender 382 * and wakeup any waiting to write. 383 */ 384 so2->so_snd.sb_mbcnt = so->so_rcv.sb_mbcnt; 385 so2->so_snd.sb_cc = so->so_rcv.sb_cc; 386 sowwakeup(so2); 387 sounlock(so2); 388 break; 389 390 default: 391 panic("uipc 2"); 392 } 393 } 394 395 int 396 uipc_send(struct socket *so, struct mbuf *m, struct mbuf *nam, 397 struct mbuf *control) 398 { 399 struct unpcb *unp = sotounpcb(so); 400 struct socket *so2; 401 int error = 0; 402 403 if (control) { 404 sounlock(so); 405 error = unp_internalize(control, curproc); 406 solock(so); 407 if (error) 408 goto out; 409 } 410 411 switch (so->so_type) { 412 case SOCK_DGRAM: { 413 const struct sockaddr *from; 414 415 if (nam) { 416 if (unp->unp_conn) { 417 error = EISCONN; 418 break; 419 } 420 error = unp_connect(so, nam, curproc); 421 if (error) 422 break; 423 } 424 425 if ((so2 = unp_solock_peer(so)) == NULL) { 426 if (nam != NULL) 427 error = ECONNREFUSED; 428 else 429 error = ENOTCONN; 430 break; 431 } 432 433 if (unp->unp_addr) 434 from = mtod(unp->unp_addr, struct sockaddr *); 435 else 436 from = &sun_noname; 437 if (sbappendaddr(so2, &so2->so_rcv, from, m, control)) { 438 sorwakeup(so2); 439 m = NULL; 440 control = NULL; 441 } else 442 error = ENOBUFS; 443 444 if (so2 != so) 445 sounlock(so2); 446 447 if (nam) 448 unp_disconnect(unp); 449 break; 450 } 451 452 case SOCK_STREAM: 453 case SOCK_SEQPACKET: 454 if (so->so_state & SS_CANTSENDMORE) { 455 error = EPIPE; 456 break; 457 } 458 if ((so2 = unp_solock_peer(so)) == NULL) { 459 error = ENOTCONN; 460 break; 461 } 462 463 /* 464 * Send to paired receive port, and then raise 465 * send buffer counts to maintain backpressure. 466 * Wake up readers. 
467 */ 468 if (control) { 469 if (sbappendcontrol(so2, &so2->so_rcv, m, control)) { 470 control = NULL; 471 } else { 472 sounlock(so2); 473 error = ENOBUFS; 474 break; 475 } 476 } else if (so->so_type == SOCK_SEQPACKET) 477 sbappendrecord(so2, &so2->so_rcv, m); 478 else 479 sbappend(so2, &so2->so_rcv, m); 480 so->so_snd.sb_mbcnt = so2->so_rcv.sb_mbcnt; 481 so->so_snd.sb_cc = so2->so_rcv.sb_cc; 482 if (so2->so_rcv.sb_cc > 0) 483 sorwakeup(so2); 484 485 sounlock(so2); 486 m = NULL; 487 break; 488 489 default: 490 panic("uipc 4"); 491 } 492 493 /* we need to undo unp_internalize in case of errors */ 494 if (control && error) 495 unp_dispose(control); 496 497 out: 498 m_freem(control); 499 m_freem(m); 500 501 return (error); 502 } 503 504 int 505 uipc_abort(struct socket *so) 506 { 507 struct unpcb *unp = sotounpcb(so); 508 509 unp_detach(unp); 510 sofree(so, 0); 511 512 return (0); 513 } 514 515 int 516 uipc_sense(struct socket *so, struct stat *sb) 517 { 518 struct unpcb *unp = sotounpcb(so); 519 520 sb->st_blksize = so->so_snd.sb_hiwat; 521 sb->st_dev = NODEV; 522 mtx_enter(&unp_ino_mtx); 523 if (unp->unp_ino == 0) 524 unp->unp_ino = unp_ino++; 525 mtx_leave(&unp_ino_mtx); 526 sb->st_atim.tv_sec = 527 sb->st_mtim.tv_sec = 528 sb->st_ctim.tv_sec = unp->unp_ctime.tv_sec; 529 sb->st_atim.tv_nsec = 530 sb->st_mtim.tv_nsec = 531 sb->st_ctim.tv_nsec = unp->unp_ctime.tv_nsec; 532 sb->st_ino = unp->unp_ino; 533 534 return (0); 535 } 536 537 int 538 uipc_sockaddr(struct socket *so, struct mbuf *nam) 539 { 540 struct unpcb *unp = sotounpcb(so); 541 542 uipc_setaddr(unp, nam); 543 return (0); 544 } 545 546 int 547 uipc_peeraddr(struct socket *so, struct mbuf *nam) 548 { 549 struct unpcb *unp = sotounpcb(so); 550 struct socket *so2; 551 552 so2 = unp_solock_peer(so); 553 uipc_setaddr(unp->unp_conn, nam); 554 if (so2 != NULL && so2 != so) 555 sounlock(so2); 556 return (0); 557 } 558 559 int 560 uipc_connect2(struct socket *so, struct socket *so2) 561 { 562 struct unpcb *unp = sotounpcb(so), *unp2; 563 int error; 564 565 if ((error = unp_connect2(so, so2))) 566 return (error); 567 568 unp->unp_connid.uid = curproc->p_ucred->cr_uid; 569 unp->unp_connid.gid = curproc->p_ucred->cr_gid; 570 unp->unp_connid.pid = curproc->p_p->ps_pid; 571 unp->unp_flags |= UNP_FEIDS; 572 unp2 = sotounpcb(so2); 573 unp2->unp_connid.uid = curproc->p_ucred->cr_uid; 574 unp2->unp_connid.gid = curproc->p_ucred->cr_gid; 575 unp2->unp_connid.pid = curproc->p_p->ps_pid; 576 unp2->unp_flags |= UNP_FEIDS; 577 578 return (0); 579 } 580 581 int 582 uipc_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 583 size_t newlen) 584 { 585 int *valp = &unp_defer; 586 587 /* All sysctl names at this level are terminal. 
	 */
	switch (name[0]) {
	case SOCK_STREAM:
		if (namelen != 2)
			return (ENOTDIR);
		return sysctl_bounded_arr(unpstctl_vars, nitems(unpstctl_vars),
		    name + 1, namelen - 1, oldp, oldlenp, newp, newlen);
	case SOCK_SEQPACKET:
		if (namelen != 2)
			return (ENOTDIR);
		return sysctl_bounded_arr(unpsqctl_vars, nitems(unpsqctl_vars),
		    name + 1, namelen - 1, oldp, oldlenp, newp, newlen);
	case SOCK_DGRAM:
		if (namelen != 2)
			return (ENOTDIR);
		return sysctl_bounded_arr(unpdgctl_vars, nitems(unpdgctl_vars),
		    name + 1, namelen - 1, oldp, oldlenp, newp, newlen);
	case NET_UNIX_INFLIGHT:
		valp = &unp_rights;
		/* FALLTHROUGH */
	case NET_UNIX_DEFERRED:
		if (namelen != 1)
			return (ENOTDIR);
		return sysctl_rdint(oldp, oldlenp, newp, *valp);
	default:
		return (ENOPROTOOPT);
	}
}

void
unp_detach(struct unpcb *unp)
{
	struct socket *so = unp->unp_socket;
	struct vnode *vp = unp->unp_vnode;
	struct unpcb *unp2;

	unp->unp_vnode = NULL;

	/*
	 * Enforce `unp_gc_lock' -> `solock()' lock order.
	 * Enforce `i_lock' -> `solock()' lock order.
	 */
	sounlock(so);

	rw_enter_write(&unp_gc_lock);
	LIST_REMOVE(unp, unp_link);
	rw_exit_write(&unp_gc_lock);

	if (vp != NULL) {
		VOP_LOCK(vp, LK_EXCLUSIVE);
		vp->v_socket = NULL;

		KERNEL_LOCK();
		vput(vp);
		KERNEL_UNLOCK();
	}

	solock(so);

	if (unp->unp_conn != NULL) {
		/*
		 * Datagram socket could be connected to itself.
		 * Such socket will be disconnected here.
		 */
		unp_disconnect(unp);
	}

	while ((unp2 = SLIST_FIRST(&unp->unp_refs)) != NULL) {
		struct socket *so2 = unp2->unp_socket;

		if (so < so2)
			solock(so2);
		else {
			unp_ref(unp2);
			sounlock(so);
			solock(so2);
			solock(so);

			if (unp2->unp_conn != unp) {
				/* `unp2' was disconnected due to re-lock. */
				sounlock(so2);
				unp_rele(unp2);
				continue;
			}

			unp_rele(unp2);
		}

		unp2->unp_conn = NULL;
		SLIST_REMOVE(&unp->unp_refs, unp2, unpcb, unp_nextref);
		so2->so_error = ECONNRESET;
		so2->so_state &= ~SS_ISCONNECTED;

		sounlock(so2);
	}

	sounlock(so);
	refcnt_finalize(&unp->unp_refcnt, "unpfinal");
	solock(so);

	soisdisconnected(so);
	so->so_pcb = NULL;
	m_freem(unp->unp_addr);
	pool_put(&unpcb_pool, unp);
	if (unp_rights)
		task_add(systqmp, &unp_gc_task);
}

int
unp_bind(struct unpcb *unp, struct mbuf *nam, struct proc *p)
{
	struct sockaddr_un *soun;
	struct mbuf *nam2;
	struct vnode *vp;
	struct vattr vattr;
	int error;
	struct nameidata nd;
	size_t pathlen;

	if (unp->unp_flags & (UNP_BINDING | UNP_CONNECTING))
		return (EINVAL);
	if (unp->unp_vnode != NULL)
		return (EINVAL);
	if ((error = unp_nam2sun(nam, &soun, &pathlen)))
		return (error);

	unp->unp_flags |= UNP_BINDING;

	/*
	 * Enforce `i_lock' -> `unplock' because fifo subsystem
	 * requires it. The socket can't be closed concurrently
	 * because the file descriptor reference is still held.
	 */

	sounlock(unp->unp_socket);

	nam2 = m_getclr(M_WAITOK, MT_SONAME);
	nam2->m_len = sizeof(struct sockaddr_un);
	memcpy(mtod(nam2, struct sockaddr_un *), soun,
	    offsetof(struct sockaddr_un, sun_path) + pathlen);
	/*
	 * No need to NUL terminate: m_getclr() returns zero'd mbufs.
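	 * unp_nam2sun() already rejected paths that fill sun_path
	 * completely, so the zeroed tail of the full-sized mbuf always
	 * provides the terminator.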
	 */

	soun = mtod(nam2, struct sockaddr_un *);

	/* Fixup sun_len to keep it in sync with m_len. */
	soun->sun_len = nam2->m_len;

	NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE,
	    soun->sun_path, p);
	nd.ni_pledge = PLEDGE_UNIX;
	nd.ni_unveil = UNVEIL_CREATE;

	KERNEL_LOCK();
	/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	error = namei(&nd);
	if (error != 0) {
		m_freem(nam2);
		solock(unp->unp_socket);
		goto out;
	}
	vp = nd.ni_vp;
	if (vp != NULL) {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(vp);
		m_freem(nam2);
		error = EADDRINUSE;
		solock(unp->unp_socket);
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	vput(nd.ni_dvp);
	if (error) {
		m_freem(nam2);
		solock(unp->unp_socket);
		goto out;
	}
	solock(unp->unp_socket);
	unp->unp_addr = nam2;
	vp = nd.ni_vp;
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_connid.uid = p->p_ucred->cr_uid;
	unp->unp_connid.gid = p->p_ucred->cr_gid;
	unp->unp_connid.pid = p->p_p->ps_pid;
	unp->unp_flags |= UNP_FEIDSBIND;
	VOP_UNLOCK(vp);
out:
	KERNEL_UNLOCK();
	unp->unp_flags &= ~UNP_BINDING;

	return (error);
}

int
unp_connect(struct socket *so, struct mbuf *nam, struct proc *p)
{
	struct sockaddr_un *soun;
	struct vnode *vp;
	struct socket *so2, *so3;
	struct unpcb *unp, *unp2, *unp3;
	struct nameidata nd;
	int error;

	unp = sotounpcb(so);
	if (unp->unp_flags & (UNP_BINDING | UNP_CONNECTING))
		return (EISCONN);
	if ((error = unp_nam2sun(nam, &soun, NULL)))
		return (error);

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, soun->sun_path, p);
	nd.ni_pledge = PLEDGE_UNIX;
	nd.ni_unveil = UNVEIL_WRITE;

	unp->unp_flags |= UNP_CONNECTING;

	/*
	 * Enforce `i_lock' -> `unplock' because fifo subsystem
	 * requires it. The socket can't be closed concurrently
	 * because the file descriptor reference is still held.
	 */

	sounlock(so);

	KERNEL_LOCK();
	error = namei(&nd);
	if (error != 0)
		goto unlock;
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto put;
	}
	if ((error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) != 0)
		goto put;
	so2 = vp->v_socket;
	if (so2 == NULL) {
		error = ECONNREFUSED;
		goto put;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto put;
	}

	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		solock(so2);

		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, 0)) == NULL) {
			error = ECONNREFUSED;
		}

		sounlock(so2);

		if (error != 0)
			goto put;

		/*
		 * Since `so2' is protected by vnode(9) lock, `so3'
		 * can't be PRU_ABORT'ed here.
		 */
		solock_pair(so, so3);

		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);

		/*
		 * `unp_addr', `unp_connid' and 'UNP_FEIDSBIND' flag
		 * are immutable since we set them in unp_bind().
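		 * The new connection `unp3' inherits the listener's
		 * bound address while its `unp_connid' records the
		 * connecting process; in turn, the connecting side
		 * inherits the listener's bind-time credentials below.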
863 */ 864 if (unp2->unp_addr) 865 unp3->unp_addr = 866 m_copym(unp2->unp_addr, 0, M_COPYALL, M_NOWAIT); 867 unp3->unp_connid.uid = p->p_ucred->cr_uid; 868 unp3->unp_connid.gid = p->p_ucred->cr_gid; 869 unp3->unp_connid.pid = p->p_p->ps_pid; 870 unp3->unp_flags |= UNP_FEIDS; 871 872 if (unp2->unp_flags & UNP_FEIDSBIND) { 873 unp->unp_connid = unp2->unp_connid; 874 unp->unp_flags |= UNP_FEIDS; 875 } 876 877 so2 = so3; 878 } else { 879 if (so2 != so) 880 solock_pair(so, so2); 881 else 882 solock(so); 883 } 884 885 error = unp_connect2(so, so2); 886 887 sounlock(so); 888 889 /* 890 * `so2' can't be PRU_ABORT'ed concurrently 891 */ 892 if (so2 != so) 893 sounlock(so2); 894 put: 895 vput(vp); 896 unlock: 897 KERNEL_UNLOCK(); 898 solock(so); 899 unp->unp_flags &= ~UNP_CONNECTING; 900 901 /* 902 * The peer socket could be closed by concurrent thread 903 * when `so' and `vp' are unlocked. 904 */ 905 if (error == 0 && unp->unp_conn == NULL) 906 error = ECONNREFUSED; 907 908 return (error); 909 } 910 911 int 912 unp_connect2(struct socket *so, struct socket *so2) 913 { 914 struct unpcb *unp = sotounpcb(so); 915 struct unpcb *unp2; 916 917 soassertlocked(so); 918 soassertlocked(so2); 919 920 if (so2->so_type != so->so_type) 921 return (EPROTOTYPE); 922 unp2 = sotounpcb(so2); 923 unp->unp_conn = unp2; 924 switch (so->so_type) { 925 926 case SOCK_DGRAM: 927 SLIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_nextref); 928 soisconnected(so); 929 break; 930 931 case SOCK_STREAM: 932 case SOCK_SEQPACKET: 933 unp2->unp_conn = unp; 934 soisconnected(so); 935 soisconnected(so2); 936 break; 937 938 default: 939 panic("unp_connect2"); 940 } 941 return (0); 942 } 943 944 void 945 unp_disconnect(struct unpcb *unp) 946 { 947 struct socket *so2; 948 struct unpcb *unp2; 949 950 if ((so2 = unp_solock_peer(unp->unp_socket)) == NULL) 951 return; 952 953 unp2 = unp->unp_conn; 954 unp->unp_conn = NULL; 955 956 switch (unp->unp_socket->so_type) { 957 958 case SOCK_DGRAM: 959 SLIST_REMOVE(&unp2->unp_refs, unp, unpcb, unp_nextref); 960 unp->unp_socket->so_state &= ~SS_ISCONNECTED; 961 break; 962 963 case SOCK_STREAM: 964 case SOCK_SEQPACKET: 965 unp->unp_socket->so_snd.sb_mbcnt = 0; 966 unp->unp_socket->so_snd.sb_cc = 0; 967 soisdisconnected(unp->unp_socket); 968 unp2->unp_conn = NULL; 969 unp2->unp_socket->so_snd.sb_mbcnt = 0; 970 unp2->unp_socket->so_snd.sb_cc = 0; 971 soisdisconnected(unp2->unp_socket); 972 break; 973 } 974 975 if (so2 != unp->unp_socket) 976 sounlock(so2); 977 } 978 979 void 980 unp_shutdown(struct unpcb *unp) 981 { 982 struct socket *so2; 983 984 switch (unp->unp_socket->so_type) { 985 case SOCK_STREAM: 986 case SOCK_SEQPACKET: 987 if ((so2 = unp_solock_peer(unp->unp_socket)) == NULL) 988 break; 989 990 socantrcvmore(so2); 991 sounlock(so2); 992 993 break; 994 default: 995 break; 996 } 997 } 998 999 static struct unpcb * 1000 fptounp(struct file *fp) 1001 { 1002 struct socket *so; 1003 1004 if (fp->f_type != DTYPE_SOCKET) 1005 return (NULL); 1006 if ((so = fp->f_data) == NULL) 1007 return (NULL); 1008 if (so->so_proto->pr_domain != &unixdomain) 1009 return (NULL); 1010 return (sotounpcb(so)); 1011 } 1012 1013 int 1014 unp_externalize(struct mbuf *rights, socklen_t controllen, int flags) 1015 { 1016 struct proc *p = curproc; /* XXX */ 1017 struct cmsghdr *cm = mtod(rights, struct cmsghdr *); 1018 struct filedesc *fdp = p->p_fd; 1019 int i, *fds = NULL; 1020 struct fdpass *rp; 1021 struct file *fp; 1022 int nfds, error = 0; 1023 1024 /* 1025 * This code only works because SCM_RIGHTS is the only supported 1026 * 
int
unp_externalize(struct mbuf *rights, socklen_t controllen, int flags)
{
	struct proc *p = curproc;		/* XXX */
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	struct filedesc *fdp = p->p_fd;
	int i, *fds = NULL;
	struct fdpass *rp;
	struct file *fp;
	int nfds, error = 0;

	/*
	 * This code only works because SCM_RIGHTS is the only supported
	 * control message type on unix sockets.  Enforce this here.
	 */
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET)
		return EINVAL;

	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) /
	    sizeof(struct fdpass);
	if (controllen < CMSG_ALIGN(sizeof(struct cmsghdr)))
		controllen = 0;
	else
		controllen -= CMSG_ALIGN(sizeof(struct cmsghdr));
	if (nfds > controllen / sizeof(int)) {
		error = EMSGSIZE;
		goto out;
	}

	/* Make sure the recipient is allowed to see the descriptors. */
	rp = (struct fdpass *)CMSG_DATA(cm);

	/* fdp->fd_rdir requires KERNEL_LOCK() */
	KERNEL_LOCK();

	for (i = 0; i < nfds; i++) {
		fp = rp->fp;
		rp++;
		error = pledge_recvfd(p, fp);
		if (error)
			break;

		/*
		 * No to block devices.  If passing a directory,
		 * make sure that it is underneath the root.
		 */
		if (fdp->fd_rdir != NULL && fp->f_type == DTYPE_VNODE) {
			struct vnode *vp = (struct vnode *)fp->f_data;

			if (vp->v_type == VBLK ||
			    (vp->v_type == VDIR &&
			    !vn_isunder(vp, fdp->fd_rdir, p))) {
				error = EPERM;
				break;
			}
		}
	}

	KERNEL_UNLOCK();

	if (error)
		goto out;

	fds = mallocarray(nfds, sizeof(int), M_TEMP, M_WAITOK);

	fdplock(fdp);
restart:
	/*
	 * First loop -- allocate file descriptor table slots for the
	 * new descriptors.
	 */
	rp = ((struct fdpass *)CMSG_DATA(cm));
	for (i = 0; i < nfds; i++) {
		if ((error = fdalloc(p, 0, &fds[i])) != 0) {
			/*
			 * Back out what we've done so far.
			 */
			for (--i; i >= 0; i--)
				fdremove(fdp, fds[i]);

			if (error == ENOSPC) {
				fdexpand(p);
				goto restart;
			}

			fdpunlock(fdp);

			/*
			 * This is the error that has historically
			 * been returned, and some callers may
			 * expect it.
			 */

			error = EMSGSIZE;
			goto out;
		}

		/*
		 * Make the slot reference the descriptor so that
		 * fdalloc() works properly.  We finalize it all
		 * in the loop below.
		 */
		mtx_enter(&fdp->fd_fplock);
		KASSERT(fdp->fd_ofiles[fds[i]] == NULL);
		fdp->fd_ofiles[fds[i]] = rp->fp;
		mtx_leave(&fdp->fd_fplock);

		fdp->fd_ofileflags[fds[i]] = (rp->flags & UF_PLEDGED);
		if (flags & MSG_CMSG_CLOEXEC)
			fdp->fd_ofileflags[fds[i]] |= UF_EXCLOSE;

		rp++;
	}

	/*
	 * Keep `fdp' locked to prevent concurrent close() of just
	 * inserted descriptors. Such descriptors could have the only
	 * `f_count' reference which is now shared between control
	 * message and `fdp'.
	 */

	/*
	 * Now that adding them has succeeded, update all of the
	 * descriptor passing state.
	 */
	rp = (struct fdpass *)CMSG_DATA(cm);

	for (i = 0; i < nfds; i++) {
		struct unpcb *unp;

		fp = rp->fp;
		rp++;
		if ((unp = fptounp(fp)) != NULL) {
			rw_enter_write(&unp_gc_lock);
			unp->unp_msgcount--;
			rw_exit_write(&unp_gc_lock);
		}
	}
	fdpunlock(fdp);

	mtx_enter(&unp_rights_mtx);
	unp_rights -= nfds;
	mtx_leave(&unp_rights_mtx);

	/*
	 * Copy temporary array to message and adjust length, in case of
	 * transition from large struct file pointers to ints.
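	 * sizeof(struct fdpass) exceeds sizeof(int), so the int array
	 * always fits within the space the fdpass array occupied and
	 * the control message can only shrink here.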
	 */
	memcpy(CMSG_DATA(cm), fds, nfds * sizeof(int));
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(int));
	rights->m_len = CMSG_LEN(nfds * sizeof(int));
out:
	if (fds != NULL)
		free(fds, M_TEMP, nfds * sizeof(int));

	if (error) {
		if (nfds > 0) {
			/*
			 * No lock required. We are the only `cm' holder.
			 */
			rp = ((struct fdpass *)CMSG_DATA(cm));
			unp_discard(rp, nfds);
		}
	}

	return (error);
}

int
unp_internalize(struct mbuf *control, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct fdpass *rp;
	struct file *fp;
	struct unpcb *unp;
	int i, error;
	int nfds, *ip, fd, neededspace;

	/*
	 * Check for two potential msg_controllen values because
	 * IETF stuck their nose in a place it does not belong.
	 */
	if (control->m_len < CMSG_LEN(0) || cm->cmsg_len < CMSG_LEN(0))
		return (EINVAL);
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    !(cm->cmsg_len == control->m_len ||
	    control->m_len == CMSG_ALIGN(cm->cmsg_len)))
		return (EINVAL);
	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof(int);

	mtx_enter(&unp_rights_mtx);
	if (unp_rights + nfds > maxfiles / 10) {
		mtx_leave(&unp_rights_mtx);
		return (EMFILE);
	}
	unp_rights += nfds;
	mtx_leave(&unp_rights_mtx);

	/* Make sure we have room for the struct file pointers */
morespace:
	neededspace = CMSG_SPACE(nfds * sizeof(struct fdpass)) -
	    control->m_len;
	if (neededspace > m_trailingspace(control)) {
		char *tmp;
		/* if we already have a cluster, the message is just too big */
		if (control->m_flags & M_EXT) {
			error = E2BIG;
			goto nospace;
		}

		/* copy cmsg data temporarily out of the mbuf */
		tmp = malloc(control->m_len, M_TEMP, M_WAITOK);
		memcpy(tmp, mtod(control, caddr_t), control->m_len);

		/* allocate a cluster and try again */
		MCLGET(control, M_WAIT);
		if ((control->m_flags & M_EXT) == 0) {
			free(tmp, M_TEMP, control->m_len);
			error = ENOBUFS;	/* allocation failed */
			goto nospace;
		}

		/* copy the data back into the cluster */
		cm = mtod(control, struct cmsghdr *);
		memcpy(cm, tmp, control->m_len);
		free(tmp, M_TEMP, control->m_len);
		goto morespace;
	}

	/*
	 * Adjust message & mbuf to note amount of space actually used.
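	 * Each int fd grows into a larger struct fdpass, which is why
	 * extra space may have been needed above; the conversion below
	 * therefore runs from the last element down, so no fd is
	 * overwritten before it has been read.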
	 */
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(struct fdpass));
	control->m_len = CMSG_SPACE(nfds * sizeof(struct fdpass));

	ip = ((int *)CMSG_DATA(cm)) + nfds - 1;
	rp = ((struct fdpass *)CMSG_DATA(cm)) + nfds - 1;
	fdplock(fdp);
	for (i = 0; i < nfds; i++) {
		memcpy(&fd, ip, sizeof fd);
		ip--;
		if ((fp = fd_getfile(fdp, fd)) == NULL) {
			error = EBADF;
			goto fail;
		}
		if (fp->f_count >= FDUP_MAX_COUNT) {
			error = EDEADLK;
			goto fail;
		}
		error = pledge_sendfd(p, fp);
		if (error)
			goto fail;

		/* kqueue descriptors cannot be copied */
		if (fp->f_type == DTYPE_KQUEUE) {
			error = EINVAL;
			goto fail;
		}
#if NKCOV > 0
		/* kcov descriptors cannot be copied */
		if (fp->f_type == DTYPE_VNODE && kcov_vnode(fp->f_data)) {
			error = EINVAL;
			goto fail;
		}
#endif
		rp->fp = fp;
		rp->flags = fdp->fd_ofileflags[fd] & UF_PLEDGED;
		rp--;
		if ((unp = fptounp(fp)) != NULL) {
			rw_enter_write(&unp_gc_lock);
			unp->unp_msgcount++;
			unp->unp_file = fp;
			rw_exit_write(&unp_gc_lock);
		}
	}
	fdpunlock(fdp);
	return (0);
fail:
	fdpunlock(fdp);
	if (fp != NULL)
		FRELE(fp, p);
	/* Back out what we just did. */
	for ( ; i > 0; i--) {
		rp++;
		fp = rp->fp;
		if ((unp = fptounp(fp)) != NULL) {
			rw_enter_write(&unp_gc_lock);
			unp->unp_msgcount--;
			rw_exit_write(&unp_gc_lock);
		}
		FRELE(fp, p);
	}

nospace:
	mtx_enter(&unp_rights_mtx);
	unp_rights -= nfds;
	mtx_leave(&unp_rights_mtx);

	return (error);
}

void
unp_gc(void *arg __unused)
{
	struct unp_deferral *defer;
	struct file *fp;
	struct socket *so;
	struct unpcb *unp;
	int nunref, i;

	rw_enter_write(&unp_gc_lock);
	if (unp_gcing)
		goto unlock;
	unp_gcing = 1;
	rw_exit_write(&unp_gc_lock);

	rw_enter_write(&unp_df_lock);
	/* close any fds on the deferred list */
	while ((defer = SLIST_FIRST(&unp_deferred)) != NULL) {
		SLIST_REMOVE_HEAD(&unp_deferred, ud_link);
		rw_exit_write(&unp_df_lock);
		for (i = 0; i < defer->ud_n; i++) {
			fp = defer->ud_fp[i].fp;
			if (fp == NULL)
				continue;
			if ((unp = fptounp(fp)) != NULL) {
				rw_enter_write(&unp_gc_lock);
				unp->unp_msgcount--;
				rw_exit_write(&unp_gc_lock);
			}
			mtx_enter(&unp_rights_mtx);
			unp_rights--;
			mtx_leave(&unp_rights_mtx);
			/* closef() expects a refcount of 2 */
			FREF(fp);
			(void) closef(fp, NULL);
		}
		free(defer, M_TEMP, sizeof(*defer) +
		    sizeof(struct fdpass) * defer->ud_n);
		rw_enter_write(&unp_df_lock);
	}
	rw_exit_write(&unp_df_lock);

	nunref = 0;

	rw_enter_write(&unp_gc_lock);

	/*
	 * Determine sockets which may be prospectively dead. Such
	 * sockets have their `unp_msgcount' equal to the `f_count'.
	 * If `unp_msgcount' is 0, the socket has not been passed
	 * and can't be unreferenced.
	 */
	LIST_FOREACH(unp, &unp_head, unp_link) {
		unp->unp_gcflags = 0;

		if (unp->unp_msgcount == 0)
			continue;
		if ((fp = unp->unp_file) == NULL)
			continue;
		if (fp->f_count == unp->unp_msgcount) {
			unp->unp_gcflags |= UNP_GCDEAD;
			unp->unp_gcrefs = unp->unp_msgcount;
			nunref++;
		}
	}

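	/*
	 * What follows is a mark-and-sweep over the in-flight sockets:
	 * drop the references that queued control messages hold on the
	 * dead candidates, iteratively revive every candidate still
	 * reachable from a live socket, and finally discard the buffer
	 * contents of the sockets that remain dead; those form
	 * unreachable reference cycles.
	 */
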
	/*
	 * Scan all sockets previously marked as dead. Remove
	 * the `unp_gcrefs' reference each socket holds on any
	 * dead socket in its buffer.
	 */
	LIST_FOREACH(unp, &unp_head, unp_link) {
		if ((unp->unp_gcflags & UNP_GCDEAD) == 0)
			continue;
		so = unp->unp_socket;
		solock(so);
		unp_scan(so->so_rcv.sb_mb, unp_remove_gcrefs);
		sounlock(so);
	}

	/*
	 * If the dead socket has `unp_gcrefs' reference counter
	 * greater than 0, it can't be unreferenced. Mark it as
	 * alive and increment the `unp_gcrefs' reference for each
	 * dead socket within its buffer. Repeat this until we
	 * have no new alive sockets found.
	 */
	do {
		unp_defer = 0;

		LIST_FOREACH(unp, &unp_head, unp_link) {
			if ((unp->unp_gcflags & UNP_GCDEAD) == 0)
				continue;
			if (unp->unp_gcrefs == 0)
				continue;

			unp->unp_gcflags &= ~UNP_GCDEAD;

			so = unp->unp_socket;
			solock(so);
			unp_scan(so->so_rcv.sb_mb, unp_restore_gcrefs);
			sounlock(so);

			KASSERT(nunref > 0);
			nunref--;
		}
	} while (unp_defer > 0);

	/*
	 * If there are any unreferenced sockets, then for each one
	 * dispose of the files in its receive buffer and then close it.
	 */
	if (nunref) {
		LIST_FOREACH(unp, &unp_head, unp_link) {
			if (unp->unp_gcflags & UNP_GCDEAD) {
				/*
				 * This socket could still be connected
				 * and if so its `so_rcv' is still
				 * accessible by concurrent PRU_SEND
				 * thread.
				 */
				so = unp->unp_socket;
				solock(so);
				unp_scan(so->so_rcv.sb_mb, unp_discard);
				sounlock(so);
			}
		}
	}

	unp_gcing = 0;
unlock:
	rw_exit_write(&unp_gc_lock);
}

void
unp_dispose(struct mbuf *m)
{

	if (m)
		unp_scan(m, unp_discard);
}

void
unp_scan(struct mbuf *m0, void (*op)(struct fdpass *, int))
{
	struct mbuf *m;
	struct fdpass *rp;
	struct cmsghdr *cm;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm)))
				    / sizeof(struct fdpass);
				if (qfds > 0) {
					rp = (struct fdpass *)CMSG_DATA(cm);
					op(rp, qfds);
				}
				break;		/* XXX, but saves time */
			}
		}
		m0 = m0->m_nextpkt;
	}
}

void
unp_discard(struct fdpass *rp, int nfds)
{
	struct unp_deferral *defer;

	/* copy the file pointers to a deferral structure */
	defer = malloc(sizeof(*defer) + sizeof(*rp) * nfds, M_TEMP, M_WAITOK);
	defer->ud_n = nfds;
	memcpy(&defer->ud_fp[0], rp, sizeof(*rp) * nfds);
	memset(rp, 0, sizeof(*rp) * nfds);

	rw_enter_write(&unp_df_lock);
	SLIST_INSERT_HEAD(&unp_deferred, defer, ud_link);
	rw_exit_write(&unp_df_lock);

	task_add(systqmp, &unp_gc_task);
}

void
unp_remove_gcrefs(struct fdpass *rp, int nfds)
{
	struct unpcb *unp;
	int i;

	rw_assert_wrlock(&unp_gc_lock);

	for (i = 0; i < nfds; i++) {
		if (rp[i].fp == NULL)
			continue;
		if ((unp = fptounp(rp[i].fp)) == NULL)
			continue;
		if (unp->unp_gcflags & UNP_GCDEAD) {
			KASSERT(unp->unp_gcrefs > 0);
			unp->unp_gcrefs--;
		}
	}
}

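/*
 * Inverse of unp_remove_gcrefs(): give back the references held on a
 * socket that turned out to be reachable, and bump `unp_defer' so the
 * fixpoint loop in unp_gc() runs another iteration.
 */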
void
unp_restore_gcrefs(struct fdpass *rp, int nfds)
{
	struct unpcb *unp;
	int i;

	rw_assert_wrlock(&unp_gc_lock);

	for (i = 0; i < nfds; i++) {
		if (rp[i].fp == NULL)
			continue;
		if ((unp = fptounp(rp[i].fp)) == NULL)
			continue;
		if (unp->unp_gcflags & UNP_GCDEAD) {
			unp->unp_gcrefs++;
			unp_defer++;
		}
	}
}

int
unp_nam2sun(struct mbuf *nam, struct sockaddr_un **sun, size_t *pathlen)
{
	struct sockaddr *sa = mtod(nam, struct sockaddr *);
	size_t size, len;

	if (nam->m_len < offsetof(struct sockaddr, sa_data))
		return EINVAL;
	if (sa->sa_family != AF_UNIX)
		return EAFNOSUPPORT;
	if (sa->sa_len != nam->m_len)
		return EINVAL;
	if (sa->sa_len > sizeof(struct sockaddr_un))
		return EINVAL;
	*sun = (struct sockaddr_un *)sa;

	/* ensure that sun_path is NUL terminated and fits */
	size = (*sun)->sun_len - offsetof(struct sockaddr_un, sun_path);
	len = strnlen((*sun)->sun_path, size);
	if (len == sizeof((*sun)->sun_path))
		return EINVAL;
	if (len == size) {
		if (m_trailingspace(nam) == 0)
			return EINVAL;
		nam->m_len++;
		(*sun)->sun_len++;
		(*sun)->sun_path[len] = '\0';
	}
	if (pathlen != NULL)
		*pathlen = len;

	return 0;
}