/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.54.2.10 2003/03/04 17:28:09 nectar Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/domain.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>		/* XXX must be before <sys/file.h> */
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/mbuf.h>
#include <sys/nlookup.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>
#include <sys/socketvar2.h>
#include <sys/msgport2.h>

#define UNP_DETACHED		UNP_PRIVATE1
#define UNP_CONNECTING		UNP_PRIVATE2

#define UNP_ISATTACHED(unp)	\
    ((unp) != NULL && ((unp)->unp_flags & UNP_DETACHED) == 0)

#ifdef INVARIANTS
#define UNP_ASSERT_TOKEN_HELD(unp) \
    ASSERT_LWKT_TOKEN_HELD(lwkt_token_pool_lookup((unp)))
#else	/* !INVARIANTS */
#define UNP_ASSERT_TOKEN_HELD(unp)
#endif	/* INVARIANTS */

typedef struct unp_defdiscard {
	struct unp_defdiscard *next;
	struct file *fp;
} *unp_defdiscard_t;

static	MALLOC_DEFINE(M_UNPCB, "unpcb", "unpcb struct");
static	unp_gen_t unp_gencnt;
static	u_int unp_count;

static	struct unp_head unp_shead, unp_dhead;

static struct lwkt_token unp_token = LWKT_TOKEN_INITIALIZER(unp_token);
static int unp_defdiscard_nest;
static unp_defdiscard_t unp_defdiscard_base;

/*
 * Unix communications domain.
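 *
 * For orientation only, a minimal userland exchange exercising this code
 * might look like the sketch below (illustrative, not part of this file;
 * error handling omitted):
 *
 *	int sv[2];
 *	char ch;
 *
 *	socketpair(AF_LOCAL, SOCK_STREAM, 0, sv); // paired via unp_connect2()
 *	write(sv[0], "x", 1);	// queued directly onto the peer's so_rcv
 *	read(sv[1], &ch, 1);	// uipc_rcvd() then re-enables the sender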
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 *	lock pushdown
 */
static struct	sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
static ino_t	unp_ino = 1;	/* prototype for fake inode numbers */
static struct spinlock unp_ino_spin = SPINLOCK_INITIALIZER(&unp_ino_spin, "unp_ino_spin");

static int	unp_attach (struct socket *, struct pru_attach_info *);
static void	unp_detach (struct unpcb *);
static int	unp_bind (struct unpcb *, struct sockaddr *, struct thread *);
static int	unp_connect (struct socket *, struct sockaddr *,
				struct thread *);
static void	unp_disconnect (struct unpcb *);
static void	unp_shutdown (struct unpcb *);
static void	unp_drop (struct unpcb *, int);
static void	unp_gc (void);
static int	unp_gc_clearmarks(struct file *, void *);
static int	unp_gc_checkmarks(struct file *, void *);
static int	unp_gc_checkrefs(struct file *, void *);
static int	unp_revoke_gc_check(struct file *, void *);
static void	unp_scan (struct mbuf *, void (*)(struct file *, void *),
				void *data);
static void	unp_mark (struct file *, void *data);
static void	unp_discard (struct file *, void *);
static int	unp_internalize (struct mbuf *, struct thread *);
static int	unp_listen (struct unpcb *, struct thread *);
static void	unp_fp_externalize(struct lwp *lp, struct file *fp, int fd);
static int	unp_find_lockref(struct sockaddr *nam, struct thread *td,
		    short type, struct unpcb **unp_ret);
static int	unp_connect_pair(struct unpcb *unp, struct unpcb *unp2);

/*
 * SMP Considerations:
 *
 *	Since unp_token will be automatically released upon execution of
 *	blocking code, we need to reference unp_conn before any possible
 *	blocking code to prevent it from being ripped out from under us.
 *
 *	Any adjustment to unp->unp_conn requires both the global unp_token
 *	AND the per-unp token (lwkt_token_pool_lookup(unp)) to be held.
 *
 *	Any access to so_pcb to obtain unp requires the pool token for
 *	unp to be held.
 */

/* NOTE: unp_token MUST be held */
static __inline void
unp_reference(struct unpcb *unp)
{
	atomic_add_int(&unp->unp_refcnt, 1);
}

/* NOTE: unp_token MUST be held */
static __inline void
unp_free(struct unpcb *unp)
{
	KKASSERT(unp->unp_refcnt > 0);
	if (atomic_fetchadd_int(&unp->unp_refcnt, -1) == 1)
		unp_detach(unp);
}

static __inline struct unpcb *
unp_getsocktoken(struct socket *so)
{
	struct unpcb *unp;

	/*
	 * The unp pointer is invalid until we verify that it is
	 * good by re-checking so_pcb AFTER obtaining the token.
	 */
	while ((unp = so->so_pcb) != NULL) {
		lwkt_getpooltoken(unp);
		if (unp == so->so_pcb)
			break;
		lwkt_relpooltoken(unp);
	}
	return unp;
}

static __inline void
unp_reltoken(struct unpcb *unp)
{
	if (unp != NULL)
		lwkt_relpooltoken(unp);
}

static __inline void
unp_setflags(struct unpcb *unp, int flags)
{
	atomic_set_int(&unp->unp_flags, flags);
}

static __inline void
unp_clrflags(struct unpcb *unp, int flags)
{
	atomic_clear_int(&unp->unp_flags, flags);
}

/*
 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
 *	 will sofree() it when we return.
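 *	 Consequently uipc_abort() itself must not sofree() the socket;
 *	 it only marks the pcb detached and drops the pcb reference.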
 */
static void
uipc_abort(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_setflags(unp, UNP_DETACHED);
		unp_drop(unp, ECONNABORTED);
		unp_free(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_accept(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = msg->base.nm_so->so_pcb;
	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
	} else {
		struct unpcb *unp2 = unp->unp_conn;

		/*
		 * Pass back name of connected socket,
		 * if it was bound and we are still connected
		 * (our peer may have closed already!).
		 */
		if (unp2 && unp2->unp_addr) {
			unp_reference(unp2);
			*msg->accept.nm_nam = dup_sockaddr(
				(struct sockaddr *)unp2->unp_addr);
			unp_free(unp2);
		} else {
			*msg->accept.nm_nam = dup_sockaddr(&sun_noname);
		}
		error = 0;
	}
	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_attach(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = msg->base.nm_so->so_pcb;
	KASSERT(unp == NULL, ("double unp attach"));
	error = unp_attach(msg->base.nm_so, msg->attach.nm_ai);
	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_bind(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = msg->base.nm_so->so_pcb;
	if (UNP_ISATTACHED(unp))
		error = unp_bind(unp, msg->bind.nm_nam, msg->bind.nm_td);
	else
		error = EINVAL;
	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_connect(netmsg_t msg)
{
	int error;

	error = unp_connect(msg->base.nm_so, msg->connect.nm_nam,
			    msg->connect.nm_td);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_connect2(netmsg_t msg)
{
	int error;

	error = unp_connect2(msg->connect2.nm_so1, msg->connect2.nm_so2);
	lwkt_replymsg(&msg->lmsg, error);
}

/* control is EOPNOTSUPP */

static void
uipc_detach(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_setflags(unp, UNP_DETACHED);
		unp_free(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_disconnect(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = msg->base.nm_so->so_pcb;
	if (UNP_ISATTACHED(unp)) {
		unp_disconnect(unp);
		error = 0;
	} else {
		error = EINVAL;
	}
	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_listen(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = msg->base.nm_so->so_pcb;
	if (!UNP_ISATTACHED(unp) || unp->unp_vnode == NULL)
		error = EINVAL;
	else
		error = unp_listen(unp, msg->listen.nm_td);
	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_peeraddr(netmsg_t msg)
{
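	/*
	 * Return the bound address of our connected peer, or a no-name
	 * AF_LOCAL sockaddr when the peer is unbound or already gone.
	 */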
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = msg->base.nm_so->so_pcb;
	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
	} else if (unp->unp_conn && unp->unp_conn->unp_addr) {
		struct unpcb *unp2 = unp->unp_conn;

		unp_reference(unp2);
		*msg->peeraddr.nm_nam = dup_sockaddr(
			(struct sockaddr *)unp2->unp_addr);
		unp_free(unp2);
		error = 0;
	} else {
		/*
		 * XXX: It seems that this test always fails even when
		 * connection is established.  So, this else clause is
		 * added as workaround to return PF_LOCAL sockaddr.
		 */
		*msg->peeraddr.nm_nam = dup_sockaddr(&sun_noname);
		error = 0;
	}
	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_rcvd(netmsg_t msg)
{
	struct unpcb *unp, *unp2;
	struct socket *so;
	struct socket *so2;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	switch (so->so_type) {
	case SOCK_DGRAM:
		panic("uipc_rcvd DGRAM?");
		/*NOTREACHED*/
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (unp->unp_conn == NULL)
			break;
		unp2 = unp->unp_conn;	/* protected by pool token */

		/*
		 * Because we are transferring mbufs directly to the
		 * peer socket we have to use SSB_STOP on the sender
		 * to prevent it from building up infinite mbufs.
		 *
		 * As in several places in this module we have to ref unp2
		 * to ensure that it does not get ripped out from under us
		 * if we block on the so2 token or in sowwakeup().
		 */
		so2 = unp2->unp_socket;
		unp_reference(unp2);
		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (so->so_rcv.ssb_cc < so2->so_snd.ssb_hiwat &&
		    so->so_rcv.ssb_mbcnt < so2->so_snd.ssb_mbmax) {
			atomic_clear_int(&so2->so_snd.ssb_flags, SSB_STOP);

			sowwakeup(so2);
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);
		unp_free(unp2);
		break;
	default:
		panic("uipc_rcvd unknown socktype");
		/*NOTREACHED*/
	}
	error = 0;
done:
	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

/* pru_rcvoob is EOPNOTSUPP */

static void
uipc_send(netmsg_t msg)
{
	struct unpcb *unp, *unp2;
	struct socket *so;
	struct socket *so2;
	struct mbuf *control;
	struct mbuf *m;
	int error = 0;

	so = msg->base.nm_so;
	control = msg->send.nm_control;
	m = msg->send.nm_m;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto release;
	}

	if (msg->send.nm_flags & PRUS_OOB) {
		error = EOPNOTSUPP;
		goto release;
	}

	wakeup_start_delayed();

	if (control && (error = unp_internalize(control, msg->send.nm_td)))
		goto release;

	switch (so->so_type) {
	case SOCK_DGRAM:
	{
		struct sockaddr *from;

		if (msg->send.nm_addr) {
			if (unp->unp_conn) {
				error = EISCONN;
				break;
			}
			error = unp_find_lockref(msg->send.nm_addr,
			    msg->send.nm_td, so->so_type, &unp2);
			if (error)
				break;
			/*
			 * NOTE:
			 * unp2 is locked and referenced.
			 *
			 * We could unlock unp2 now, since it was checked
			 * and referenced.
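			 * Holding the reference alone keeps unp2 from
			 * being detached; the pool token is only needed
			 * while the unp_conn linkage is being examined.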
			 */
			unp_reltoken(unp2);
		} else {
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
			/* XXX racy. */
			unp2 = unp->unp_conn;
			unp_reference(unp2);
		}
		/* NOTE: unp2 is referenced. */
		so2 = unp2->unp_socket;

		if (unp->unp_addr)
			from = (struct sockaddr *)unp->unp_addr;
		else
			from = &sun_noname;

		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (ssb_appendaddr(&so2->so_rcv, from, m, control)) {
			sorwakeup(so2);
			m = NULL;
			control = NULL;
		} else {
			error = ENOBUFS;
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);

		unp_free(unp2);
		break;
	}

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/* Connect if not connected yet. */
		/*
		 * Note: A better implementation would complain
		 * if not equal to the peer's address.
		 */
		if (!(so->so_state & SS_ISCONNECTED)) {
			if (msg->send.nm_addr) {
				error = unp_connect(so,
						    msg->send.nm_addr,
						    msg->send.nm_td);
				if (error)
					break;	/* XXX */
			} else {
				error = ENOTCONN;
				break;
			}
		}

		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			break;
		}
		if (unp->unp_conn == NULL)
			panic("uipc_send connected but no connection?");
		unp2 = unp->unp_conn;
		so2 = unp2->unp_socket;

		unp_reference(unp2);

		/*
		 * Send to paired receive port, and then reduce
		 * send buffer hiwater marks to maintain backpressure.
		 * Wake up readers.
		 */
		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (control) {
			if (ssb_appendcontrol(&so2->so_rcv, m, control)) {
				control = NULL;
				m = NULL;
			}
		} else if (so->so_type == SOCK_SEQPACKET) {
			sbappendrecord(&so2->so_rcv.sb, m);
			m = NULL;
		} else {
			sbappend(&so2->so_rcv.sb, m);
			m = NULL;
		}

		/*
		 * Because we are transferring mbufs directly to the
		 * peer socket we have to use SSB_STOP on the sender
		 * to prevent it from building up infinite mbufs.
		 */
		if (so2->so_rcv.ssb_cc >= so->so_snd.ssb_hiwat ||
		    so2->so_rcv.ssb_mbcnt >= so->so_snd.ssb_mbmax) {
			atomic_set_int(&so->so_snd.ssb_flags, SSB_STOP);
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);
		sorwakeup(so2);

		unp_free(unp2);
		break;

	default:
		panic("uipc_send unknown socktype");
	}

	/*
	 * SEND_EOF is equivalent to a SEND followed by a SHUTDOWN.
	 */
	if (msg->send.nm_flags & PRUS_EOF) {
		socantsendmore(so);
		unp_shutdown(unp);
	}

	if (control && error != 0)
		unp_dispose(control);
release:
	unp_reltoken(unp);
	wakeup_end_delayed();

	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * MPSAFE
 */
static void
uipc_sense(netmsg_t msg)
{
	struct unpcb *unp;
	struct socket *so;
	struct stat *sb;
	int error;

	so = msg->base.nm_so;
	sb = msg->sense.nm_stat;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
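	 * Holding just the per-unp pool token is therefore sufficient
	 * for the read-only access done here.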
	 */
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	sb->st_blksize = so->so_snd.ssb_hiwat;
	sb->st_dev = NOUDEV;
	if (unp->unp_ino == 0) {	/* make up a non-zero inode number */
		spin_lock(&unp_ino_spin);
		unp->unp_ino = unp_ino++;
		spin_unlock(&unp_ino_spin);
	}
	sb->st_ino = unp->unp_ino;
	error = 0;
done:
	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_shutdown(netmsg_t msg)
{
	struct socket *so;
	struct unpcb *unp;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (UNP_ISATTACHED(unp)) {
		socantsendmore(so);
		unp_shutdown(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_sockaddr(netmsg_t msg)
{
	struct socket *so;
	struct unpcb *unp;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (UNP_ISATTACHED(unp)) {
		if (unp->unp_addr) {
			*msg->sockaddr.nm_nam =
				dup_sockaddr((struct sockaddr *)unp->unp_addr);
		}
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

struct pr_usrreqs uipc_usrreqs = {
	.pru_abort = uipc_abort,
	.pru_accept = uipc_accept,
	.pru_attach = uipc_attach,
	.pru_bind = uipc_bind,
	.pru_connect = uipc_connect,
	.pru_connect2 = uipc_connect2,
	.pru_control = pr_generic_notsupp,
	.pru_detach = uipc_detach,
	.pru_disconnect = uipc_disconnect,
	.pru_listen = uipc_listen,
	.pru_peeraddr = uipc_peeraddr,
	.pru_rcvd = uipc_rcvd,
	.pru_rcvoob = pr_generic_notsupp,
	.pru_send = uipc_send,
	.pru_sense = uipc_sense,
	.pru_shutdown = uipc_shutdown,
	.pru_sockaddr = uipc_sockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive
};

void
uipc_ctloutput(netmsg_t msg)
{
	struct socket *so;
	struct sockopt *sopt;
	struct unpcb *unp;
	int error = 0;

	lwkt_gettoken(&unp_token);
	so = msg->base.nm_so;
	sopt = msg->ctloutput.nm_sopt;
	unp = so->so_pcb;

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case LOCAL_PEERCRED:
			if (unp->unp_flags & UNP_HAVEPC) {
				soopt_from_kbuf(sopt, &unp->unp_peercred,
						sizeof(unp->unp_peercred));
			} else {
				if (so->so_type == SOCK_STREAM)
					error = ENOTCONN;
				else if (so->so_type == SOCK_SEQPACKET)
					error = ENOTCONN;
				else
					error = EINVAL;
			}
			break;
		default:
			error = EOPNOTSUPP;
			break;
		}
		break;
	case SOPT_SET:
	default:
		error = EOPNOTSUPP;
		break;
	}
	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 *
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
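 * (For example, with unpdg_sendspace left at its default of 2*1024, a
 * send of more than 2048 bytes on a datagram socket should fail with
 * EMSGSIZE before anything is queued.)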
 *
 * We want the local send/recv space to be significantly larger than lo0's
 * mtu of 16384.
 */
#ifndef PIPSIZ
#define	PIPSIZ	57344
#endif
static u_long	unpst_sendspace = PIPSIZ;
static u_long	unpst_recvspace = PIPSIZ;
static u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
static u_long	unpdg_recvspace = 4*1024;

static int	unp_rights;		/* file descriptors in flight */
static struct spinlock unp_spin = SPINLOCK_INITIALIZER(&unp_spin, "unp_spin");

SYSCTL_DECL(_net_local_seqpacket);
SYSCTL_DECL(_net_local_stream);
SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
    &unpst_sendspace, 0, "Size of stream socket send buffer");
SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpst_recvspace, 0, "Size of stream socket receive buffer");

SYSCTL_DECL(_net_local_dgram);
SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
    &unpdg_sendspace, 0, "Max datagram socket size");
SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpdg_recvspace, 0, "Size of datagram socket receive buffer");

SYSCTL_DECL(_net_local);
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
    "File descriptors in flight");

static int
unp_attach(struct socket *so, struct pru_attach_info *ai)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);

	if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
		switch (so->so_type) {
		case SOCK_STREAM:
		case SOCK_SEQPACKET:
			error = soreserve(so, unpst_sendspace, unpst_recvspace,
					  ai->sb_rlimit);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace,
					  ai->sb_rlimit);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			goto failed;
	}

	/*
	 * In order to support sendfile we have to set either SSB_STOPSUPP
	 * or SSB_PREALLOC.  Unix domain sockets use the SSB_STOP flow
	 * control mechanism.
	 */
	if (so->so_type == SOCK_STREAM) {
		atomic_set_int(&so->so_rcv.ssb_flags, SSB_STOPSUPP);
		atomic_set_int(&so->so_snd.ssb_flags, SSB_STOPSUPP);
	}

	unp = kmalloc(sizeof(*unp), M_UNPCB, M_WAITOK | M_ZERO | M_NULLOK);
	if (unp == NULL) {
		error = ENOBUFS;
		goto failed;
	}
	unp->unp_refcnt = 1;
	unp->unp_gencnt = ++unp_gencnt;
	unp_count++;
	LIST_INIT(&unp->unp_refs);
	unp->unp_socket = so;
	unp->unp_rvnode = ai->fd_rdir;		/* jail cruft XXX JH */
	LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ?
			 &unp_dhead : &unp_shead, unp, unp_link);
	so->so_pcb = (caddr_t)unp;
	soreference(so);
	error = 0;
failed:
	lwkt_reltoken(&unp_token);
	return error;
}

static void
unp_detach(struct unpcb *unp)
{
	struct socket *so;

	lwkt_gettoken(&unp_token);
	lwkt_getpooltoken(unp);

	LIST_REMOVE(unp, unp_link);	/* both tokens required */
	unp->unp_gencnt = ++unp_gencnt;
	--unp_count;
	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = NULL;
		vrele(unp->unp_vnode);
		unp->unp_vnode = NULL;
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	while (!LIST_EMPTY(&unp->unp_refs))
		unp_drop(LIST_FIRST(&unp->unp_refs), ECONNRESET);
	soisdisconnected(unp->unp_socket);
	so = unp->unp_socket;
	soreference(so);		/* for delayed sorflush */
	KKASSERT(so->so_pcb == unp);
	so->so_pcb = NULL;		/* both tokens required */
	unp->unp_socket = NULL;
	sofree(so);			/* remove pcb ref */

	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(so);
		unp_gc();
	}
	sofree(so);
	lwkt_relpooltoken(unp);
	lwkt_reltoken(&unp_token);

	if (unp->unp_addr)
		kfree(unp->unp_addr, M_SONAME);
	kfree(unp, M_UNPCB);
}

static int
unp_bind(struct unpcb *unp, struct sockaddr *nam, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp;
	struct vattr vattr;
	int error, namelen;
	struct nlookupdata nd;
	char buf[SOCK_MAXADDRLEN];

	lwkt_gettoken(&unp_token);
	if (unp->unp_vnode != NULL) {
		error = EINVAL;
		goto failed;
	}
	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0) {
		error = EINVAL;
		goto failed;
	}
	strncpy(buf, soun->sun_path, namelen);
	buf[namelen] = 0;	/* null-terminate the string */
	error = nlookup_init(&nd, buf, UIO_SYSSPACE,
			     NLC_LOCKVP | NLC_CREATE | NLC_REFDVP);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0 && nd.nl_nch.ncp->nc_vp != NULL)
		error = EADDRINUSE;
	if (error)
		goto done;

	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask);
	error = VOP_NCREATE(&nd.nl_nch, nd.nl_dvp, &vp, nd.nl_cred, &vattr);
	if (error == 0) {
		if (unp->unp_vnode == NULL) {
			vp->v_socket = unp->unp_socket;
			unp->unp_vnode = vp;
			unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam);
			vn_unlock(vp);
		} else {
			vput(vp);	/* late race */
			error = EINVAL;
		}
	}
done:
	nlookup_done(&nd);
failed:
	lwkt_reltoken(&unp_token);
	return (error);
}

static int
unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct unpcb *unp, *unp2;
	int error, flags = 0;

	lwkt_gettoken(&unp_token);

	unp = unp_getsocktoken(so);
	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto failed;
	}

	if ((unp->unp_flags & UNP_CONNECTING) || unp->unp_conn != NULL) {
		error = EISCONN;
		goto failed;
	}

	flags = UNP_CONNECTING;
	unp_setflags(unp, flags);

	error = unp_find_lockref(nam, td, so->so_type, &unp2);
	if (error)
		goto failed;
	/*
	 * NOTE:
	 * unp2 is locked and referenced.
	 */

	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		struct socket *so2, *so3;
		struct unpcb *unp3;

		so2 = unp2->unp_socket;
		if (!(so2->so_options & SO_ACCEPTCONN) ||
		    (so3 = sonewconn_faddr(so2, 0, NULL,
					   TRUE /* keep ref */)) == NULL) {
			error = ECONNREFUSED;
			goto done;
		}
		/* so3 has a socket reference. */

		unp3 = unp_getsocktoken(so3);
		if (!UNP_ISATTACHED(unp3)) {
			unp_reltoken(unp3);
			/*
			 * Already aborted; we only need to drop the
			 * socket reference held by sonewconn_faddr().
			 */
			sofree(so3);
			error = ECONNREFUSED;
			goto done;
		}
		unp_reference(unp3);
		/*
		 * NOTE:
		 * unp3 is locked and referenced.
		 */

		/*
		 * Release so3 socket reference held by sonewconn_faddr().
		 * Since we have referenced unp3, neither unp3 nor so3 will
		 * be destroyed here.
		 */
		sofree(so3);

		if (unp2->unp_addr != NULL) {
			unp3->unp_addr = (struct sockaddr_un *)
			    dup_sockaddr((struct sockaddr *)unp2->unp_addr);
		}

		/*
		 * unp_peercred management:
		 *
		 * The connecter's (client's) credentials are copied
		 * from its process structure at the time of connect()
		 * (which is now).
		 */
		cru2x(td->td_proc->p_ucred, &unp3->unp_peercred);
		unp_setflags(unp3, UNP_HAVEPC);
		/*
		 * The receiver's (server's) credentials are copied
		 * from the unp_peercred member of socket on which the
		 * former called listen(); unp_listen() cached that
		 * process's credentials at that time so we can use
		 * them now.
		 */
		KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
		    ("unp_connect: listener without cached peercred"));
		memcpy(&unp->unp_peercred, &unp2->unp_peercred,
		    sizeof(unp->unp_peercred));
		unp_setflags(unp, UNP_HAVEPC);

		error = unp_connect_pair(unp, unp3);
		if (error) {
			/* XXX we need a better name */
			soabort_oncpu(so3);
		}

		/* Done with unp3 */
		unp_free(unp3);
		unp_reltoken(unp3);
	} else {
		error = unp_connect_pair(unp, unp2);
	}
done:
	unp_free(unp2);
	unp_reltoken(unp2);
failed:
	if (flags)
		unp_clrflags(unp, flags);
	unp_reltoken(unp);

	lwkt_reltoken(&unp_token);
	return (error);
}

/*
 * Connect two unix domain sockets together.
 *
 * NOTE: Semantics for any change to unp_conn requires that the per-unp
 *	 pool token also be held.
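 *
 *	 The token order unp_connect2() uses below is thus:
 *
 *		lwkt_gettoken(&unp_token);
 *		unp = unp_getsocktoken(so);	(per-unp pool token)
 *		unp2 = unp_getsocktoken(so2);
 *		... adjust unp_conn linkage via unp_connect_pair() ...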
 */
int
unp_connect2(struct socket *so, struct socket *so2)
{
	struct unpcb *unp, *unp2;
	int error;

	lwkt_gettoken(&unp_token);
	if (so2->so_type != so->so_type) {
		lwkt_reltoken(&unp_token);
		return (EPROTOTYPE);
	}
	unp = unp_getsocktoken(so);
	unp2 = unp_getsocktoken(so2);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}
	if (!UNP_ISATTACHED(unp2)) {
		error = ECONNREFUSED;
		goto done;
	}

	if (unp->unp_conn != NULL) {
		error = EISCONN;
		goto done;
	}
	if ((so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET) &&
	    unp2->unp_conn != NULL) {
		error = EISCONN;
		goto done;
	}

	error = unp_connect_pair(unp, unp2);
done:
	unp_reltoken(unp2);
	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);
	return (error);
}

/*
 * Disconnect a unix domain socket pair.
 *
 * NOTE: Semantics for any change to unp_conn requires that the per-unp
 *	 pool token also be held.
 */
static void
unp_disconnect(struct unpcb *unp)
{
	struct unpcb *unp2;

	lwkt_gettoken(&unp_token);
	lwkt_getpooltoken(unp);

	while ((unp2 = unp->unp_conn) != NULL) {
		lwkt_getpooltoken(unp2);
		if (unp2 == unp->unp_conn)
			break;
		lwkt_relpooltoken(unp2);
	}
	if (unp2 == NULL)
		goto done;

	unp->unp_conn = NULL;

	switch (unp->unp_socket->so_type) {
	case SOCK_DGRAM:
		LIST_REMOVE(unp, unp_reflink);
		soclrstate(unp->unp_socket, SS_ISCONNECTED);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		unp_reference(unp2);
		unp2->unp_conn = NULL;

		soisdisconnected(unp->unp_socket);
		soisdisconnected(unp2->unp_socket);

		unp_free(unp2);
		break;
	}
	lwkt_relpooltoken(unp2);
done:
	lwkt_relpooltoken(unp);
	lwkt_reltoken(&unp_token);
}

#ifdef notdef
void
unp_abort(struct unpcb *unp)
{
	lwkt_gettoken(&unp_token);
	unp_free(unp);
	lwkt_reltoken(&unp_token);
}
#endif

static int
prison_unpcb(struct thread *td, struct unpcb *unp)
{
	struct proc *p;

	if (td == NULL)
		return (0);
	if ((p = td->td_proc) == NULL)
		return (0);
	if (!p->p_ucred->cr_prison)
		return (0);
	if (p->p_fd->fd_rdir == unp->unp_rvnode)
		return (0);
	return (1);
}

static int
unp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct unpcb *unp, **unp_list;
	unp_gen_t gencnt;
	struct unp_head *head;

	head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead);

	KKASSERT(curproc != NULL);

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = unp_count;
		req->oldidx = (n + n/8) * sizeof(struct xunpcb);
		return 0;
	}

	if (req->newptr != NULL)
		return EPERM;

	lwkt_gettoken(&unp_token);

	/*
	 * OK, now we're committed to doing something.
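	 * Snapshot the generation count first; entries created after
	 * this point have a larger gencnt and are skipped below.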
	 */
	gencnt = unp_gencnt;
	n = unp_count;

	unp_list = kmalloc(n * sizeof *unp_list, M_TEMP, M_WAITOK);

	for (unp = LIST_FIRST(head), i = 0; unp && i < n;
	     unp = LIST_NEXT(unp, unp_link)) {
		if (unp->unp_gencnt <= gencnt && !prison_unpcb(req->td, unp))
			unp_list[i++] = unp;
	}
	n = i;			/* in case we lost some during malloc */

	error = 0;
	for (i = 0; i < n; i++) {
		unp = unp_list[i];
		if (unp->unp_gencnt <= gencnt) {
			struct xunpcb xu;
			xu.xu_len = sizeof xu;
			xu.xu_unpp = unp;
			/*
			 * XXX - need more locking here to protect against
			 * connect/disconnect races for SMP.
			 */
			if (unp->unp_addr) {
				bcopy(unp->unp_addr, &xu.xu_addr,
				      unp->unp_addr->sun_len);
			}
			if (unp->unp_conn && unp->unp_conn->unp_addr) {
				bcopy(unp->unp_conn->unp_addr,
				      &xu.xu_caddr,
				      unp->unp_conn->unp_addr->sun_len);
			}
			bcopy(unp, &xu.xu_unp, sizeof *unp);
			sotoxsocket(unp->unp_socket, &xu.xu_socket);
			error = SYSCTL_OUT(req, &xu, sizeof xu);
		}
	}
	lwkt_reltoken(&unp_token);
	kfree(unp_list, M_TEMP);

	return error;
}

SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
	    (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb",
	    "List of active local datagram sockets");
SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
	    (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb",
	    "List of active local stream sockets");
SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLFLAG_RD,
	    (caddr_t)(long)SOCK_SEQPACKET, 0, unp_pcblist, "S,xunpcb",
	    "List of active local seqpacket stream sockets");

static void
unp_shutdown(struct unpcb *unp)
{
	struct socket *so;

	if ((unp->unp_socket->so_type == SOCK_STREAM ||
	     unp->unp_socket->so_type == SOCK_SEQPACKET) &&
	    unp->unp_conn != NULL && (so = unp->unp_conn->unp_socket)) {
		socantrcvmore(so);
	}
}

static void
unp_drop(struct unpcb *unp, int err)
{
	struct socket *so = unp->unp_socket;

	so->so_error = err;
	unp_disconnect(unp);
}

#ifdef notdef
void
unp_drain(void)
{
	lwkt_gettoken(&unp_token);
	lwkt_reltoken(&unp_token);
}
#endif

int
unp_externalize(struct mbuf *rights)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* XXX */
	struct lwp *lp = td->td_lwp;
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	int *fdp;
	int i;
	struct file **rp;
	struct file *fp;
	int newfds = (cm->cmsg_len - (CMSG_DATA(cm) - (u_char *)cm))
		/ sizeof(struct file *);
	int f;

	lwkt_gettoken(&unp_token);

	/*
	 * If the new FDs will not fit, then we free them all.
	 */
	if (!fdavail(p, newfds)) {
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			fp = *rp;
			/*
			 * Zero the pointer before calling unp_discard,
			 * since it may end up in unp_gc()..
			 */
			*rp++ = NULL;
			unp_discard(fp, NULL);
		}
		lwkt_reltoken(&unp_token);
		return (EMSGSIZE);
	}

	/*
	 * Now change each pointer to an fd in the global table to
	 * an integer that is the index to the local fd table entry
	 * that we set up to point to the global one we are transferring.
	 * If sizeof (struct file *) is bigger than or equal to sizeof int,
	 * then do it in forward order.
	 * In that case, an integer will always come in the same place
	 * or before its corresponding struct file pointer.
	 * If sizeof (struct file *) is smaller than sizeof int, then
	 * do it in reverse order.
	 */
	if (sizeof(struct file *) >= sizeof(int)) {
		fdp = (int *)CMSG_DATA(cm);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f))
				panic("unp_externalize");
			fp = *rp++;
			unp_fp_externalize(lp, fp, f);
			*fdp++ = f;
		}
	} else {
		fdp = (int *)CMSG_DATA(cm) + newfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + newfds - 1;
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f))
				panic("unp_externalize");
			fp = *rp--;
			unp_fp_externalize(lp, fp, f);
			*fdp-- = f;
		}
	}

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differs.
	 */
	cm->cmsg_len = CMSG_LEN(newfds * sizeof(int));
	rights->m_len = cm->cmsg_len;

	lwkt_reltoken(&unp_token);
	return (0);
}

static void
unp_fp_externalize(struct lwp *lp, struct file *fp, int fd)
{
	struct file *fx;
	int error;

	lwkt_gettoken(&unp_token);

	if (lp) {
		KKASSERT(fd >= 0);
		if (fp->f_flag & FREVOKED) {
			kprintf("Warning: revoked fp exiting unix socket\n");
			fx = NULL;
			error = falloc(lp, &fx, NULL);
			if (error == 0)
				fsetfd(lp->lwp_proc->p_fd, fx, fd);
			else
				fsetfd(lp->lwp_proc->p_fd, NULL, fd);
			fdrop(fx);
		} else {
			fsetfd(lp->lwp_proc->p_fd, fp, fd);
		}
	}
	spin_lock(&unp_spin);
	fp->f_msgcount--;
	unp_rights--;
	spin_unlock(&unp_spin);
	fdrop(fp);

	lwkt_reltoken(&unp_token);
}

void
unp_init(void)
{
	LIST_INIT(&unp_dhead);
	LIST_INIT(&unp_shead);
	spin_init(&unp_spin, "unpinit");
}

static int
unp_internalize(struct mbuf *control, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdescp;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct file **rp;
	struct file *fp;
	int i, fd, *fdp;
	struct cmsgcred *cmcred;
	int oldfds;
	u_int newlen;
	int error;

	KKASSERT(p);
	lwkt_gettoken(&unp_token);

	fdescp = p->p_fd;
	if ((cm->cmsg_type != SCM_RIGHTS && cm->cmsg_type != SCM_CREDS) ||
	    cm->cmsg_level != SOL_SOCKET ||
	    CMSG_ALIGN(cm->cmsg_len) != control->m_len) {
		error = EINVAL;
		goto done;
	}

	/*
	 * Fill in credential information.
	 */
	if (cm->cmsg_type == SCM_CREDS) {
		cmcred = (struct cmsgcred *)CMSG_DATA(cm);
		cmcred->cmcred_pid = p->p_pid;
		cmcred->cmcred_uid = p->p_ucred->cr_ruid;
		cmcred->cmcred_gid = p->p_ucred->cr_rgid;
		cmcred->cmcred_euid = p->p_ucred->cr_uid;
		cmcred->cmcred_ngroups = MIN(p->p_ucred->cr_ngroups,
					     CMGROUP_MAX);
		for (i = 0; i < cmcred->cmcred_ngroups; i++)
			cmcred->cmcred_groups[i] = p->p_ucred->cr_groups[i];
		error = 0;
		goto done;
	}

	/*
	 * cmsghdr may not be aligned, do not allow calculation(s) to
	 * go negative.
	 */
	if (cm->cmsg_len < CMSG_LEN(0)) {
		error = EINVAL;
		goto done;
	}

	oldfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(int);

	/*
	 * Check that all the FDs passed in refer to legal, open files.
	 * If not, reject the entire operation.
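	 * (kqueue descriptors are also rejected here, since passing
	 * them over a socket is not supported.)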
	 */
	fdp = (int *)CMSG_DATA(cm);
	for (i = 0; i < oldfds; i++) {
		fd = *fdp++;
		if ((unsigned)fd >= fdescp->fd_nfiles ||
		    fdescp->fd_files[fd].fp == NULL) {
			error = EBADF;
			goto done;
		}
		if (fdescp->fd_files[fd].fp->f_type == DTYPE_KQUEUE) {
			error = EOPNOTSUPP;
			goto done;
		}
	}

	/*
	 * Now replace the integer FDs with pointers to
	 * the associated global file table entry..
	 * Allocate a bigger buffer as necessary.  But if a cluster is not
	 * enough, return E2BIG.
	 */
	newlen = CMSG_LEN(oldfds * sizeof(struct file *));
	if (newlen > MCLBYTES) {
		error = E2BIG;
		goto done;
	}
	if (newlen - control->m_len > M_TRAILINGSPACE(control)) {
		if (control->m_flags & M_EXT) {
			error = E2BIG;
			goto done;
		}
		MCLGET(control, M_WAITOK);
		if (!(control->m_flags & M_EXT)) {
			error = ENOBUFS;
			goto done;
		}

		/* copy the data to the cluster */
		memcpy(mtod(control, char *), cm, cm->cmsg_len);
		cm = mtod(control, struct cmsghdr *);
	}

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differs.
	 */
	cm->cmsg_len = newlen;
	control->m_len = CMSG_ALIGN(newlen);

	/*
	 * Transform the file descriptors into struct file pointers.
	 * If sizeof (struct file *) is bigger than or equal to sizeof int,
	 * then do it in reverse order so that the int won't get
	 * overwritten until we're done.
	 * If sizeof (struct file *) is smaller than sizeof int, then
	 * do it in forward order.
	 */
	if (sizeof(struct file *) >= sizeof(int)) {
		fdp = (int *)CMSG_DATA(cm) + oldfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + oldfds - 1;
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_files[*fdp--].fp;
			*rp-- = fp;
			fhold(fp);
			spin_lock(&unp_spin);
			fp->f_msgcount++;
			unp_rights++;
			spin_unlock(&unp_spin);
		}
	} else {
		fdp = (int *)CMSG_DATA(cm);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_files[*fdp++].fp;
			*rp++ = fp;
			fhold(fp);
			spin_lock(&unp_spin);
			fp->f_msgcount++;
			unp_rights++;
			spin_unlock(&unp_spin);
		}
	}
	error = 0;
done:
	lwkt_reltoken(&unp_token);
	return error;
}

/*
 * Garbage collect in-transit file descriptors that get lost due to
 * loops (i.e. when a socket is sent to another process over itself,
 * and more complex situations).
 *
 * NOT MPSAFE - TODO socket flush code and maybe closef.  Rest is MPSAFE.
 */

struct unp_gc_info {
	struct file **extra_ref;
	struct file *locked_fp;
	int defer;
	int index;
	int maxindex;
};

static void
unp_gc(void)
{
	struct unp_gc_info info;
	static boolean_t unp_gcing;
	struct file **fpp;
	int i;

	/*
	 * Only one gc can be in-progress at any given moment
	 */
	spin_lock(&unp_spin);
	if (unp_gcing) {
		spin_unlock(&unp_spin);
		return;
	}
	unp_gcing = TRUE;
	spin_unlock(&unp_spin);

	lwkt_gettoken(&unp_token);

	/*
	 * Before going through all this, set all FDs to be NOT deferred
	 * and NOT externally accessible (not marked).  During the scan
	 * a fd can be marked externally accessible but we may or may not
	 * be able to immediately process it (controlled by FDEFER).
	 *
	 * If we loop, sleep a bit.
	 * The complexity of the topology can cause multiple loops.
	 * Also failure to acquire the socket's so_rcv token can
	 * cause us to loop.
	 */
	allfiles_scan_exclusive(unp_gc_clearmarks, NULL);
	do {
		info.defer = 0;
		allfiles_scan_exclusive(unp_gc_checkmarks, &info);
		if (info.defer)
			tsleep(&info, 0, "gcagain", 1);
	} while (info.defer);

	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights (unp_rights)
	 * is non-zero.  If during the sweep phase the gc code unp_discards,
	 * we end up doing a (full) closef on the descriptor.  A closef on A
	 * results in the following chain.  Closef calls soo_close, which
	 * calls soclose.  Soclose calls first (through the switch
	 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
	 * returns because the previous instance had set unp_gcing, and
	 * we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  The sorflush calls via the dom_dispose
	 * switch unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the descriptor has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourself, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc will shut down the socket.
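	 *
	 * In short, this is a mark-and-sweep: the loop above marked what
	 * is still reachable; below we take a ref on each unreachable fp,
	 * sorflush() the rights it is holding, then closef() our ref.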
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	info.extra_ref = kmalloc(256 * sizeof(struct file *), M_FILE, M_WAITOK);
	info.maxindex = 256;

	do {
		/*
		 * Look for matches
		 */
		info.index = 0;
		allfiles_scan_exclusive(unp_gc_checkrefs, &info);

		/*
		 * For each FD on our hit list, do the following two things
		 */
		for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp) {
			struct file *tfp = *fpp;
			if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL)
				sorflush((struct socket *)(tfp->f_data));
		}
		for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp)
			closef(*fpp, NULL);
	} while (info.index == info.maxindex);

	lwkt_reltoken(&unp_token);

	kfree((caddr_t)info.extra_ref, M_FILE);
	unp_gcing = FALSE;
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_checkrefs(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;

	if (fp->f_count == 0)
		return(0);
	if (info->index == info->maxindex)
		return(-1);

	/*
	 * If all refs are from msgs, and it's not marked accessible
	 * then it must be referenced from some unreachable cycle
	 * of (shut-down) FDs, so include it in our
	 * list of FDs to remove.
	 */
	if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
		info->extra_ref[info->index++] = fp;
		fhold(fp);
	}
	return(0);
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_clearmarks(struct file *fp, void *data __unused)
{
	atomic_clear_int(&fp->f_flag, FMARK | FDEFER);
	return(0);
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_checkmarks(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;
	struct socket *so;

	/*
	 * If the file is not open, skip it.  Make sure it isn't marked
	 * deferred or we could loop forever, in case we somehow race
	 * something.
	 */
	if (fp->f_count == 0) {
		if (fp->f_flag & FDEFER)
			atomic_clear_int(&fp->f_flag, FDEFER);
		return(0);
	}
	/*
	 * If we already marked it as 'defer' in a
	 * previous pass, then try to process it this time
	 * and un-mark it.
	 */
	if (fp->f_flag & FDEFER) {
		atomic_clear_int(&fp->f_flag, FDEFER);
	} else {
		/*
		 * If it's not deferred, then check if it's
		 * already marked.. if so skip it.
		 */
		if (fp->f_flag & FMARK)
			return(0);
		/*
		 * If all references are from messages
		 * in transit, then skip it.  It's not
		 * externally accessible.
		 */
		if (fp->f_count == fp->f_msgcount)
			return(0);
		/*
		 * If it got this far then it must be
		 * externally accessible.
		 */
		atomic_set_int(&fp->f_flag, FMARK);
	}

	/*
	 * Either it was deferred, or it is externally
	 * accessible and not already marked so.
	 * Now check if it is possibly one of OUR sockets.
	 */
	if (fp->f_type != DTYPE_SOCKET ||
	    (so = (struct socket *)fp->f_data) == NULL) {
		return(0);
	}
	if (so->so_proto->pr_domain != &localdomain ||
	    !(so->so_proto->pr_flags & PR_RIGHTS)) {
		return(0);
	}

	/*
	 * So, Ok, it's one of our sockets and it IS externally accessible
	 * (or was deferred).
	 * Now we look to see if we hold any file descriptors in its
	 * message buffers.  Follow those links and mark them as
	 * accessible too.
	 *
	 * We are holding multiple spinlocks here; if we cannot get the
	 * token non-blocking, defer until the next loop.
	 */
	info->locked_fp = fp;
	if (lwkt_trytoken(&so->so_rcv.ssb_token)) {
		unp_scan(so->so_rcv.ssb_mb, unp_mark, info);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	} else {
		atomic_set_int(&fp->f_flag, FDEFER);
		++info->defer;
	}
	return (0);
}

/*
 * Scan all unix domain sockets and replace any revoked file pointers
 * found with the dummy file pointer fx.  We don't worry about races
 * against file pointers being read out as those are handled in the
 * externalize code.
 */

#define REVOKE_GC_MAXFILES	32

struct unp_revoke_gc_info {
	struct file *fx;
	struct file *fary[REVOKE_GC_MAXFILES];
	int fcount;
};

void
unp_revoke_gc(struct file *fx)
{
	struct unp_revoke_gc_info info;
	int i;

	lwkt_gettoken(&unp_token);
	info.fx = fx;
	do {
		info.fcount = 0;
		allfiles_scan_exclusive(unp_revoke_gc_check, &info);
		for (i = 0; i < info.fcount; ++i)
			unp_fp_externalize(NULL, info.fary[i], -1);
	} while (info.fcount == REVOKE_GC_MAXFILES);
	lwkt_reltoken(&unp_token);
}

/*
 * Check for and replace revoked descriptors.
 *
 * WARNING: This routine is not allowed to block.
 */
static int
unp_revoke_gc_check(struct file *fps, void *vinfo)
{
	struct unp_revoke_gc_info *info = vinfo;
	struct file *fp;
	struct socket *so;
	struct mbuf *m0;
	struct mbuf *m;
	struct file **rp;
	struct cmsghdr *cm;
	int i;
	int qfds;

	/*
	 * Is this a unix domain socket with rights-passing abilities?
	 */
	if (fps->f_type != DTYPE_SOCKET)
		return (0);
	if ((so = (struct socket *)fps->f_data) == NULL)
		return(0);
	if (so->so_proto->pr_domain != &localdomain)
		return(0);
	if ((so->so_proto->pr_flags & PR_RIGHTS) == 0)
		return(0);

	/*
	 * Scan the mbufs for control messages and replace any revoked
	 * descriptors we find.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
	m0 = so->so_rcv.ssb_mb;
	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type != MT_CONTROL)
				continue;
			if (m->m_len < sizeof(*cm))
				continue;
			cm = mtod(m, struct cmsghdr *);
			if (cm->cmsg_level != SOL_SOCKET ||
			    cm->cmsg_type != SCM_RIGHTS) {
				continue;
			}
			qfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(void *);
			rp = (struct file **)CMSG_DATA(cm);
			for (i = 0; i < qfds; i++) {
				fp = rp[i];
				if (fp->f_flag & FREVOKED) {
					kprintf("Warning: Removing revoked fp from unix domain socket queue\n");
					fhold(info->fx);
					info->fx->f_msgcount++;
					unp_rights++;
					rp[i] = info->fx;
					info->fary[info->fcount++] = fp;
				}
				if (info->fcount == REVOKE_GC_MAXFILES)
					break;
			}
			if (info->fcount == REVOKE_GC_MAXFILES)
				break;
		}
		m0 = m0->m_nextpkt;
		if (info->fcount == REVOKE_GC_MAXFILES)
			break;
	}
	lwkt_reltoken(&so->so_rcv.ssb_token);

	/*
	 * Stop the scan if we filled up our array.
	 */
	if (info->fcount == REVOKE_GC_MAXFILES)
		return(-1);
	return(0);
}

/*
 * Dispose of the fp's stored in a mbuf.
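 * (These are rights that were queued on a socket but never received;
 * each still holds an f_msgcount reference that must be released.)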
 *
 * The dds loop can cause additional fps to be entered onto the
 * list while it is running, flattening out the operation and avoiding
 * a deep kernel stack recursion.
 */
void
unp_dispose(struct mbuf *m)
{
	unp_defdiscard_t dds;

	lwkt_gettoken(&unp_token);
	++unp_defdiscard_nest;
	if (m) {
		unp_scan(m, unp_discard, NULL);
	}
	if (unp_defdiscard_nest == 1) {
		while ((dds = unp_defdiscard_base) != NULL) {
			unp_defdiscard_base = dds->next;
			closef(dds->fp, NULL);
			kfree(dds, M_UNPCB);
		}
	}
	--unp_defdiscard_nest;
	lwkt_reltoken(&unp_token);
}

static int
unp_listen(struct unpcb *unp, struct thread *td)
{
	struct proc *p = td->td_proc;

	KKASSERT(p);
	lwkt_gettoken(&unp_token);
	cru2x(p->p_ucred, &unp->unp_peercred);
	unp_setflags(unp, UNP_HAVEPCCACHED);
	lwkt_reltoken(&unp_token);
	return (0);
}

static void
unp_scan(struct mbuf *m0, void (*op)(struct file *, void *), void *data)
{
	struct mbuf *m;
	struct file **rp;
	struct cmsghdr *cm;
	int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - CMSG_LEN(0)) /
				       sizeof(void *);
				rp = (struct file **)CMSG_DATA(cm);
				for (i = 0; i < qfds; i++)
					(*op)(*rp++, data);
				break;		/* XXX, but saves time */
			}
		}
		m0 = m0->m_nextpkt;
	}
}

/*
 * Mark visibility.  info->defer is recalculated on every pass.
 */
static void
unp_mark(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;

	if ((fp->f_flag & FMARK) == 0) {
		++info->defer;
		atomic_set_int(&fp->f_flag, FMARK | FDEFER);
	} else if (fp->f_flag & FDEFER) {
		++info->defer;
	}
}

/*
 * Discard a fp previously held in a unix domain socket mbuf.  To
 * avoid blowing out the kernel stack due to contrived chain-reactions
 * we may have to defer the operation to a higher procedural level.
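 *
 * Deferral simply queues the fp on the unp_defdiscard list; the
 * outermost unp_dispose() call drains that list iteratively instead
 * of recursing through closef().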
 *
 * Caller holds unp_token.
 */
static void
unp_discard(struct file *fp, void *data __unused)
{
	unp_defdiscard_t dds;

	spin_lock(&unp_spin);
	fp->f_msgcount--;
	unp_rights--;
	spin_unlock(&unp_spin);

	if (unp_defdiscard_nest) {
		dds = kmalloc(sizeof(*dds), M_UNPCB, M_WAITOK | M_ZERO);
		dds->fp = fp;
		dds->next = unp_defdiscard_base;
		unp_defdiscard_base = dds;
	} else {
		closef(fp, NULL);
	}
}

static int
unp_find_lockref(struct sockaddr *nam, struct thread *td, short type,
    struct unpcb **unp_ret)
{
	struct proc *p = td->td_proc;
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp = NULL;
	struct socket *so;
	struct unpcb *unp;
	int error, len;
	struct nlookupdata nd;
	char buf[SOCK_MAXADDRLEN];

	*unp_ret = NULL;

	len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
	if (len <= 0) {
		error = EINVAL;
		goto failed;
	}
	strncpy(buf, soun->sun_path, len);
	buf[len] = 0;

	error = nlookup_init(&nd, buf, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	nlookup_done(&nd);
	if (error) {
		vp = NULL;
		goto failed;
	}

	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto failed;
	}
	error = VOP_EACCESS(vp, VWRITE, p->p_ucred);
	if (error)
		goto failed;
	so = vp->v_socket;
	if (so == NULL) {
		error = ECONNREFUSED;
		goto failed;
	}
	if (so->so_type != type) {
		error = EPROTOTYPE;
		goto failed;
	}

	/* Lock this unp. */
	unp = unp_getsocktoken(so);
	if (!UNP_ISATTACHED(unp)) {
		unp_reltoken(unp);
		error = ECONNREFUSED;
		goto failed;
	}
	/* And keep this unp referenced. */
	unp_reference(unp);

	/* Done! */
	*unp_ret = unp;
	error = 0;
failed:
	if (vp != NULL)
		vput(vp);
	return error;
}

static int
unp_connect_pair(struct unpcb *unp, struct unpcb *unp2)
{
	struct socket *so = unp->unp_socket;
	struct socket *so2 = unp2->unp_socket;

	UNP_ASSERT_TOKEN_HELD(unp);
	UNP_ASSERT_TOKEN_HELD(unp2);

	KASSERT(so->so_type == so2->so_type,
	    ("socket type mismatch, so %d, so2 %d", so->so_type, so2->so_type));

	if (!UNP_ISATTACHED(unp))
		return EINVAL;
	if (!UNP_ISATTACHED(unp2))
		return ECONNREFUSED;

	KASSERT(unp->unp_conn == NULL, ("unp is already connected"));
	unp->unp_conn = unp2;

	switch (so->so_type) {
	case SOCK_DGRAM:
		LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
		soisconnected(so);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		KASSERT(unp2->unp_conn == NULL, ("unp2 is already connected"));
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect_pair: unknown socket type %d", so->so_type);
	}
	return 0;
}