/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.54.2.10 2003/03/04 17:28:09 nectar Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/domain.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>		/* XXX must be before <sys/file.h> */
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/mbuf.h>
#include <sys/nlookup.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>
#include <sys/kern_syscall.h>
#include <sys/taskqueue.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>
#include <sys/socketvar2.h>
#include <sys/msgport2.h>

#define UNP_DETACHED		UNP_PRIVATE1
#define UNP_CONNECTING		UNP_PRIVATE2
#define UNP_DROPPED		UNP_PRIVATE3
#define UNP_MARKER		UNP_PRIVATE4

#define UNP_ISATTACHED(unp)	\
    ((unp) != NULL && ((unp)->unp_flags & UNP_DETACHED) == 0)

#ifdef INVARIANTS
#define UNP_ASSERT_TOKEN_HELD(unp) \
    ASSERT_LWKT_TOKEN_HELD(lwkt_token_pool_lookup((unp)))
#else	/* !INVARIANTS */
#define UNP_ASSERT_TOKEN_HELD(unp)
#endif	/* INVARIANTS */

struct unp_defdiscard {
	SLIST_ENTRY(unp_defdiscard) next;
	struct file *fp;
};
SLIST_HEAD(unp_defdiscard_list, unp_defdiscard);

TAILQ_HEAD(unpcb_qhead, unpcb);
struct unp_global_head {
	struct unpcb_qhead	list;
	int			count;
};

static MALLOC_DEFINE(M_UNPCB, "unpcb", "unpcb struct");
static unp_gen_t unp_gencnt;

static struct unp_global_head unp_stream_head;
static struct unp_global_head unp_dgram_head;
static struct unp_global_head unp_seqpkt_head;

static struct lwkt_token unp_token = LWKT_TOKEN_INITIALIZER(unp_token);
static struct taskqueue *unp_taskqueue;

static struct unp_defdiscard_list unp_defdiscard_head;
static struct spinlock unp_defdiscard_spin;
static struct task unp_defdiscard_task;

/*
 * Unix communications domain.
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 *	lock pushdown
 */
static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
static ino_t unp_ino = 1;	/* prototype for fake inode numbers */

static int	unp_attach(struct socket *, struct pru_attach_info *);
static void	unp_detach(struct unpcb *);
static int	unp_bind(struct unpcb *, struct sockaddr *, struct thread *);
static int	unp_connect(struct socket *, struct sockaddr *,
		    struct thread *);
static void	unp_disconnect(struct unpcb *, int);
static void	unp_shutdown(struct unpcb *);
static void	unp_gc(void);
static int	unp_gc_clearmarks(struct file *, void *);
static int	unp_gc_checkmarks(struct file *, void *);
static int	unp_gc_checkrefs(struct file *, void *);
static void	unp_scan(struct mbuf *, void (*)(struct file *, void *),
		    void *data);
static void	unp_mark(struct file *, void *data);
static void	unp_discard(struct file *, void *);
static int	unp_internalize(struct mbuf *, struct thread *);
static int	unp_listen(struct unpcb *, struct thread *);
static void	unp_fp_externalize(struct lwp *lp, struct file *fp, int fd,
		    int flags);
static int	unp_find_lockref(struct sockaddr *nam, struct thread *td,
		    short type, struct unpcb **unp_ret);
static int	unp_connect_pair(struct unpcb *unp, struct unpcb *unp2);
static void	unp_drop(struct unpcb *unp, int error);
static void	unp_defdiscard_taskfunc(void *, int);

/*
 * SMP Considerations:
 *
 *	Since unp_token will be automatically released upon execution of
 *	blocking code, we need to reference unp_conn before any possible
 *	blocking code to prevent it from being ripped out from behind our
 *	back.
 *
 *	Any adjustment to unp->unp_conn requires both the global unp_token
 *	AND the per-unp token (lwkt_token_pool_lookup(unp)) to be held.
 *
 *	Any access to so_pcb to obtain unp requires the pool token for
 *	unp to be held.
 */

static __inline void
unp_reference(struct unpcb *unp)
{
	/* 0->1 transition will not work */
	KKASSERT(unp->unp_refcnt > 0);
	atomic_add_int(&unp->unp_refcnt, 1);
}

static __inline void
unp_free(struct unpcb *unp)
{
	KKASSERT(unp->unp_refcnt > 0);
	if (atomic_fetchadd_int(&unp->unp_refcnt, -1) == 1)
		unp_detach(unp);
}

static __inline struct unpcb *
unp_getsocktoken(struct socket *so)
{
	struct unpcb *unp;

	/*
	 * The unp pointer is invalid until we verify that it is
	 * good by re-checking so_pcb AFTER obtaining the token.
	 */
	while ((unp = so->so_pcb) != NULL) {
		lwkt_getpooltoken(unp);
		if (unp == so->so_pcb)
			break;
		lwkt_relpooltoken(unp);
	}
	return unp;
}

static __inline void
unp_reltoken(struct unpcb *unp)
{
	if (unp != NULL)
		lwkt_relpooltoken(unp);
}

static __inline void
unp_setflags(struct unpcb *unp, int flags)
{
	atomic_set_int(&unp->unp_flags, flags);
}

static __inline void
unp_clrflags(struct unpcb *unp, int flags)
{
	atomic_clear_int(&unp->unp_flags, flags);
}

static __inline struct unp_global_head *
unp_globalhead(short type)
{
	switch (type) {
	case SOCK_STREAM:
		return &unp_stream_head;
	case SOCK_DGRAM:
		return &unp_dgram_head;
	case SOCK_SEQPACKET:
		return &unp_seqpkt_head;
	default:
		panic("unknown socket type %d", type);
	}
}
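
/*
 * Illustrative sketch (not compiled, not part of the original file): the
 * canonical caller pattern for the helpers above, as used by the pru_*
 * handlers below.  All identifiers are from this file; only the skeleton
 * is new.
 */
#if 0
	struct unpcb *unp;

	unp = unp_getsocktoken(so);	/* pool token + so_pcb revalidation */
	if (UNP_ISATTACHED(unp)) {
		unp_reference(unp);	/* hold unp across blocking code */
		/* ... work that may block ... */
		unp_free(unp);		/* drop ref; last ref -> unp_detach() */
	}
	unp_reltoken(unp);		/* NULL-safe token release */
#endif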

/*
 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
 *	 will sofree() it when we return.
 */
static void
uipc_abort(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_setflags(unp, UNP_DETACHED);
		unp_drop(unp, ECONNABORTED);
		unp_free(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_accept(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
	} else {
		struct unpcb *unp2 = unp->unp_conn;

		/*
		 * Pass back name of connected socket,
		 * if it was bound and we are still connected
		 * (our peer may have closed already!).
		 */
		if (unp2 && unp2->unp_addr) {
			unp_reference(unp2);
			*msg->accept.nm_nam = dup_sockaddr(
			    (struct sockaddr *)unp2->unp_addr);
			unp_free(unp2);
		} else {
			*msg->accept.nm_nam = dup_sockaddr(&sun_noname);
		}
		error = 0;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_attach(netmsg_t msg)
{
	int error;

	lwkt_gettoken(&unp_token);

	KASSERT(msg->base.nm_so->so_pcb == NULL, ("double unp attach"));
	error = unp_attach(msg->base.nm_so, msg->attach.nm_ai);

	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_bind(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp))
		error = unp_bind(unp, msg->bind.nm_nam, msg->bind.nm_td);
	else
		error = EINVAL;

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_connect(netmsg_t msg)
{
	int error;

	error = unp_connect(msg->base.nm_so, msg->connect.nm_nam,
	    msg->connect.nm_td);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_connect2(netmsg_t msg)
{
	int error;

	error = unp_connect2(msg->connect2.nm_so1, msg->connect2.nm_so2);
	lwkt_replymsg(&msg->lmsg, error);
}

/* control is EOPNOTSUPP */

static void
uipc_detach(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_setflags(unp, UNP_DETACHED);
		unp_drop(unp, 0);
		unp_free(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_disconnect(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_disconnect(unp, 0);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_listen(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp) || unp->unp_vnode == NULL)
		error = EINVAL;
	else
		error = unp_listen(unp, msg->listen.nm_td);

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_peeraddr(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
	} else if (unp->unp_conn && unp->unp_conn->unp_addr) {
		struct unpcb *unp2 = unp->unp_conn;

		unp_reference(unp2);
		*msg->peeraddr.nm_nam = dup_sockaddr(
		    (struct sockaddr *)unp2->unp_addr);
		unp_free(unp2);
		error = 0;
	} else {
		/*
		 * XXX: It seems that this test always fails even when the
		 * connection is established.  So, this else clause is
		 * added as a workaround to return a PF_LOCAL sockaddr.
		 */
		*msg->peeraddr.nm_nam = dup_sockaddr(&sun_noname);
		error = 0;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_rcvd(netmsg_t msg)
{
	struct unpcb *unp, *unp2;
	struct socket *so;
	struct socket *so2;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	switch (so->so_type) {
	case SOCK_DGRAM:
		panic("uipc_rcvd DGRAM?");
		/*NOTREACHED*/
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (unp->unp_conn == NULL)
			break;
		unp2 = unp->unp_conn;	/* protected by pool token */

		/*
		 * Because we are transferring mbufs directly to the
		 * peer socket we have to use SSB_STOP on the sender
		 * to prevent it from building up infinite mbufs.
		 *
		 * As in several places in this module we have to ref unp2
		 * to ensure that it does not get ripped out from under us
		 * if we block on the so2 token or in sowwakeup().
		 */
		so2 = unp2->unp_socket;
		unp_reference(unp2);
		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (so->so_rcv.ssb_cc < so2->so_snd.ssb_hiwat &&
		    so->so_rcv.ssb_mbcnt < so2->so_snd.ssb_mbmax) {
			atomic_clear_int(&so2->so_snd.ssb_flags, SSB_STOP);

			sowwakeup(so2);
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);
		unp_free(unp2);
		break;
	default:
		panic("uipc_rcvd unknown socktype");
		/*NOTREACHED*/
	}
	error = 0;
done:
	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}
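
/*
 * Flow-control recap (descriptive note): uipc_rcvd() above and uipc_send()
 * below cooperate through SSB_STOP.  The sender sets SSB_STOP on its own
 * send buffer once the peer's receive buffer crosses the sender's
 * hiwat/mbmax limits; as the receiver drains data, uipc_rcvd() clears
 * SSB_STOP on the peer's send buffer and calls sowwakeup() so the writer
 * can resume.
 */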

/* pru_rcvoob is EOPNOTSUPP */

static void
uipc_send(netmsg_t msg)
{
	struct unpcb *unp, *unp2;
	struct socket *so;
	struct socket *so2;
	struct mbuf *control;
	struct mbuf *m;
	int error = 0;

	so = msg->base.nm_so;
	control = msg->send.nm_control;
	m = msg->send.nm_m;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto release;
	}

	if (msg->send.nm_flags & PRUS_OOB) {
		error = EOPNOTSUPP;
		goto release;
	}

	wakeup_start_delayed();

	if (control && (error = unp_internalize(control, msg->send.nm_td)))
		goto release;

	switch (so->so_type) {
	case SOCK_DGRAM:
	{
		struct sockaddr *from;

		if (msg->send.nm_addr) {
			if (unp->unp_conn) {
				error = EISCONN;
				break;
			}
			error = unp_find_lockref(msg->send.nm_addr,
			    msg->send.nm_td, so->so_type, &unp2);
			if (error)
				break;
			/*
			 * NOTE:
			 * unp2 is locked and referenced.
			 *
			 * We could unlock unp2 now, since it was checked
			 * and referenced.
			 */
			unp_reltoken(unp2);
		} else {
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
			unp2 = unp->unp_conn;
			unp_reference(unp2);
		}
		/* NOTE: unp2 is referenced. */
		so2 = unp2->unp_socket;

		if (unp->unp_addr)
			from = (struct sockaddr *)unp->unp_addr;
		else
			from = &sun_noname;

		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (ssb_appendaddr(&so2->so_rcv, from, m, control)) {
			sorwakeup(so2);
			m = NULL;
			control = NULL;
		} else {
			error = ENOBUFS;
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);

		unp_free(unp2);
		break;
	}

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/* Connect if not connected yet. */
		/*
		 * Note: A better implementation would complain
		 * if not equal to the peer's address.
		 */
		if (unp->unp_conn == NULL) {
			if (msg->send.nm_addr) {
				error = unp_connect(so,
				    msg->send.nm_addr,
				    msg->send.nm_td);
				if (error)
					break;	/* XXX */
			}
			/*
			 * NOTE:
			 * unp_conn still could be NULL, even if the
			 * above unp_connect() succeeds; since the
			 * current unp's token could be released due
			 * to blocking operations after unp_conn is
			 * assigned.
			 */
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
		}
		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			break;
		}

		unp2 = unp->unp_conn;
		KASSERT(unp2 != NULL, ("unp is not connected"));
		so2 = unp2->unp_socket;

		unp_reference(unp2);

		/*
		 * Send to paired receive port, and then reduce
		 * send buffer hiwater marks to maintain backpressure.
		 * Wake up readers.
		 */
		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (control) {
			if (ssb_appendcontrol(&so2->so_rcv, m, control)) {
				control = NULL;
				m = NULL;
			}
		} else if (so->so_type == SOCK_SEQPACKET) {
			sbappendrecord(&so2->so_rcv.sb, m);
			m = NULL;
		} else {
			sbappend(&so2->so_rcv.sb, m);
			m = NULL;
		}

		/*
		 * Because we are transferring mbufs directly to the
		 * peer socket we have to use SSB_STOP on the sender
		 * to prevent it from building up infinite mbufs.
		 */
		if (so2->so_rcv.ssb_cc >= so->so_snd.ssb_hiwat ||
		    so2->so_rcv.ssb_mbcnt >= so->so_snd.ssb_mbmax) {
			atomic_set_int(&so->so_snd.ssb_flags, SSB_STOP);
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);
		sorwakeup(so2);

		unp_free(unp2);
		break;

	default:
		panic("uipc_send unknown socktype");
	}

	/*
	 * SEND_EOF is equivalent to a SEND followed by a SHUTDOWN.
	 */
	if (msg->send.nm_flags & PRUS_EOF) {
		socantsendmore(so);
		unp_shutdown(unp);
	}

	if (control && error != 0)
		unp_dispose(control);
release:
	unp_reltoken(unp);
	wakeup_end_delayed();

	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * MPSAFE
 */
static void
uipc_sense(netmsg_t msg)
{
	struct unpcb *unp;
	struct socket *so;
	struct stat *sb;
	int error;

	so = msg->base.nm_so;
	sb = msg->sense.nm_stat;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	sb->st_blksize = so->so_snd.ssb_hiwat;
	sb->st_dev = NOUDEV;
	if (unp->unp_ino == 0) {	/* make up a non-zero inode number */
		unp->unp_ino = atomic_fetchadd_long(&unp_ino, 1);
		if (__predict_false(unp->unp_ino == 0))
			unp->unp_ino = atomic_fetchadd_long(&unp_ino, 1);
	}
	sb->st_ino = unp->unp_ino;
	error = 0;
done:
	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}
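
/*
 * Illustrative userland sketch (not compiled; would need <sys/stat.h>
 * and an open unix socket descriptor "s", a made-up name): fstat(2) on
 * a unix socket lands in uipc_sense() above, so st_dev reads as NOUDEV
 * and st_ino is the synthetic per-unpcb inode fabricated from unp_ino.
 */
#if 0
	struct stat st;

	if (fstat(s, &st) == 0)
		printf("blksize %ld ino %ju\n",
		    (long)st.st_blksize, (uintmax_t)st.st_ino);
#endif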

static void
uipc_shutdown(netmsg_t msg)
{
	struct socket *so;
	struct unpcb *unp;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (UNP_ISATTACHED(unp)) {
		socantsendmore(so);
		unp_shutdown(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_sockaddr(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		if (unp->unp_addr) {
			*msg->sockaddr.nm_nam =
			    dup_sockaddr((struct sockaddr *)unp->unp_addr);
		}
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

struct pr_usrreqs uipc_usrreqs = {
	.pru_abort = uipc_abort,
	.pru_accept = uipc_accept,
	.pru_attach = uipc_attach,
	.pru_bind = uipc_bind,
	.pru_connect = uipc_connect,
	.pru_connect2 = uipc_connect2,
	.pru_control = pr_generic_notsupp,
	.pru_detach = uipc_detach,
	.pru_disconnect = uipc_disconnect,
	.pru_listen = uipc_listen,
	.pru_peeraddr = uipc_peeraddr,
	.pru_rcvd = uipc_rcvd,
	.pru_rcvoob = pr_generic_notsupp,
	.pru_send = uipc_send,
	.pru_sense = uipc_sense,
	.pru_shutdown = uipc_shutdown,
	.pru_sockaddr = uipc_sockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive
};

void
uipc_ctloutput(netmsg_t msg)
{
	struct socket *so;
	struct sockopt *sopt;
	struct unpcb *unp;
	int error = 0;

	so = msg->base.nm_so;
	sopt = msg->ctloutput.nm_sopt;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case LOCAL_PEERCRED:
			if (unp->unp_flags & UNP_HAVEPC) {
				soopt_from_kbuf(sopt, &unp->unp_peercred,
				    sizeof(unp->unp_peercred));
			} else {
				if (so->so_type == SOCK_STREAM ||
				    so->so_type == SOCK_SEQPACKET)
					error = ENOTCONN;
				else
					error = EINVAL;
			}
			break;
		default:
			error = EOPNOTSUPP;
			break;
		}
		break;
	case SOPT_SET:
	default:
		error = EOPNOTSUPP;
		break;
	}

done:
	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}
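
/*
 * Illustrative userland sketch (not compiled; hedged, not a spec): the
 * LOCAL_PEERCRED option handled above is queried with getsockopt(2) at
 * the AF_LOCAL option level (0 on the BSDs), yielding the struct xucred
 * cached at connect()/listen() time.  "s" is a made-up descriptor name.
 */
#if 0
	struct xucred xuc;
	socklen_t len = sizeof(xuc);

	if (getsockopt(s, 0, LOCAL_PEERCRED, &xuc, &len) == 0)
		printf("peer euid %d\n", (int)xuc.cr_uid);
#endif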

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 *
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 *
 * We want the local send/recv space to be significantly larger than lo0's
 * mtu of 16384.
 */
#ifndef PIPSIZ
#define	PIPSIZ	57344
#endif
static u_long	unpst_sendspace = PIPSIZ;
static u_long	unpst_recvspace = PIPSIZ;
static u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
static u_long	unpdg_recvspace = 4*1024;

static int	unp_rights;		/* file descriptors in flight */
static struct spinlock unp_spin = SPINLOCK_INITIALIZER(&unp_spin, "unp_spin");

SYSCTL_DECL(_net_local_seqpacket);
SYSCTL_DECL(_net_local_stream);
SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
    &unpst_sendspace, 0, "Size of stream socket send buffer");
SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpst_recvspace, 0, "Size of stream socket receive buffer");

SYSCTL_DECL(_net_local_dgram);
SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
    &unpdg_sendspace, 0, "Max datagram socket size");
SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpdg_recvspace, 0, "Size of datagram socket receive buffer");

SYSCTL_DECL(_net_local);
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
    "File descriptors in flight");

static int
unp_attach(struct socket *so, struct pru_attach_info *ai)
{
	struct unp_global_head *head;
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);

	if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
		switch (so->so_type) {
		case SOCK_STREAM:
		case SOCK_SEQPACKET:
			error = soreserve(so, unpst_sendspace,
			    unpst_recvspace, ai->sb_rlimit);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace,
			    unpdg_recvspace, ai->sb_rlimit);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			goto failed;
	}

	/*
	 * In order to support sendfile we have to set either SSB_STOPSUPP
	 * or SSB_PREALLOC.  Unix domain sockets use the SSB_STOP flow
	 * control mechanism.
	 */
	if (so->so_type == SOCK_STREAM) {
		atomic_set_int(&so->so_rcv.ssb_flags, SSB_STOPSUPP);
		atomic_set_int(&so->so_snd.ssb_flags, SSB_STOPSUPP);
	}

	unp = kmalloc(sizeof(*unp), M_UNPCB, M_WAITOK | M_ZERO | M_NULLOK);
	if (unp == NULL) {
		error = ENOBUFS;
		goto failed;
	}
	unp->unp_refcnt = 1;
	unp->unp_gencnt = ++unp_gencnt;
	LIST_INIT(&unp->unp_refs);
	unp->unp_socket = so;
	unp->unp_rvnode = ai->fd_rdir;		/* jail cruft XXX JH */
	so->so_pcb = (caddr_t)unp;
	soreference(so);

	head = unp_globalhead(so->so_type);
	TAILQ_INSERT_TAIL(&head->list, unp, unp_link);
	head->count++;
	error = 0;
failed:
	lwkt_reltoken(&unp_token);
	return error;
}

static void
unp_detach(struct unpcb *unp)
{
	struct unp_global_head *head;
	struct socket *so;

	lwkt_gettoken(&unp_token);
	lwkt_getpooltoken(unp);

	so = unp->unp_socket;

	head = unp_globalhead(so->so_type);
	KASSERT(head->count > 0, ("invalid unp count"));
	TAILQ_REMOVE(&head->list, unp, unp_link);
	head->count--;

	unp->unp_gencnt = ++unp_gencnt;
	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = NULL;
		vrele(unp->unp_vnode);
		unp->unp_vnode = NULL;
	}
	soisdisconnected(so);
	soreference(so);		/* for delayed sorflush */
	KKASSERT(so->so_pcb == unp);
	so->so_pcb = NULL;		/* both tokens required */
	unp->unp_socket = NULL;
	sofree(so);			/* remove pcb ref */

	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(so);
		unp_gc();
	}
	sofree(so);
	lwkt_relpooltoken(unp);
	lwkt_reltoken(&unp_token);

	KASSERT(unp->unp_conn == NULL, ("unp is still connected"));
	KASSERT(LIST_EMPTY(&unp->unp_refs), ("unp still has references"));

	if (unp->unp_addr)
		kfree(unp->unp_addr, M_SONAME);
	kfree(unp, M_UNPCB);
}

static int
unp_bind(struct unpcb *unp, struct sockaddr *nam, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp;
	struct vattr vattr;
	int error, namelen;
	struct nlookupdata nd;
	char buf[SOCK_MAXADDRLEN];

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	if (unp->unp_vnode != NULL)
		return EINVAL;

	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0)
		return EINVAL;
	strncpy(buf, soun->sun_path, namelen);
	buf[namelen] = 0;	/* null-terminate the string */
	error = nlookup_init(&nd, buf, UIO_SYSSPACE,
	    NLC_LOCKVP | NLC_CREATE | NLC_REFDVP);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0 && nd.nl_nch.ncp->nc_vp != NULL)
		error = EADDRINUSE;
	if (error)
		goto done;

	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask);
	error = VOP_NCREATE(&nd.nl_nch, nd.nl_dvp, &vp, nd.nl_cred, &vattr);
	if (error == 0) {
		if (unp->unp_vnode == NULL) {
			vp->v_socket = unp->unp_socket;
			unp->unp_vnode = vp;
			unp->unp_addr =
			    (struct sockaddr_un *)dup_sockaddr(nam);
			vn_unlock(vp);
		} else {
			vput(vp);		/* late race */
			error = EINVAL;
		}
	}
done:
	nlookup_done(&nd);
	return (error);
}
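
/*
 * Illustrative userland sketch (not compiled; would need <sys/socket.h>,
 * <sys/un.h>, <string.h>, <unistd.h>, <err.h>; path and names are made
 * up): bind(2) on a unix socket reaches unp_bind() above, which creates
 * the VSOCK vnode.  A stale file must be unlinked first or the nc_vp
 * check above returns EADDRINUSE.
 */
#if 0
	struct sockaddr_un sun;

	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_LOCAL;
	strlcpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path));
	sun.sun_len = SUN_LEN(&sun);
	unlink(sun.sun_path);			/* avoid EADDRINUSE */
	if (bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) < 0)
		err(1, "bind");
#endif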

static int
unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct unpcb *unp, *unp2;
	int error, flags = 0;

	lwkt_gettoken(&unp_token);

	unp = unp_getsocktoken(so);
	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto failed;
	}

	if ((unp->unp_flags & UNP_CONNECTING) || unp->unp_conn != NULL) {
		error = EISCONN;
		goto failed;
	}

	flags = UNP_CONNECTING;
	unp_setflags(unp, flags);

	error = unp_find_lockref(nam, td, so->so_type, &unp2);
	if (error)
		goto failed;
	/*
	 * NOTE:
	 * unp2 is locked and referenced.
	 */

	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		struct socket *so2, *so3;
		struct unpcb *unp3;

		so2 = unp2->unp_socket;
		if (!(so2->so_options & SO_ACCEPTCONN) ||
		    (so3 = sonewconn_faddr(so2, 0, NULL,
		     TRUE /* keep ref */)) == NULL) {
			error = ECONNREFUSED;
			goto done;
		}
		/* so3 has a socket reference. */

		unp3 = unp_getsocktoken(so3);
		if (!UNP_ISATTACHED(unp3)) {
			unp_reltoken(unp3);
			/*
			 * Already aborted; we only need to drop the
			 * socket reference held by sonewconn_faddr().
			 */
			sofree(so3);
			error = ECONNREFUSED;
			goto done;
		}
		unp_reference(unp3);
		/*
		 * NOTE:
		 * unp3 is locked and referenced.
		 */

		/*
		 * Release so3 socket reference held by sonewconn_faddr().
		 * Since we have referenced unp3, neither unp3 nor so3 will
		 * be destroyed here.
		 */
		sofree(so3);

		if (unp2->unp_addr != NULL) {
			unp3->unp_addr = (struct sockaddr_un *)
			    dup_sockaddr((struct sockaddr *)unp2->unp_addr);
		}

		/*
		 * unp_peercred management:
		 *
		 * The connecter's (client's) credentials are copied
		 * from its process structure at the time of connect()
		 * (which is now).
		 */
		cru2x(td->td_proc->p_ucred, &unp3->unp_peercred);
		unp_setflags(unp3, UNP_HAVEPC);
		/*
		 * The receiver's (server's) credentials are copied
		 * from the unp_peercred member of the socket on which
		 * the former called listen(); unp_listen() cached that
		 * process's credentials at that time so we can use
		 * them now.
		 */
		KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
		    ("unp_connect: listener without cached peercred"));
		memcpy(&unp->unp_peercred, &unp2->unp_peercred,
		    sizeof(unp->unp_peercred));
		unp_setflags(unp, UNP_HAVEPC);

		error = unp_connect_pair(unp, unp3);
		if (error)
			soabort_direct(so3);

		/* Done with unp3 */
		unp_free(unp3);
		unp_reltoken(unp3);
	} else {
		error = unp_connect_pair(unp, unp2);
	}
done:
	unp_free(unp2);
	unp_reltoken(unp2);
failed:
	if (flags)
		unp_clrflags(unp, flags);
	unp_reltoken(unp);

	lwkt_reltoken(&unp_token);
	return (error);
}
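
/*
 * Descriptive note on the handshake above: for connection-oriented types
 * three sockets are involved.  unp/so is the connecting client, unp2/so2
 * is the bound listener located by unp_find_lockref(), and unp3/so3 is
 * the fresh socket minted by sonewconn_faddr() that the client actually
 * pairs with; the listener itself is never connected.
 */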

/*
 * Connect two unix domain sockets together.
 *
 * NOTE: Semantics for any change to unp_conn requires that the per-unp
 *	 pool token also be held.
 */
int
unp_connect2(struct socket *so, struct socket *so2)
{
	struct unpcb *unp, *unp2;
	int error;

	lwkt_gettoken(&unp_token);
	if (so2->so_type != so->so_type) {
		lwkt_reltoken(&unp_token);
		return (EPROTOTYPE);
	}
	unp = unp_getsocktoken(so);
	unp2 = unp_getsocktoken(so2);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}
	if (!UNP_ISATTACHED(unp2)) {
		error = ECONNREFUSED;
		goto done;
	}

	if (unp->unp_conn != NULL) {
		error = EISCONN;
		goto done;
	}
	if ((so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET) &&
	    unp2->unp_conn != NULL) {
		error = EISCONN;
		goto done;
	}

	error = unp_connect_pair(unp, unp2);
done:
	unp_reltoken(unp2);
	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);
	return (error);
}
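
/*
 * Illustrative userland sketch (not compiled; needs <sys/socket.h> and
 * <err.h>): socketpair(2) is the common way two pre-connected unix
 * sockets come into being; for this domain it arrives at unp_connect2()
 * above with two unbound, unconnected pcbs.
 */
#if 0
	int sv[2];

	if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sv) < 0)
		err(1, "socketpair");
	/* sv[0] and sv[1] are now connected to each other */
#endif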

/*
 * Disconnect a unix domain socket pair.
 *
 * NOTE: Semantics for any change to unp_conn requires that the per-unp
 *	 pool token also be held.
 */
static void
unp_disconnect(struct unpcb *unp, int error)
{
	struct socket *so = unp->unp_socket;
	struct unpcb *unp2;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	if (error)
		so->so_error = error;

	while ((unp2 = unp->unp_conn) != NULL) {
		lwkt_getpooltoken(unp2);
		if (unp2 == unp->unp_conn)
			break;
		lwkt_relpooltoken(unp2);
	}
	if (unp2 == NULL)
		return;
	/* unp2 is locked. */

	KASSERT((unp2->unp_flags & UNP_DROPPED) == 0, ("unp2 was dropped"));

	unp->unp_conn = NULL;

	switch (so->so_type) {
	case SOCK_DGRAM:
		LIST_REMOVE(unp, unp_reflink);
		soclrstate(so, SS_ISCONNECTED);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/*
		 * Keep a reference before clearing the unp_conn
		 * to avoid racing uipc_detach()/uipc_abort() in
		 * another thread.
		 */
		unp_reference(unp2);
		KASSERT(unp2->unp_conn == unp, ("unp_conn mismatch"));
		unp2->unp_conn = NULL;

		soisdisconnected(so);
		soisdisconnected(unp2->unp_socket);

		unp_free(unp2);
		break;
	}

	lwkt_relpooltoken(unp2);
}

#ifdef notdef
void
unp_abort(struct unpcb *unp)
{
	lwkt_gettoken(&unp_token);
	unp_free(unp);
	lwkt_reltoken(&unp_token);
}
#endif

static int
prison_unpcb(struct thread *td, struct unpcb *unp)
{
	struct proc *p;

	if (td == NULL)
		return (0);
	if ((p = td->td_proc) == NULL)
		return (0);
	if (!p->p_ucred->cr_prison)
		return (0);
	if (p->p_fd->fd_rdir == unp->unp_rvnode)
		return (0);
	return (1);
}

static int
unp_pcblist(SYSCTL_HANDLER_ARGS)
{
	struct unp_global_head *head = arg1;
	int error, i, n;
	struct unpcb *unp, *marker;

	KKASSERT(curproc != NULL);

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = head->count;
		req->oldidx = (n + n/8) * sizeof(struct xunpcb);
		return 0;
	}

	if (req->newptr != NULL)
		return EPERM;

	marker = kmalloc(sizeof(*marker), M_UNPCB, M_WAITOK | M_ZERO);
	marker->unp_flags |= UNP_MARKER;

	lwkt_gettoken(&unp_token);

	n = head->count;
	i = 0;
	error = 0;

	TAILQ_INSERT_HEAD(&head->list, marker, unp_link);
	while ((unp = TAILQ_NEXT(marker, unp_link)) != NULL && i < n) {
		struct xunpcb xu;

		TAILQ_REMOVE(&head->list, marker, unp_link);
		TAILQ_INSERT_AFTER(&head->list, unp, marker, unp_link);

		if (unp->unp_flags & UNP_MARKER)
			continue;
		if (prison_unpcb(req->td, unp))
			continue;

		xu.xu_len = sizeof(xu);
		xu.xu_unpp = unp;

		/*
		 * NOTE:
		 * unp->unp_addr and unp->unp_conn are protected by
		 * unp_token.  So if we want to get rid of unp_token
		 * or reduce the coverage of unp_token, care must be
		 * taken.
		 */
		if (unp->unp_addr) {
			bcopy(unp->unp_addr, &xu.xu_addr,
			    unp->unp_addr->sun_len);
		}
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			bcopy(unp->unp_conn->unp_addr,
			    &xu.xu_caddr,
			    unp->unp_conn->unp_addr->sun_len);
		}
		bcopy(unp, &xu.xu_unp, sizeof(*unp));
		sotoxsocket(unp->unp_socket, &xu.xu_socket);

		/* NOTE: This could block and temporarily release unp_token */
		error = SYSCTL_OUT(req, &xu, sizeof(xu));
		if (error)
			break;
		++i;
	}
	TAILQ_REMOVE(&head->list, marker, unp_link);

	lwkt_reltoken(&unp_token);

	kfree(marker, M_UNPCB);
	return error;
}

SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
    &unp_dgram_head, 0, unp_pcblist, "S,xunpcb",
    "List of active local datagram sockets");
SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
    &unp_stream_head, 0, unp_pcblist, "S,xunpcb",
    "List of active local stream sockets");
SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLFLAG_RD,
    &unp_seqpkt_head, 0, unp_pcblist, "S,xunpcb",
    "List of active local seqpacket sockets");
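
/*
 * Consumer note (an assumption worth flagging, not verified here): these
 * pcblist sysctls are the kind of interface netstat(1) reads to display
 * unix domain sockets, e.g. via net.local.stream.pcblist.
 */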

static void
unp_shutdown(struct unpcb *unp)
{
	struct socket *so;

	if ((unp->unp_socket->so_type == SOCK_STREAM ||
	     unp->unp_socket->so_type == SOCK_SEQPACKET) &&
	    unp->unp_conn != NULL && (so = unp->unp_conn->unp_socket)) {
		socantrcvmore(so);
	}
}

#ifdef notdef
void
unp_drain(void)
{
	lwkt_gettoken(&unp_token);
	lwkt_reltoken(&unp_token);
}
#endif

int
unp_externalize(struct mbuf *rights, int flags)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* XXX */
	struct lwp *lp = td->td_lwp;
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	int *fdp;
	int i;
	struct file **rp;
	struct file *fp;
	int newfds = (cm->cmsg_len - (CMSG_DATA(cm) - (u_char *)cm))
	    / sizeof(struct file *);
	int f;

	/*
	 * If the new FDs will not fit, then we free them all.
	 */
	if (!fdavail(p, newfds)) {
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			fp = *rp;
			/*
			 * Zero the pointer before calling unp_discard,
			 * since it may end up in unp_gc()..
			 */
			*rp++ = NULL;
			unp_discard(fp, NULL);
		}
		return (EMSGSIZE);
	}

	/*
	 * Now change each pointer to an fd in the global table to
	 * an integer that is the index to the local fd table entry
	 * that we set up to point to the global one we are transferring.
	 * If sizeof(struct file *) is bigger than or equal to sizeof(int),
	 * then do it in forward order.  In that case, an integer will
	 * always come in the same place or before its corresponding
	 * struct file pointer.
	 * If sizeof(struct file *) is smaller than sizeof(int), then
	 * do it in reverse order.
	 *
	 * Hold revoke_token in 'shared' mode, so that we won't miss
	 * the FREVOKED update on fps being externalized (fsetfd).
	 */
	lwkt_gettoken_shared(&revoke_token);
	if (sizeof(struct file *) >= sizeof(int)) {
		fdp = (int *)CMSG_DATA(cm);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f)) {
				int j;

				/*
				 * The previous fdavail() can't guarantee
				 * fdalloc() success due to SMP race.
				 * Just clean up and return the same
				 * error value as if fdavail() failed.
				 */

				/* Close externalized files */
				for (j = 0; j < i; j++)
					kern_close(fdp[j]);
				/* Discard the rest of internal files */
				for (; i < newfds; i++)
					unp_discard(rp[i], NULL);
				/* Wipe out the control message */
				for (i = 0; i < newfds; i++)
					rp[i] = NULL;

				lwkt_reltoken(&revoke_token);
				return (EMSGSIZE);
			}
			fp = rp[i];
			unp_fp_externalize(lp, fp, f, flags);
			fdp[i] = f;
		}
	} else {
		/*
		 * XXX
		 * Will this ever happen?  I don't think the compiler will
		 * generate code for this code segment -- sephe
		 */
		fdp = (int *)CMSG_DATA(cm) + newfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + newfds - 1;
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f))
				panic("unp_externalize");
			fp = *rp--;
			unp_fp_externalize(lp, fp, f, flags);
			*fdp-- = f;
		}
	}
	lwkt_reltoken(&revoke_token);

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differs.
	 */
	cm->cmsg_len = CMSG_LEN(newfds * sizeof(int));
	rights->m_len = cm->cmsg_len;

	return (0);
}

static void
unp_fp_externalize(struct lwp *lp, struct file *fp, int fd, int flags)
{
	if (lp) {
		struct filedesc *fdp = lp->lwp_proc->p_fd;

		KKASSERT(fd >= 0);
		if (fp->f_flag & FREVOKED) {
			struct file *fx;
			int error;

			kprintf("Warning: revoked fp exiting unix socket\n");
			error = falloc(lp, &fx, NULL);
			if (error == 0) {
				if (flags & MSG_CMSG_CLOEXEC)
					fdp->fd_files[fd].fileflags |=
					    UF_EXCLOSE;
				fsetfd(fdp, fx, fd);
				fdrop(fx);
			} else {
				fsetfd(fdp, NULL, fd);
			}
		} else {
			if (flags & MSG_CMSG_CLOEXEC)
				fdp->fd_files[fd].fileflags |= UF_EXCLOSE;
			fsetfd(fdp, fp, fd);
		}
	}
	spin_lock(&unp_spin);
	fp->f_msgcount--;
	unp_rights--;
	spin_unlock(&unp_spin);
	fdrop(fp);
}
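
/*
 * Illustrative userland sketch (not compiled; needs <sys/socket.h> and
 * <string.h>; names are made up): the receive side that drives
 * unp_externalize() above.  A descriptor arrives as SCM_RIGHTS ancillary
 * data and is copied out of the control buffer.
 */
#if 0
	struct msghdr mh;
	struct iovec iov;
	struct cmsghdr *cmp;
	union {
		struct cmsghdr cm;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	char data[1];
	int newfd = -1;

	iov.iov_base = data;
	iov.iov_len = sizeof(data);
	memset(&mh, 0, sizeof(mh));
	mh.msg_iov = &iov;
	mh.msg_iovlen = 1;
	mh.msg_control = u.buf;
	mh.msg_controllen = sizeof(u.buf);

	if (recvmsg(s, &mh, 0) >= 0 &&
	    (cmp = CMSG_FIRSTHDR(&mh)) != NULL &&
	    cmp->cmsg_level == SOL_SOCKET && cmp->cmsg_type == SCM_RIGHTS)
		memcpy(&newfd, CMSG_DATA(cmp), sizeof(newfd));
#endif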

void
unp_init(void)
{
	TAILQ_INIT(&unp_stream_head.list);
	TAILQ_INIT(&unp_dgram_head.list);
	TAILQ_INIT(&unp_seqpkt_head.list);

	spin_init(&unp_spin, "unpinit");

	SLIST_INIT(&unp_defdiscard_head);
	spin_init(&unp_defdiscard_spin, "unpdisc");
	TASK_INIT(&unp_defdiscard_task, 0, unp_defdiscard_taskfunc, NULL);

	/*
	 * Create the taskqueue for deferred discard, and stick it on
	 * the last CPU.
	 */
	unp_taskqueue = taskqueue_create("unp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &unp_taskqueue);
	taskqueue_start_threads(&unp_taskqueue, 1, TDPRI_KERN_DAEMON,
	    ncpus - 1, "unp taskq");
}

static int
unp_internalize(struct mbuf *control, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdescp;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct file **rp;
	struct file *fp;
	int i, fd, *fdp;
	struct cmsgcred *cmcred;
	int oldfds;
	u_int newlen;
	int error;

	KKASSERT(p);

	if ((cm->cmsg_type != SCM_RIGHTS && cm->cmsg_type != SCM_CREDS) ||
	    cm->cmsg_level != SOL_SOCKET ||
	    CMSG_ALIGN(cm->cmsg_len) != control->m_len)
		return EINVAL;

	/*
	 * Fill in credential information.
	 */
	if (cm->cmsg_type == SCM_CREDS) {
		cmcred = (struct cmsgcred *)CMSG_DATA(cm);
		cmcred->cmcred_pid = p->p_pid;
		cmcred->cmcred_uid = p->p_ucred->cr_ruid;
		cmcred->cmcred_gid = p->p_ucred->cr_rgid;
		cmcred->cmcred_euid = p->p_ucred->cr_uid;
		cmcred->cmcred_ngroups = MIN(p->p_ucred->cr_ngroups,
		    CMGROUP_MAX);
		for (i = 0; i < cmcred->cmcred_ngroups; i++)
			cmcred->cmcred_groups[i] = p->p_ucred->cr_groups[i];
		return 0;
	}

	/*
	 * cmsghdr may not be aligned, do not allow calculation(s) to
	 * go negative.
	 */
	if (cm->cmsg_len < CMSG_LEN(0))
		return EINVAL;

	oldfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(int);

	/*
	 * Now replace the integer FDs with pointers to the associated
	 * global file table entries.  Allocate a bigger buffer as
	 * necessary; but if a cluster is not enough, return E2BIG.
	 */
	newlen = CMSG_LEN(oldfds * sizeof(struct file *));
	if (newlen > MCLBYTES)
		return E2BIG;
	if (newlen - control->m_len > M_TRAILINGSPACE(control)) {
		if (control->m_flags & M_EXT)
			return E2BIG;
		MCLGET(control, M_WAITOK);
		if (!(control->m_flags & M_EXT))
			return ENOBUFS;

		/* copy the data to the cluster */
		memcpy(mtod(control, char *), cm, cm->cmsg_len);
		cm = mtod(control, struct cmsghdr *);
	}

	fdescp = p->p_fd;
	spin_lock_shared(&fdescp->fd_spin);

	/*
	 * Check that all the FDs passed in refer to legal OPEN files.
	 * If not, reject the entire operation.
	 */
	fdp = (int *)CMSG_DATA(cm);
	for (i = 0; i < oldfds; i++) {
		fd = *fdp++;
		if ((unsigned)fd >= fdescp->fd_nfiles ||
		    fdescp->fd_files[fd].fp == NULL) {
			error = EBADF;
			goto done;
		}
		if (fdescp->fd_files[fd].fp->f_type == DTYPE_KQUEUE) {
			error = EOPNOTSUPP;
			goto done;
		}
	}

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differs.
	 */
	cm->cmsg_len = newlen;
	control->m_len = CMSG_ALIGN(newlen);

	/*
	 * Transform the file descriptors into struct file pointers.
	 * If sizeof(struct file *) is bigger than or equal to sizeof(int),
	 * then do it in reverse order so that each int is read before
	 * its slot is overwritten.
	 * If sizeof(struct file *) is smaller than sizeof(int), then
	 * do it in forward order.
	 */
	if (sizeof(struct file *) >= sizeof(int)) {
		fdp = (int *)CMSG_DATA(cm) + oldfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + oldfds - 1;
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_files[*fdp--].fp;
			*rp-- = fp;
			fhold(fp);
			spin_lock(&unp_spin);
			fp->f_msgcount++;
			unp_rights++;
			spin_unlock(&unp_spin);
		}
	} else {
		/*
		 * XXX
		 * Will this ever happen?  I don't think the compiler will
		 * generate code for this code segment -- sephe
		 */
		fdp = (int *)CMSG_DATA(cm);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_files[*fdp++].fp;
			*rp++ = fp;
			fhold(fp);
			spin_lock(&unp_spin);
			fp->f_msgcount++;
			unp_rights++;
			spin_unlock(&unp_spin);
		}
	}
	error = 0;
done:
	spin_unlock_shared(&fdescp->fd_spin);
	return error;
}
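
/*
 * Illustrative userland sketch (not compiled; needs <sys/socket.h>,
 * <string.h>, <err.h>; names are made up): the send side that drives
 * unp_internalize() above, passing one descriptor as SCM_RIGHTS
 * ancillary data.  SCM_CREDS works the same way with a struct cmsgcred
 * payload that the kernel fills in, as seen above.
 */
#if 0
	struct msghdr mh;
	struct iovec iov;
	struct cmsghdr *cmp;
	union {
		struct cmsghdr cm;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	char data = '*';
	int fd_to_send = 0;		/* any open descriptor */

	iov.iov_base = &data;
	iov.iov_len = sizeof(data);
	memset(&mh, 0, sizeof(mh));
	mh.msg_iov = &iov;
	mh.msg_iovlen = 1;
	mh.msg_control = u.buf;
	mh.msg_controllen = sizeof(u.buf);

	cmp = CMSG_FIRSTHDR(&mh);
	cmp->cmsg_len = CMSG_LEN(sizeof(int));
	cmp->cmsg_level = SOL_SOCKET;
	cmp->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmp), &fd_to_send, sizeof(int));

	if (sendmsg(s, &mh, 0) < 0)
		err(1, "sendmsg");
#endif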

/*
 * Garbage collect in-transit file descriptors that get lost due to
 * loops (i.e. when a socket is sent to another process over itself,
 * and more complex situations).
 *
 * NOT MPSAFE - TODO socket flush code and maybe closef.  Rest is MPSAFE.
 */

struct unp_gc_info {
	struct file **extra_ref;
	struct file *locked_fp;
	int defer;
	int index;
	int maxindex;
};
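
/*
 * Summary of the collector below (descriptive note): unp_gc() is a
 * mark-and-sweep pass.  Phase 1 clears FMARK/FDEFER on every file
 * (unp_gc_clearmarks).  Phase 2 iterates unp_gc_checkmarks() until no
 * FDEFER remains, marking every externally reachable file and, via
 * unp_scan()/unp_mark(), everything queued in marked sockets' receive
 * buffers.  Phase 3 sweeps with unp_gc_checkrefs(): a file whose
 * references all come from in-transit messages and which never got
 * FMARK is unreachable, so it is flushed and closed.
 */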

static void
unp_gc(void)
{
	struct unp_gc_info info;
	static boolean_t unp_gcing;
	struct file **fpp;
	int i;

	/*
	 * Only one gc can be in-progress at any given moment
	 */
	spin_lock(&unp_spin);
	if (unp_gcing) {
		spin_unlock(&unp_spin);
		return;
	}
	unp_gcing = TRUE;
	spin_unlock(&unp_spin);

	lwkt_gettoken(&unp_token);

	/*
	 * Before going through all this, set all FDs to be NOT deferred
	 * and NOT externally accessible (not marked).  During the scan
	 * a fd can be marked externally accessible but we may or may not
	 * be able to immediately process it (controlled by FDEFER).
	 *
	 * If we loop, sleep a bit.  The complexity of the topology can
	 * cause multiple loops.  Also failure to acquire the socket's
	 * so_rcv token can cause us to loop.
	 */
	allfiles_scan_exclusive(unp_gc_clearmarks, NULL);
	do {
		info.defer = 0;
		allfiles_scan_exclusive(unp_gc_checkmarks, &info);
		if (info.defer)
			tsleep(&info, 0, "gcagain", 1);
	} while (info.defer);

	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights
	 * (unp_rights) is non-zero.  If during the sweep phase the gc code
	 * unp_discards, we end up doing a (full) closef on the descriptor.
	 * A closef on A results in the following chain.  Closef calls
	 * soo_close, which calls soclose.  Soclose calls first (through
	 * the switch uipc_usrreq) unp_detach, which re-invokes unp_gc.
	 * Unp_gc simply returns because the previous instance had set
	 * unp_gcing, and we return all the way back to soclose, which
	 * marks the socket with SS_NOFDREF, and then calls sofree.  Sofree
	 * calls sorflush to free up the rights that are queued in messages
	 * on the socket A, i.e., the reference on B.  The sorflush calls
	 * via the dom_dispose switch unp_dispose, which unp_scans with
	 * unp_discard.  This second instance of unp_discard just calls
	 * closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is
	 * already being closed, and the descriptor has already been marked
	 * with SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourself, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	info.extra_ref = kmalloc(256 * sizeof(struct file *), M_FILE,
	    M_WAITOK);
	info.maxindex = 256;

	do {
		/*
		 * Look for matches
		 */
		info.index = 0;
		allfiles_scan_exclusive(unp_gc_checkrefs, &info);

		/*
		 * For each FD on our hit list, do the following two things
		 */
		for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp) {
			struct file *tfp = *fpp;
			if (tfp->f_type == DTYPE_SOCKET &&
			    tfp->f_data != NULL)
				sorflush((struct socket *)(tfp->f_data));
		}
		for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp)
			closef(*fpp, NULL);
	} while (info.index == info.maxindex);

	lwkt_reltoken(&unp_token);

	kfree((caddr_t)info.extra_ref, M_FILE);
	unp_gcing = FALSE;
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_checkrefs(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;

	if (fp->f_count == 0)
		return (0);
	if (info->index == info->maxindex)
		return (-1);

	/*
	 * If all refs are from msgs, and it's not marked accessible
	 * then it must be referenced from some unreachable cycle
	 * of (shut-down) FDs, so include it in our
	 * list of FDs to remove.
	 */
	if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
		info->extra_ref[info->index++] = fp;
		fhold(fp);
	}
	return (0);
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_clearmarks(struct file *fp, void *data __unused)
{
	atomic_clear_int(&fp->f_flag, FMARK | FDEFER);
	return (0);
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_checkmarks(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;
	struct socket *so;

	/*
	 * If the file is not open, skip it.  Make sure it isn't marked
	 * deferred or we could loop forever, in case we somehow race
	 * something.
	 */
	if (fp->f_count == 0) {
		if (fp->f_flag & FDEFER)
			atomic_clear_int(&fp->f_flag, FDEFER);
		return (0);
	}
	/*
	 * If we already marked it as 'defer' in a
	 * previous pass, then try to process it this time
	 * and un-mark it.
	 */
	if (fp->f_flag & FDEFER) {
		atomic_clear_int(&fp->f_flag, FDEFER);
	} else {
		/*
		 * If it's not deferred, then check if it's
		 * already marked.. if so skip it.
		 */
		if (fp->f_flag & FMARK)
			return (0);
		/*
		 * If all references are from messages
		 * in transit, then skip it.  It's not
		 * externally accessible.
		 */
		if (fp->f_count == fp->f_msgcount)
			return (0);
		/*
		 * If it got this far then it must be
		 * externally accessible.
		 */
		atomic_set_int(&fp->f_flag, FMARK);
	}

	/*
	 * Either it was deferred, or it is externally
	 * accessible and not already marked so.
	 * Now check if it is possibly one of OUR sockets.
	 */
	if (fp->f_type != DTYPE_SOCKET ||
	    (so = (struct socket *)fp->f_data) == NULL) {
		return (0);
	}
	if (so->so_proto->pr_domain != &localdomain ||
	    !(so->so_proto->pr_flags & PR_RIGHTS)) {
		return (0);
	}

	/*
	 * So, Ok, it's one of our sockets and it IS externally accessible
	 * (or was deferred).  Now we look to see if we hold any file
	 * descriptors in its message buffers.  Follow those links and mark
	 * them as accessible too.
	 *
	 * We are holding multiple spinlocks here, if we cannot get the
	 * token non-blocking defer until the next loop.
	 */
	info->locked_fp = fp;
	if (lwkt_trytoken(&so->so_rcv.ssb_token)) {
		unp_scan(so->so_rcv.ssb_mb, unp_mark, info);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	} else {
		atomic_set_int(&fp->f_flag, FDEFER);
		++info->defer;
	}
	return (0);
}

/*
 * Dispose of the fps stored in an mbuf.
 *
 * The dds loop can cause additional fps to be entered onto the
 * list while it is running, flattening out the operation and avoiding
 * a deep kernel stack recursion.
 */
void
unp_dispose(struct mbuf *m)
{
	lwkt_gettoken(&unp_token);
	if (m)
		unp_scan(m, unp_discard, NULL);
	lwkt_reltoken(&unp_token);
}

static int
unp_listen(struct unpcb *unp, struct thread *td)
{
	struct proc *p = td->td_proc;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	KKASSERT(p);
	cru2x(p->p_ucred, &unp->unp_peercred);
	unp_setflags(unp, UNP_HAVEPCCACHED);
	return (0);
}

static void
unp_scan(struct mbuf *m0, void (*op)(struct file *, void *), void *data)
{
	struct mbuf *m;
	struct file **rp;
	struct cmsghdr *cm;
	int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - CMSG_LEN(0)) /
				    sizeof(void *);
				rp = (struct file **)CMSG_DATA(cm);
				for (i = 0; i < qfds; i++)
					(*op)(*rp++, data);
				break;		/* XXX, but saves time */
			}
		}
		m0 = m0->m_nextpkt;
	}
}

/*
 * Mark visibility.  info->defer is recalculated on every pass.
 */
static void
unp_mark(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;

	if ((fp->f_flag & FMARK) == 0) {
		++info->defer;
		atomic_set_int(&fp->f_flag, FMARK | FDEFER);
	} else if (fp->f_flag & FDEFER) {
		++info->defer;
	}
}

/*
 * Discard an fp previously held in a unix domain socket mbuf.  To
 * avoid blowing out the kernel stack due to contrived chain-reactions
 * we may have to defer the operation to a higher procedural level.
 *
 * Caller holds unp_token.
 */
static void
unp_discard(struct file *fp, void *data __unused)
{
	struct unp_defdiscard *d;

	spin_lock(&unp_spin);
	fp->f_msgcount--;
	unp_rights--;
	spin_unlock(&unp_spin);

	d = kmalloc(sizeof(*d), M_UNPCB, M_WAITOK);
	d->fp = fp;

	spin_lock(&unp_defdiscard_spin);
	SLIST_INSERT_HEAD(&unp_defdiscard_head, d, next);
	spin_unlock(&unp_defdiscard_spin);

	taskqueue_enqueue(unp_taskqueue, &unp_defdiscard_task);
}

static int
unp_find_lockref(struct sockaddr *nam, struct thread *td, short type,
    struct unpcb **unp_ret)
{
	struct proc *p = td->td_proc;
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp = NULL;
	struct socket *so;
	struct unpcb *unp;
	int error, len;
	struct nlookupdata nd;
	char buf[SOCK_MAXADDRLEN];

	*unp_ret = NULL;

	len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
	if (len <= 0) {
		error = EINVAL;
		goto failed;
	}
	strncpy(buf, soun->sun_path, len);
	buf[len] = 0;

	error = nlookup_init(&nd, buf, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	nlookup_done(&nd);
	if (error) {
		vp = NULL;
		goto failed;
	}

	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto failed;
	}
	error = VOP_EACCESS(vp, VWRITE, p->p_ucred);
	if (error)
		goto failed;
	so = vp->v_socket;
	if (so == NULL) {
		error = ECONNREFUSED;
		goto failed;
	}
	if (so->so_type != type) {
		error = EPROTOTYPE;
		goto failed;
	}

	/* Lock this unp. */
	unp = unp_getsocktoken(so);
	if (!UNP_ISATTACHED(unp)) {
		unp_reltoken(unp);
		error = ECONNREFUSED;
		goto failed;
	}
	/* And keep this unp referenced. */
	unp_reference(unp);

	/* Done! */
	*unp_ret = unp;
	error = 0;
failed:
	if (vp != NULL)
		vput(vp);
	return error;
}

static int
unp_connect_pair(struct unpcb *unp, struct unpcb *unp2)
{
	struct socket *so = unp->unp_socket;
	struct socket *so2 = unp2->unp_socket;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);
	UNP_ASSERT_TOKEN_HELD(unp2);

	KASSERT(so->so_type == so2->so_type,
	    ("socket type mismatch, so %d, so2 %d",
	     so->so_type, so2->so_type));

	if (!UNP_ISATTACHED(unp))
		return EINVAL;
	if (!UNP_ISATTACHED(unp2))
		return ECONNREFUSED;

	KASSERT(unp->unp_conn == NULL, ("unp is already connected"));
	unp->unp_conn = unp2;

	switch (so->so_type) {
	case SOCK_DGRAM:
		LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
		soisconnected(so);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		KASSERT(unp2->unp_conn == NULL,
		    ("unp2 is already connected"));
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect_pair: unknown socket type %d",
		    so->so_type);
	}
	return 0;
}

static void
unp_drop(struct unpcb *unp, int error)
{
	struct unpcb *unp2;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);
	KASSERT(unp->unp_flags & UNP_DETACHED, ("unp is not detached"));

	unp_disconnect(unp, error);

	while ((unp2 = LIST_FIRST(&unp->unp_refs)) != NULL) {
		lwkt_getpooltoken(unp2);
		unp_disconnect(unp2, ECONNRESET);
		lwkt_relpooltoken(unp2);
	}
	unp_setflags(unp, UNP_DROPPED);
}

static void
unp_defdiscard_taskfunc(void *arg __unused, int pending __unused)
{
	struct unp_defdiscard *d;

	spin_lock(&unp_defdiscard_spin);
	while ((d = SLIST_FIRST(&unp_defdiscard_head)) != NULL) {
		SLIST_REMOVE_HEAD(&unp_defdiscard_head, next);
		spin_unlock(&unp_defdiscard_spin);

		closef(d->fp, NULL);
		kfree(d, M_UNPCB);

		spin_lock(&unp_defdiscard_spin);
	}
	spin_unlock(&unp_defdiscard_spin);
}