/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.54.2.10 2003/03/04 17:28:09 nectar Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/domain.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>		/* XXX must be before <sys/file.h> */
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/mbuf.h>
#include <sys/nlookup.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/vnode.h>
#include <sys/kern_syscall.h>
#include <sys/taskqueue.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>
#include <sys/socketvar2.h>
#include <sys/msgport2.h>

#define UNP_DETACHED		UNP_PRIVATE1
#define UNP_CONNECTING		UNP_PRIVATE2
#define UNP_DROPPED		UNP_PRIVATE3
#define UNP_MARKER		UNP_PRIVATE4

#define UNP_ISATTACHED(unp)	\
    ((unp) != NULL && ((unp)->unp_flags & UNP_DETACHED) == 0)

#ifdef INVARIANTS
#define UNP_ASSERT_TOKEN_HELD(unp) \
    ASSERT_LWKT_TOKEN_HELD(lwkt_token_pool_lookup((unp)))
#else	/* !INVARIANTS */
#define UNP_ASSERT_TOKEN_HELD(unp)
#endif	/* INVARIANTS */

struct unp_defdiscard {
	SLIST_ENTRY(unp_defdiscard) next;
	struct file *fp;
};
SLIST_HEAD(unp_defdiscard_list, unp_defdiscard);

TAILQ_HEAD(unpcb_qhead, unpcb);
struct unp_global_head {
	struct unpcb_qhead	list;
	int			count;
};

static MALLOC_DEFINE(M_UNPCB, "unpcb", "unpcb struct");
static unp_gen_t unp_gencnt;

static struct unp_global_head unp_stream_head;
static struct unp_global_head unp_dgram_head;
static struct unp_global_head unp_seqpkt_head;
static struct lwkt_token unp_token = LWKT_TOKEN_INITIALIZER(unp_token);
static struct taskqueue *unp_taskqueue;

static struct unp_defdiscard_list unp_defdiscard_head;
static struct spinlock unp_defdiscard_spin;
static struct task unp_defdiscard_task;

/*
 * Unix communications domain.
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 *	lock pushdown
 */
static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL };
static ino_t unp_ino = 1;	/* prototype for fake inode numbers */

static int  unp_attach (struct socket *, struct pru_attach_info *);
static void unp_detach (struct unpcb *);
static int  unp_bind (struct unpcb *, struct sockaddr *, struct thread *);
static int  unp_connect (struct socket *, struct sockaddr *,
				struct thread *);
static void unp_disconnect(struct unpcb *, int);
static void unp_shutdown (struct unpcb *);
static void unp_gc (void);
static int  unp_gc_clearmarks(struct file *, void *);
static int  unp_gc_checkmarks(struct file *, void *);
static int  unp_gc_checkrefs(struct file *, void *);
static void unp_scan (struct mbuf *, void (*)(struct file *, void *),
				void *data);
static void unp_mark (struct file *, void *data);
static void unp_discard (struct file *, void *);
static int  unp_internalize (struct mbuf *, struct thread *);
static int  unp_listen (struct unpcb *, struct thread *);
static void unp_fp_externalize(struct lwp *lp, struct file *fp, int fd);
static int  unp_find_lockref(struct sockaddr *nam, struct thread *td,
				short type, struct unpcb **unp_ret);
static int  unp_connect_pair(struct unpcb *unp, struct unpcb *unp2);
static void unp_drop(struct unpcb *unp, int error);
static void unp_defdiscard_taskfunc(void *, int);

/*
 * SMP Considerations:
 *
 *	Since unp_token will be automatically released upon execution of
 *	blocking code, we need to reference unp_conn before any possible
 *	blocking code to prevent it from being ripped out from under us.
 *
 *	Any adjustment to unp->unp_conn requires both the global unp_token
 *	AND the per-unp token (lwkt_token_pool_lookup(unp)) to be held.
 *
 *	Any access to so_pcb to obtain unp requires the pool token for
 *	unp to be held.
 */

static __inline void
unp_reference(struct unpcb *unp)
{
	/* 0->1 transition will not work */
	KKASSERT(unp->unp_refcnt > 0);
	atomic_add_int(&unp->unp_refcnt, 1);
}

static __inline void
unp_free(struct unpcb *unp)
{
	KKASSERT(unp->unp_refcnt > 0);
	if (atomic_fetchadd_int(&unp->unp_refcnt, -1) == 1)
		unp_detach(unp);
}
static __inline struct unpcb *
unp_getsocktoken(struct socket *so)
{
	struct unpcb *unp;

	/*
	 * The unp pointer is invalid until we verify that it is
	 * good by re-checking so_pcb AFTER obtaining the token.
	 */
	while ((unp = so->so_pcb) != NULL) {
		lwkt_getpooltoken(unp);
		if (unp == so->so_pcb)
			break;
		lwkt_relpooltoken(unp);
	}
	return unp;
}

static __inline void
unp_reltoken(struct unpcb *unp)
{
	if (unp != NULL)
		lwkt_relpooltoken(unp);
}

static __inline void
unp_setflags(struct unpcb *unp, int flags)
{
	atomic_set_int(&unp->unp_flags, flags);
}

static __inline void
unp_clrflags(struct unpcb *unp, int flags)
{
	atomic_clear_int(&unp->unp_flags, flags);
}

static __inline struct unp_global_head *
unp_globalhead(short type)
{
	switch (type) {
	case SOCK_STREAM:
		return &unp_stream_head;
	case SOCK_DGRAM:
		return &unp_dgram_head;
	case SOCK_SEQPACKET:
		return &unp_seqpkt_head;
	default:
		panic("unknown socket type %d", type);
	}
}

/*
 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
 *	 will sofree() it when we return.
 */
static void
uipc_abort(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_setflags(unp, UNP_DETACHED);
		unp_drop(unp, ECONNABORTED);
		unp_free(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}
static void
uipc_accept(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
	} else {
		struct unpcb *unp2 = unp->unp_conn;

		/*
		 * Pass back name of connected socket,
		 * if it was bound and we are still connected
		 * (our peer may have closed already!).
		 */
		if (unp2 && unp2->unp_addr) {
			unp_reference(unp2);
			*msg->accept.nm_nam = dup_sockaddr(
				(struct sockaddr *)unp2->unp_addr);
			unp_free(unp2);
		} else {
			*msg->accept.nm_nam = dup_sockaddr(&sun_noname);
		}
		error = 0;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_attach(netmsg_t msg)
{
	int error;

	lwkt_gettoken(&unp_token);

	KASSERT(msg->base.nm_so->so_pcb == NULL, ("double unp attach"));
	error = unp_attach(msg->base.nm_so, msg->attach.nm_ai);

	lwkt_reltoken(&unp_token);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_bind(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp))
		error = unp_bind(unp, msg->bind.nm_nam, msg->bind.nm_td);
	else
		error = EINVAL;

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_connect(netmsg_t msg)
{
	int error;

	error = unp_connect(msg->base.nm_so, msg->connect.nm_nam,
			    msg->connect.nm_td);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_connect2(netmsg_t msg)
{
	int error;

	error = unp_connect2(msg->connect2.nm_so1, msg->connect2.nm_so2);
	lwkt_replymsg(&msg->lmsg, error);
}

/* control is EOPNOTSUPP */

static void
uipc_detach(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_setflags(unp, UNP_DETACHED);
		unp_drop(unp, 0);
		unp_free(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_disconnect(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		unp_disconnect(unp, 0);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_listen(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp) || unp->unp_vnode == NULL)
		error = EINVAL;
	else
		error = unp_listen(unp, msg->listen.nm_td);

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}
static void
uipc_peeraddr(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(msg->base.nm_so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
	} else if (unp->unp_conn && unp->unp_conn->unp_addr) {
		struct unpcb *unp2 = unp->unp_conn;

		unp_reference(unp2);
		*msg->peeraddr.nm_nam = dup_sockaddr(
			(struct sockaddr *)unp2->unp_addr);
		unp_free(unp2);
		error = 0;
	} else {
		/*
		 * XXX: It seems that this test always fails even when
		 * the connection is established.  So this else clause is
		 * added as a workaround to return a PF_LOCAL sockaddr.
		 */
		*msg->peeraddr.nm_nam = dup_sockaddr(&sun_noname);
		error = 0;
	}

	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_rcvd(netmsg_t msg)
{
	struct unpcb *unp, *unp2;
	struct socket *so;
	struct socket *so2;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	switch (so->so_type) {
	case SOCK_DGRAM:
		panic("uipc_rcvd DGRAM?");
		/*NOTREACHED*/
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (unp->unp_conn == NULL)
			break;
		unp2 = unp->unp_conn;	/* protected by pool token */

		/*
		 * Because we are transferring mbufs directly to the
		 * peer socket we have to use SSB_STOP on the sender
		 * to prevent it from building up infinite mbufs.
		 *
		 * As in several places in this module, we have to ref unp2
		 * to ensure that it does not get ripped out from under us
		 * if we block on the so2 token or in sowwakeup().
		 */
		so2 = unp2->unp_socket;
		unp_reference(unp2);
		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (so->so_rcv.ssb_cc < so2->so_snd.ssb_hiwat &&
		    so->so_rcv.ssb_mbcnt < so2->so_snd.ssb_mbmax) {
			atomic_clear_int(&so2->so_snd.ssb_flags, SSB_STOP);

			sowwakeup(so2);
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);
		unp_free(unp2);
		break;
	default:
		panic("uipc_rcvd unknown socktype");
		/*NOTREACHED*/
	}
	error = 0;
done:
	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}
/* pru_rcvoob is EOPNOTSUPP */

static void
uipc_send(netmsg_t msg)
{
	struct unpcb *unp, *unp2;
	struct socket *so;
	struct socket *so2;
	struct mbuf *control;
	struct mbuf *m;
	int error = 0;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	control = msg->send.nm_control;
	m = msg->send.nm_m;

	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto release;
	}

	if (msg->send.nm_flags & PRUS_OOB) {
		error = EOPNOTSUPP;
		goto release;
	}

	wakeup_start_delayed();

	if (control && (error = unp_internalize(control, msg->send.nm_td)))
		goto release;

	switch (so->so_type) {
	case SOCK_DGRAM:
	{
		struct sockaddr *from;

		if (msg->send.nm_addr) {
			if (unp->unp_conn) {
				error = EISCONN;
				break;
			}
			error = unp_find_lockref(msg->send.nm_addr,
			    msg->send.nm_td, so->so_type, &unp2);
			if (error)
				break;
			/*
			 * NOTE:
			 * unp2 is locked and referenced.
			 *
			 * We could unlock unp2 now, since it was checked
			 * and referenced.
			 */
			unp_reltoken(unp2);
		} else {
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
			unp2 = unp->unp_conn;
			unp_reference(unp2);
		}
		/* NOTE: unp2 is referenced. */
		so2 = unp2->unp_socket;

		if (unp->unp_addr)
			from = (struct sockaddr *)unp->unp_addr;
		else
			from = &sun_noname;

		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (ssb_appendaddr(&so2->so_rcv, from, m, control)) {
			sorwakeup(so2);
			m = NULL;
			control = NULL;
		} else {
			error = ENOBUFS;
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);

		unp_free(unp2);
		break;
	}

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/*
		 * Connect if not connected yet.
		 *
		 * Note: A better implementation would complain
		 * if not equal to the peer's address.
		 */
		if (unp->unp_conn == NULL) {
			if (msg->send.nm_addr) {
				error = unp_connect(so,
						    msg->send.nm_addr,
						    msg->send.nm_td);
				if (error)
					break;	/* XXX */
			}
			/*
			 * NOTE:
			 * unp_conn still could be NULL, even if the
			 * above unp_connect() succeeds; since the
			 * current unp's token could be released due
			 * to blocking operations after unp_conn is
			 * assigned.
			 */
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
		}
		if (so->so_state & SS_CANTSENDMORE) {
			error = EPIPE;
			break;
		}

		unp2 = unp->unp_conn;
		KASSERT(unp2 != NULL, ("unp is not connected"));
		so2 = unp2->unp_socket;

		unp_reference(unp2);

		/*
		 * Send to paired receive port, and then reduce
		 * send buffer hiwater marks to maintain backpressure.
		 * Wake up readers.
		 */
		lwkt_gettoken(&so2->so_rcv.ssb_token);
		if (control) {
			if (ssb_appendcontrol(&so2->so_rcv, m, control)) {
				control = NULL;
				m = NULL;
			}
		} else if (so->so_type == SOCK_SEQPACKET) {
			sbappendrecord(&so2->so_rcv.sb, m);
			m = NULL;
		} else {
			sbappend(&so2->so_rcv.sb, m);
			m = NULL;
		}

		/*
		 * Because we are transferring mbufs directly to the
		 * peer socket we have to use SSB_STOP on the sender
		 * to prevent it from building up infinite mbufs.
		 */
		if (so2->so_rcv.ssb_cc >= so->so_snd.ssb_hiwat ||
		    so2->so_rcv.ssb_mbcnt >= so->so_snd.ssb_mbmax) {
			atomic_set_int(&so->so_snd.ssb_flags, SSB_STOP);
		}
		lwkt_reltoken(&so2->so_rcv.ssb_token);
		sorwakeup(so2);

		unp_free(unp2);
		break;

	default:
		panic("uipc_send unknown socktype");
	}

	/*
	 * SEND_EOF is equivalent to a SEND followed by a SHUTDOWN.
	 */
	if (msg->send.nm_flags & PRUS_EOF) {
		socantsendmore(so);
		unp_shutdown(unp);
	}

	if (control && error != 0)
		unp_dispose(control);
release:
	unp_reltoken(unp);
	wakeup_end_delayed();

	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	lwkt_replymsg(&msg->lmsg, error);
}

/*
 * MPSAFE
 */
static void
uipc_sense(netmsg_t msg)
{
	struct unpcb *unp;
	struct socket *so;
	struct stat *sb;
	int error;

	so = msg->base.nm_so;
	sb = msg->sense.nm_stat;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	sb->st_blksize = so->so_snd.ssb_hiwat;
	sb->st_dev = NOUDEV;
	if (unp->unp_ino == 0) {	/* make up a non-zero inode number */
		unp->unp_ino = atomic_fetchadd_long(&unp_ino, 1);
		if (__predict_false(unp->unp_ino == 0))
			unp->unp_ino = atomic_fetchadd_long(&unp_ino, 1);
	}
	sb->st_ino = unp->unp_ino;
	error = 0;
done:
	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}
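/*
 * Illustrative sketch, not part of the kernel build: uipc_sense() above
 * backs fstat(2) on a unix domain socket, reporting the send buffer
 * high-water mark as st_blksize and a fabricated, stable, non-zero
 * inode number as st_ino.  From userland, with error handling elided
 * ('s' is a hypothetical unix domain socket descriptor):
 *
 *	#include <sys/socket.h>
 *	#include <sys/stat.h>
 *
 *	struct stat st;
 *
 *	fstat(s, &st);
 *	(st.st_blksize is so_snd.ssb_hiwat, PIPSIZ by default for
 *	 stream sockets; st.st_ino is the made-up inode number)
 */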
static void
uipc_shutdown(netmsg_t msg)
{
	struct socket *so;
	struct unpcb *unp;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	so = msg->base.nm_so;
	unp = unp_getsocktoken(so);

	if (UNP_ISATTACHED(unp)) {
		socantsendmore(so);
		unp_shutdown(unp);
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

static void
uipc_sockaddr(netmsg_t msg)
{
	struct unpcb *unp;
	int error;

	/*
	 * so_pcb is only modified with both the global and the unp
	 * pool token held.
	 */
	unp = unp_getsocktoken(msg->base.nm_so);

	if (UNP_ISATTACHED(unp)) {
		if (unp->unp_addr) {
			*msg->sockaddr.nm_nam =
				dup_sockaddr((struct sockaddr *)unp->unp_addr);
		}
		error = 0;
	} else {
		error = EINVAL;
	}

	unp_reltoken(unp);
	lwkt_replymsg(&msg->lmsg, error);
}

struct pr_usrreqs uipc_usrreqs = {
	.pru_abort = uipc_abort,
	.pru_accept = uipc_accept,
	.pru_attach = uipc_attach,
	.pru_bind = uipc_bind,
	.pru_connect = uipc_connect,
	.pru_connect2 = uipc_connect2,
	.pru_control = pr_generic_notsupp,
	.pru_detach = uipc_detach,
	.pru_disconnect = uipc_disconnect,
	.pru_listen = uipc_listen,
	.pru_peeraddr = uipc_peeraddr,
	.pru_rcvd = uipc_rcvd,
	.pru_rcvoob = pr_generic_notsupp,
	.pru_send = uipc_send,
	.pru_sense = uipc_sense,
	.pru_shutdown = uipc_shutdown,
	.pru_sockaddr = uipc_sockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive
};

void
uipc_ctloutput(netmsg_t msg)
{
	struct socket *so;
	struct sockopt *sopt;
	struct unpcb *unp;
	int error = 0;

	so = msg->base.nm_so;
	sopt = msg->ctloutput.nm_sopt;

	lwkt_gettoken(&unp_token);
	unp = unp_getsocktoken(so);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}

	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case LOCAL_PEERCRED:
			if (unp->unp_flags & UNP_HAVEPC)
				soopt_from_kbuf(sopt, &unp->unp_peercred,
						sizeof(unp->unp_peercred));
			else {
				if (so->so_type == SOCK_STREAM)
					error = ENOTCONN;
				else if (so->so_type == SOCK_SEQPACKET)
					error = ENOTCONN;
				else
					error = EINVAL;
			}
			break;
		default:
			error = EOPNOTSUPP;
			break;
		}
		break;
	case SOPT_SET:
	default:
		error = EOPNOTSUPP;
		break;
	}

done:
	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);

	lwkt_replymsg(&msg->lmsg, error);
}
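/*
 * Illustrative sketch, not part of the kernel build: the LOCAL_PEERCRED
 * case above is reached from userland through getsockopt(2) at level 0
 * (the unix domain protocol level).  Assuming 'fd' is a hypothetical
 * accepted or connected stream socket, the peer's connect-time
 * credentials can be fetched roughly like this:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/ucred.h>
 *	#include <sys/un.h>
 *
 *	struct xucred xuc;
 *	socklen_t len = sizeof(xuc);
 *
 *	if (getsockopt(fd, 0, LOCAL_PEERCRED, &xuc, &len) == 0)
 *		(xuc.cr_uid is the peer's effective uid at connect time)
 */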
/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 *
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 *
 * We want the local send/recv space to be significantly larger than lo0's
 * mtu of 16384.
 */
#ifndef PIPSIZ
#define	PIPSIZ	57344
#endif
static u_long	unpst_sendspace = PIPSIZ;
static u_long	unpst_recvspace = PIPSIZ;
static u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
static u_long	unpdg_recvspace = 4*1024;

static int unp_rights;			/* file descriptors in flight */
static struct spinlock unp_spin = SPINLOCK_INITIALIZER(&unp_spin, "unp_spin");

SYSCTL_DECL(_net_local_seqpacket);
SYSCTL_DECL(_net_local_stream);
SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
    &unpst_sendspace, 0, "Size of stream socket send buffer");
SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpst_recvspace, 0, "Size of stream socket receive buffer");

SYSCTL_DECL(_net_local_dgram);
SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
    &unpdg_sendspace, 0, "Max datagram socket size");
SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
    &unpdg_recvspace, 0, "Size of datagram socket receive buffer");

SYSCTL_DECL(_net_local);
SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
    "File descriptors in flight");
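/*
 * Illustrative sketch, not part of the kernel build: the knobs above are
 * ordinary read-write sysctls, so the buffer sizes can be inspected or
 * tuned from userland without a rebuild, e.g. with sysctlbyname(3):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	u_long space;
 *	size_t len = sizeof(space);
 *
 *	sysctlbyname("net.local.stream.sendspace", &space, &len, NULL, 0);
 */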
static int
unp_attach(struct socket *so, struct pru_attach_info *ai)
{
	struct unp_global_head *head;
	struct unpcb *unp;
	int error;

	lwkt_gettoken(&unp_token);

	if (so->so_snd.ssb_hiwat == 0 || so->so_rcv.ssb_hiwat == 0) {
		switch (so->so_type) {
		case SOCK_STREAM:
		case SOCK_SEQPACKET:
			error = soreserve(so, unpst_sendspace, unpst_recvspace,
					  ai->sb_rlimit);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace,
					  ai->sb_rlimit);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			goto failed;
	}

	/*
	 * In order to support sendfile we have to set either SSB_STOPSUPP
	 * or SSB_PREALLOC.  Unix domain sockets use the SSB_STOP flow
	 * control mechanism.
	 */
	if (so->so_type == SOCK_STREAM) {
		atomic_set_int(&so->so_rcv.ssb_flags, SSB_STOPSUPP);
		atomic_set_int(&so->so_snd.ssb_flags, SSB_STOPSUPP);
	}

	unp = kmalloc(sizeof(*unp), M_UNPCB, M_WAITOK | M_ZERO | M_NULLOK);
	if (unp == NULL) {
		error = ENOBUFS;
		goto failed;
	}
	unp->unp_refcnt = 1;
	unp->unp_gencnt = ++unp_gencnt;
	LIST_INIT(&unp->unp_refs);
	unp->unp_socket = so;
	unp->unp_rvnode = ai->fd_rdir;		/* jail cruft XXX JH */
	so->so_pcb = (caddr_t)unp;
	soreference(so);

	head = unp_globalhead(so->so_type);
	TAILQ_INSERT_TAIL(&head->list, unp, unp_link);
	head->count++;
	error = 0;
failed:
	lwkt_reltoken(&unp_token);
	return error;
}

static void
unp_detach(struct unpcb *unp)
{
	struct unp_global_head *head;
	struct socket *so;

	lwkt_gettoken(&unp_token);
	lwkt_getpooltoken(unp);

	so = unp->unp_socket;

	head = unp_globalhead(so->so_type);
	KASSERT(head->count > 0, ("invalid unp count"));
	TAILQ_REMOVE(&head->list, unp, unp_link);
	head->count--;

	unp->unp_gencnt = ++unp_gencnt;
	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = NULL;
		vrele(unp->unp_vnode);
		unp->unp_vnode = NULL;
	}
	soisdisconnected(so);
	soreference(so);		/* for delayed sorflush */
	KKASSERT(so->so_pcb == unp);
	so->so_pcb = NULL;		/* both tokens required */
	unp->unp_socket = NULL;
	sofree(so);			/* remove pcb ref */

	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(so);
		unp_gc();
	}
	sofree(so);
	lwkt_relpooltoken(unp);
	lwkt_reltoken(&unp_token);

	KASSERT(unp->unp_conn == NULL, ("unp is still connected"));
	KASSERT(LIST_EMPTY(&unp->unp_refs), ("unp still has references"));

	if (unp->unp_addr)
		kfree(unp->unp_addr, M_SONAME);
	kfree(unp, M_UNPCB);
}
static int
unp_bind(struct unpcb *unp, struct sockaddr *nam, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp;
	struct vattr vattr;
	int error, namelen;
	struct nlookupdata nd;
	char buf[SOCK_MAXADDRLEN];

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	if (unp->unp_vnode != NULL)
		return EINVAL;

	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0)
		return EINVAL;
	strncpy(buf, soun->sun_path, namelen);
	buf[namelen] = 0;	/* null-terminate the string */
	error = nlookup_init(&nd, buf, UIO_SYSSPACE,
			     NLC_LOCKVP | NLC_CREATE | NLC_REFDVP);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0 && nd.nl_nch.ncp->nc_vp != NULL)
		error = EADDRINUSE;
	if (error)
		goto done;

	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask);
	error = VOP_NCREATE(&nd.nl_nch, nd.nl_dvp, &vp, nd.nl_cred, &vattr);
	if (error == 0) {
		if (unp->unp_vnode == NULL) {
			vp->v_socket = unp->unp_socket;
			unp->unp_vnode = vp;
			unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam);
			vn_unlock(vp);
		} else {
			vput(vp);		/* late race */
			error = EINVAL;
		}
	}
done:
	nlookup_done(&nd);
	return (error);
}
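/*
 * Illustrative sketch, not part of the kernel build: unp_bind() above is
 * the kernel half of binding a unix domain socket to a filesystem path;
 * it creates a VSOCK vnode via VOP_NCREATE() and fails with EADDRINUSE
 * if the path already exists.  The userland half, with a hypothetical
 * path and error handling elided:
 *
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct sockaddr_un sun;
 *	int s = socket(AF_LOCAL, SOCK_STREAM, 0);
 *
 *	memset(&sun, 0, sizeof(sun));
 *	sun.sun_family = AF_LOCAL;
 *	strlcpy(sun.sun_path, "/tmp/example.sock", sizeof(sun.sun_path));
 *	sun.sun_len = SUN_LEN(&sun);
 *
 *	unlink("/tmp/example.sock");	(avoid the EADDRINUSE check)
 *	bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun));
 *	listen(s, 5);
 */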
static int
unp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct unpcb *unp, *unp2;
	int error, flags = 0;

	lwkt_gettoken(&unp_token);

	unp = unp_getsocktoken(so);
	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto failed;
	}

	if ((unp->unp_flags & UNP_CONNECTING) || unp->unp_conn != NULL) {
		error = EISCONN;
		goto failed;
	}

	flags = UNP_CONNECTING;
	unp_setflags(unp, flags);

	error = unp_find_lockref(nam, td, so->so_type, &unp2);
	if (error)
		goto failed;
	/*
	 * NOTE:
	 * unp2 is locked and referenced.
	 */

	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		struct socket *so2, *so3;
		struct unpcb *unp3;

		so2 = unp2->unp_socket;
		if (!(so2->so_options & SO_ACCEPTCONN) ||
		    (so3 = sonewconn_faddr(so2, 0, NULL,
					   TRUE /* keep ref */)) == NULL) {
			error = ECONNREFUSED;
			goto done;
		}
		/* so3 has a socket reference. */

		unp3 = unp_getsocktoken(so3);
		if (!UNP_ISATTACHED(unp3)) {
			unp_reltoken(unp3);
			/*
			 * Already aborted; we only need to drop the
			 * socket reference held by sonewconn_faddr().
			 */
			sofree(so3);
			error = ECONNREFUSED;
			goto done;
		}
		unp_reference(unp3);
		/*
		 * NOTE:
		 * unp3 is locked and referenced.
		 */

		/*
		 * Release so3 socket reference held by sonewconn_faddr().
		 * Since we have referenced unp3, neither unp3 nor so3 will
		 * be destroyed here.
		 */
		sofree(so3);

		if (unp2->unp_addr != NULL) {
			unp3->unp_addr = (struct sockaddr_un *)
			    dup_sockaddr((struct sockaddr *)unp2->unp_addr);
		}

		/*
		 * unp_peercred management:
		 *
		 * The connecter's (client's) credentials are copied
		 * from its process structure at the time of connect()
		 * (which is now).
		 */
		cru2x(td->td_proc->p_ucred, &unp3->unp_peercred);
		unp_setflags(unp3, UNP_HAVEPC);
		/*
		 * The receiver's (server's) credentials are copied
		 * from the unp_peercred member of the socket on which the
		 * former called listen(); unp_listen() cached that
		 * process's credentials at that time so we can use
		 * them now.
		 */
		KASSERT(unp2->unp_flags & UNP_HAVEPCCACHED,
		    ("unp_connect: listener without cached peercred"));
		memcpy(&unp->unp_peercred, &unp2->unp_peercred,
		    sizeof(unp->unp_peercred));
		unp_setflags(unp, UNP_HAVEPC);

		error = unp_connect_pair(unp, unp3);
		if (error)
			soabort_direct(so3);

		/* Done with unp3 */
		unp_free(unp3);
		unp_reltoken(unp3);
	} else {
		error = unp_connect_pair(unp, unp2);
	}
done:
	unp_free(unp2);
	unp_reltoken(unp2);
failed:
	if (flags)
		unp_clrflags(unp, flags);
	unp_reltoken(unp);

	lwkt_reltoken(&unp_token);
	return (error);
}

/*
 * Connect two unix domain sockets together.
 *
 * NOTE: Semantics for any change to unp_conn requires that the per-unp
 *	 pool token also be held.
 */
int
unp_connect2(struct socket *so, struct socket *so2)
{
	struct unpcb *unp, *unp2;
	int error;

	lwkt_gettoken(&unp_token);
	if (so2->so_type != so->so_type) {
		lwkt_reltoken(&unp_token);
		return (EPROTOTYPE);
	}
	unp = unp_getsocktoken(so);
	unp2 = unp_getsocktoken(so2);

	if (!UNP_ISATTACHED(unp)) {
		error = EINVAL;
		goto done;
	}
	if (!UNP_ISATTACHED(unp2)) {
		error = ECONNREFUSED;
		goto done;
	}

	if (unp->unp_conn != NULL) {
		error = EISCONN;
		goto done;
	}
	if ((so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET) &&
	    unp2->unp_conn != NULL) {
		error = EISCONN;
		goto done;
	}

	error = unp_connect_pair(unp, unp2);
done:
	unp_reltoken(unp2);
	unp_reltoken(unp);
	lwkt_reltoken(&unp_token);
	return (error);
}
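/*
 * Illustrative sketch, not part of the kernel build: unp_connect2() is
 * also the workhorse behind socketpair(2), which wires two anonymous
 * unix domain sockets directly together with no filesystem name at all:
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int sv[2];
 *
 *	socketpair(AF_LOCAL, SOCK_STREAM, 0, sv);
 *	write(sv[0], "ping", 4);	(readable on sv[1], and vice versa)
 */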
/*
 * Disconnect a unix domain socket pair.
 *
 * NOTE: Semantics for any change to unp_conn requires that the per-unp
 *	 pool token also be held.
 */
static void
unp_disconnect(struct unpcb *unp, int error)
{
	struct socket *so = unp->unp_socket;
	struct unpcb *unp2;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	if (error)
		so->so_error = error;

	while ((unp2 = unp->unp_conn) != NULL) {
		lwkt_getpooltoken(unp2);
		if (unp2 == unp->unp_conn)
			break;
		lwkt_relpooltoken(unp2);
	}
	if (unp2 == NULL)
		return;
	/* unp2 is locked. */

	KASSERT((unp2->unp_flags & UNP_DROPPED) == 0, ("unp2 was dropped"));

	unp->unp_conn = NULL;

	switch (so->so_type) {
	case SOCK_DGRAM:
		LIST_REMOVE(unp, unp_reflink);
		soclrstate(so, SS_ISCONNECTED);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/*
		 * Keep a reference before clearing the unp_conn
		 * to avoid racing uipc_detach()/uipc_abort() in
		 * another thread.
		 */
		unp_reference(unp2);
		KASSERT(unp2->unp_conn == unp, ("unp_conn mismatch"));
		unp2->unp_conn = NULL;

		soisdisconnected(so);
		soisdisconnected(unp2->unp_socket);

		unp_free(unp2);
		break;
	}

	lwkt_relpooltoken(unp2);
}

#ifdef notdef
void
unp_abort(struct unpcb *unp)
{
	lwkt_gettoken(&unp_token);
	unp_free(unp);
	lwkt_reltoken(&unp_token);
}
#endif

static int
prison_unpcb(struct thread *td, struct unpcb *unp)
{
	struct proc *p;

	if (td == NULL)
		return (0);
	if ((p = td->td_proc) == NULL)
		return (0);
	if (!p->p_ucred->cr_prison)
		return (0);
	if (p->p_fd->fd_rdir == unp->unp_rvnode)
		return (0);
	return (1);
}
static int
unp_pcblist(SYSCTL_HANDLER_ARGS)
{
	struct unp_global_head *head = arg1;
	int error, i, n;
	struct unpcb *unp, *marker;

	KKASSERT(curproc != NULL);

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = head->count;
		req->oldidx = (n + n/8) * sizeof(struct xunpcb);
		return 0;
	}

	if (req->newptr != NULL)
		return EPERM;

	marker = kmalloc(sizeof(*marker), M_UNPCB, M_WAITOK | M_ZERO);
	marker->unp_flags |= UNP_MARKER;

	lwkt_gettoken(&unp_token);

	n = head->count;
	i = 0;
	error = 0;

	TAILQ_INSERT_HEAD(&head->list, marker, unp_link);
	while ((unp = TAILQ_NEXT(marker, unp_link)) != NULL && i < n) {
		struct xunpcb xu;

		TAILQ_REMOVE(&head->list, marker, unp_link);
		TAILQ_INSERT_AFTER(&head->list, unp, marker, unp_link);

		if (unp->unp_flags & UNP_MARKER)
			continue;
		if (prison_unpcb(req->td, unp))
			continue;

		xu.xu_len = sizeof(xu);
		xu.xu_unpp = unp;

		/*
		 * NOTE:
		 * unp->unp_addr and unp->unp_conn are protected by
		 * unp_token.  So if we want to get rid of unp_token
		 * or reduce the coverage of unp_token, care must be
		 * taken.
		 */
		if (unp->unp_addr) {
			bcopy(unp->unp_addr, &xu.xu_addr,
			      unp->unp_addr->sun_len);
		}
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			bcopy(unp->unp_conn->unp_addr,
			      &xu.xu_caddr,
			      unp->unp_conn->unp_addr->sun_len);
		}
		bcopy(unp, &xu.xu_unp, sizeof(*unp));
		sotoxsocket(unp->unp_socket, &xu.xu_socket);

		/* NOTE: This could block and temporarily release unp_token */
		error = SYSCTL_OUT(req, &xu, sizeof(xu));
		if (error)
			break;
		++i;
	}
	TAILQ_REMOVE(&head->list, marker, unp_link);

	lwkt_reltoken(&unp_token);

	kfree(marker, M_UNPCB);
	return error;
}

SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD,
	    &unp_dgram_head, 0, unp_pcblist, "S,xunpcb",
	    "List of active local datagram sockets");
SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD,
	    &unp_stream_head, 0, unp_pcblist, "S,xunpcb",
	    "List of active local stream sockets");
SYSCTL_PROC(_net_local_seqpacket, OID_AUTO, pcblist, CTLFLAG_RD,
	    &unp_seqpkt_head, 0, unp_pcblist, "S,xunpcb",
	    "List of active local seqpacket sockets");

static void
unp_shutdown(struct unpcb *unp)
{
	struct socket *so;

	if ((unp->unp_socket->so_type == SOCK_STREAM ||
	     unp->unp_socket->so_type == SOCK_SEQPACKET) &&
	    unp->unp_conn != NULL && (so = unp->unp_conn->unp_socket)) {
		socantrcvmore(so);
	}
}

#ifdef notdef
void
unp_drain(void)
{
	lwkt_gettoken(&unp_token);
	lwkt_reltoken(&unp_token);
}
#endif
int
unp_externalize(struct mbuf *rights)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;		/* XXX */
	struct lwp *lp = td->td_lwp;
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	int *fdp;
	int i;
	struct file **rp;
	struct file *fp;
	int newfds = (cm->cmsg_len - (CMSG_DATA(cm) - (u_char *)cm))
			/ sizeof(struct file *);
	int f;

	/*
	 * If the new FDs will not fit, then we free them all.
	 */
	if (!fdavail(p, newfds)) {
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			fp = *rp;
			/*
			 * Zero the pointer before calling unp_discard,
			 * since it may end up in unp_gc()..
			 */
			*rp++ = NULL;
			unp_discard(fp, NULL);
		}
		return (EMSGSIZE);
	}

	/*
	 * Now change each pointer to an fd in the global table to
	 * an integer that is the index to the local fd table entry
	 * that we set up to point to the global one we are transferring.
	 * If sizeof(struct file *) is bigger than or equal to sizeof(int),
	 * then do it in forward order.  In that case, an integer will
	 * always come in the same place or before its corresponding
	 * struct file pointer.
	 * If sizeof(struct file *) is smaller than sizeof(int), then
	 * do it in reverse order.
	 *
	 * Hold revoke_token in 'shared' mode, so that we won't miss
	 * the FREVOKED update on fps being externalized (fsetfd).
	 */
	lwkt_gettoken_shared(&revoke_token);
	if (sizeof(struct file *) >= sizeof(int)) {
		fdp = (int *)CMSG_DATA(cm);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f)) {
				int j;

				/*
				 * The previous fdavail() can't guarantee
				 * fdalloc() success due to SMP races.
				 * Just clean up and return the same
				 * error value as if fdavail() failed.
				 */

				/* Close externalized files */
				for (j = 0; j < i; j++)
					kern_close(fdp[j]);
				/* Discard the rest of internal files */
				for (; i < newfds; i++)
					unp_discard(rp[i], NULL);
				/* Wipe out the control message */
				for (i = 0; i < newfds; i++)
					rp[i] = NULL;

				lwkt_reltoken(&revoke_token);
				return (EMSGSIZE);
			}
			fp = rp[i];
			unp_fp_externalize(lp, fp, f);
			fdp[i] = f;
		}
	} else {
		/*
		 * XXX
		 * Will this ever happen?  I don't think the compiler will
		 * generate code for this code segment -- sephe
		 */
		fdp = (int *)CMSG_DATA(cm) + newfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + newfds - 1;
		for (i = 0; i < newfds; i++) {
			if (fdalloc(p, 0, &f))
				panic("unp_externalize");
			fp = *rp--;
			unp_fp_externalize(lp, fp, f);
			*fdp-- = f;
		}
	}
	lwkt_reltoken(&revoke_token);

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differ.
	 */
	cm->cmsg_len = CMSG_LEN(newfds * sizeof(int));
	rights->m_len = cm->cmsg_len;

	return (0);
}
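/*
 * Illustrative sketch, not part of the kernel build: unp_externalize()
 * above is what turns the in-transit struct file pointers back into
 * plain file descriptors when userland receives an SCM_RIGHTS control
 * message.  Receive side, assuming 's' is a hypothetical connected
 * unix domain socket and with error handling elided:
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	union {
 *		struct cmsghdr cm;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} u;
 *	struct msghdr mh;
 *	struct cmsghdr *cm;
 *	char b;
 *	struct iovec iov = { &b, 1 };
 *	int fd = -1;
 *
 *	memset(&mh, 0, sizeof(mh));
 *	mh.msg_iov = &iov;
 *	mh.msg_iovlen = 1;
 *	mh.msg_control = u.buf;
 *	mh.msg_controllen = sizeof(u.buf);
 *
 *	recvmsg(s, &mh, 0);
 *	cm = CMSG_FIRSTHDR(&mh);
 *	if (cm != NULL && cm->cmsg_level == SOL_SOCKET &&
 *	    cm->cmsg_type == SCM_RIGHTS)
 *		memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
 */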
static void
unp_fp_externalize(struct lwp *lp, struct file *fp, int fd)
{
	if (lp) {
		KKASSERT(fd >= 0);
		if (fp->f_flag & FREVOKED) {
			struct file *fx;
			int error;

			kprintf("Warning: revoked fp exiting unix socket\n");
			error = falloc(lp, &fx, NULL);
			if (error == 0) {
				fsetfd(lp->lwp_proc->p_fd, fx, fd);
				fdrop(fx);
			} else {
				fsetfd(lp->lwp_proc->p_fd, NULL, fd);
			}
		} else {
			fsetfd(lp->lwp_proc->p_fd, fp, fd);
		}
	}
	spin_lock(&unp_spin);
	fp->f_msgcount--;
	unp_rights--;
	spin_unlock(&unp_spin);
	fdrop(fp);
}

void
unp_init(void)
{
	TAILQ_INIT(&unp_stream_head.list);
	TAILQ_INIT(&unp_dgram_head.list);
	TAILQ_INIT(&unp_seqpkt_head.list);

	spin_init(&unp_spin, "unpinit");

	SLIST_INIT(&unp_defdiscard_head);
	spin_init(&unp_defdiscard_spin, "unpdisc");
	TASK_INIT(&unp_defdiscard_task, 0, unp_defdiscard_taskfunc, NULL);

	/*
	 * Create a taskqueue for deferred discards, and stick it on
	 * the last CPU.
	 */
	unp_taskqueue = taskqueue_create("unp_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &unp_taskqueue);
	taskqueue_start_threads(&unp_taskqueue, 1, TDPRI_KERN_DAEMON,
	    ncpus - 1, "unp taskq");
}

static int
unp_internalize(struct mbuf *control, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct filedesc *fdescp;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct file **rp;
	struct file *fp;
	int i, fd, *fdp;
	struct cmsgcred *cmcred;
	int oldfds;
	u_int newlen;
	int error;

	KKASSERT(p);

	if ((cm->cmsg_type != SCM_RIGHTS && cm->cmsg_type != SCM_CREDS) ||
	    cm->cmsg_level != SOL_SOCKET ||
	    CMSG_ALIGN(cm->cmsg_len) != control->m_len)
		return EINVAL;

	/*
	 * Fill in credential information.
	 */
	if (cm->cmsg_type == SCM_CREDS) {
		cmcred = (struct cmsgcred *)CMSG_DATA(cm);
		cmcred->cmcred_pid = p->p_pid;
		cmcred->cmcred_uid = p->p_ucred->cr_ruid;
		cmcred->cmcred_gid = p->p_ucred->cr_rgid;
		cmcred->cmcred_euid = p->p_ucred->cr_uid;
		cmcred->cmcred_ngroups = MIN(p->p_ucred->cr_ngroups,
					     CMGROUP_MAX);
		for (i = 0; i < cmcred->cmcred_ngroups; i++)
			cmcred->cmcred_groups[i] = p->p_ucred->cr_groups[i];
		return 0;
	}

	/*
	 * cmsghdr may not be aligned, do not allow calculation(s) to
	 * go negative.
	 */
	if (cm->cmsg_len < CMSG_LEN(0))
		return EINVAL;

	oldfds = (cm->cmsg_len - CMSG_LEN(0)) / sizeof(int);

	/*
	 * Now replace the integer FDs with pointers to
	 * the associated global file table entries.
	 * Allocate a bigger buffer as necessary.  But if a cluster is
	 * not enough, return E2BIG.
	 */
	newlen = CMSG_LEN(oldfds * sizeof(struct file *));
	if (newlen > MCLBYTES)
		return E2BIG;
	if (newlen - control->m_len > M_TRAILINGSPACE(control)) {
		if (control->m_flags & M_EXT)
			return E2BIG;
		MCLGET(control, M_WAITOK);
		if (!(control->m_flags & M_EXT))
			return ENOBUFS;

		/* copy the data to the cluster */
		memcpy(mtod(control, char *), cm, cm->cmsg_len);
		cm = mtod(control, struct cmsghdr *);
	}

	fdescp = p->p_fd;
	spin_lock_shared(&fdescp->fd_spin);

	/*
	 * Check that all the FDs passed in refer to legal OPEN files.
	 * If not, reject the entire operation.
	 */
	fdp = (int *)CMSG_DATA(cm);
	for (i = 0; i < oldfds; i++) {
		fd = *fdp++;
		if ((unsigned)fd >= fdescp->fd_nfiles ||
		    fdescp->fd_files[fd].fp == NULL) {
			error = EBADF;
			goto done;
		}
		if (fdescp->fd_files[fd].fp->f_type == DTYPE_KQUEUE) {
			error = EOPNOTSUPP;
			goto done;
		}
	}

	/*
	 * Adjust length, in case sizeof(struct file *) and sizeof(int)
	 * differ.
	 */
	cm->cmsg_len = newlen;
	control->m_len = CMSG_ALIGN(newlen);

	/*
	 * Transform the file descriptors into struct file pointers.
	 * If sizeof(struct file *) is bigger than or equal to sizeof(int),
	 * then do it in reverse order so that the int won't get
	 * overwritten until we're done.
	 * If sizeof(struct file *) is smaller than sizeof(int), then
	 * do it in forward order.
	 */
	if (sizeof(struct file *) >= sizeof(int)) {
		fdp = (int *)CMSG_DATA(cm) + oldfds - 1;
		rp = (struct file **)CMSG_DATA(cm) + oldfds - 1;
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_files[*fdp--].fp;
			*rp-- = fp;
			fhold(fp);
			spin_lock(&unp_spin);
			fp->f_msgcount++;
			unp_rights++;
			spin_unlock(&unp_spin);
		}
	} else {
		/*
		 * XXX
		 * Will this ever happen?  I don't think the compiler will
		 * generate code for this code segment -- sephe
		 */
		fdp = (int *)CMSG_DATA(cm);
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < oldfds; i++) {
			fp = fdescp->fd_files[*fdp++].fp;
			*rp++ = fp;
			fhold(fp);
			spin_lock(&unp_spin);
			fp->f_msgcount++;
			unp_rights++;
			spin_unlock(&unp_spin);
		}
	}
	error = 0;
done:
	spin_unlock_shared(&fdescp->fd_spin);
	return error;
}
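/*
 * Illustrative sketch, not part of the kernel build: unp_internalize()
 * above consumes an SCM_RIGHTS control message built by a sender like
 * the following, converting each int fd into a held struct file pointer
 * for the duration of the transit ('s' and 'fd_to_send' are
 * hypothetical; error handling elided):
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	union {
 *		struct cmsghdr cm;
 *		char buf[CMSG_SPACE(sizeof(int))];
 *	} u;
 *	struct msghdr mh;
 *	struct cmsghdr *cm;
 *	struct iovec iov = { "x", 1 };	(send at least one byte of data)
 *
 *	memset(&mh, 0, sizeof(mh));
 *	mh.msg_iov = &iov;
 *	mh.msg_iovlen = 1;
 *	mh.msg_control = u.buf;
 *	mh.msg_controllen = sizeof(u.buf);
 *
 *	cm = CMSG_FIRSTHDR(&mh);
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type = SCM_RIGHTS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_send, sizeof(int));
 *
 *	sendmsg(s, &mh, 0);
 */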
1720 * 1721 * NOT MPSAFE - TODO socket flush code and maybe closef. Rest is MPSAFE. 1722 */ 1723 1724 struct unp_gc_info { 1725 struct file **extra_ref; 1726 struct file *locked_fp; 1727 int defer; 1728 int index; 1729 int maxindex; 1730 }; 1731 1732 static void 1733 unp_gc(void) 1734 { 1735 struct unp_gc_info info; 1736 static boolean_t unp_gcing; 1737 struct file **fpp; 1738 int i; 1739 1740 /* 1741 * Only one gc can be in-progress at any given moment 1742 */ 1743 spin_lock(&unp_spin); 1744 if (unp_gcing) { 1745 spin_unlock(&unp_spin); 1746 return; 1747 } 1748 unp_gcing = TRUE; 1749 spin_unlock(&unp_spin); 1750 1751 lwkt_gettoken(&unp_token); 1752 1753 /* 1754 * Before going through all this, set all FDs to be NOT defered 1755 * and NOT externally accessible (not marked). During the scan 1756 * a fd can be marked externally accessible but we may or may not 1757 * be able to immediately process it (controlled by FDEFER). 1758 * 1759 * If we loop sleep a bit. The complexity of the topology can cause 1760 * multiple loops. Also failure to acquire the socket's so_rcv 1761 * token can cause us to loop. 1762 */ 1763 allfiles_scan_exclusive(unp_gc_clearmarks, NULL); 1764 do { 1765 info.defer = 0; 1766 allfiles_scan_exclusive(unp_gc_checkmarks, &info); 1767 if (info.defer) 1768 tsleep(&info, 0, "gcagain", 1); 1769 } while (info.defer); 1770 1771 /* 1772 * We grab an extra reference to each of the file table entries 1773 * that are not otherwise accessible and then free the rights 1774 * that are stored in messages on them. 1775 * 1776 * The bug in the orginal code is a little tricky, so I'll describe 1777 * what's wrong with it here. 1778 * 1779 * It is incorrect to simply unp_discard each entry for f_msgcount 1780 * times -- consider the case of sockets A and B that contain 1781 * references to each other. On a last close of some other socket, 1782 * we trigger a gc since the number of outstanding rights (unp_rights) 1783 * is non-zero. If during the sweep phase the gc code un_discards, 1784 * we end up doing a (full) closef on the descriptor. A closef on A 1785 * results in the following chain. Closef calls soo_close, which 1786 * calls soclose. Soclose calls first (through the switch 1787 * uipc_usrreq) unp_detach, which re-invokes unp_gc. Unp_gc simply 1788 * returns because the previous instance had set unp_gcing, and 1789 * we return all the way back to soclose, which marks the socket 1790 * with SS_NOFDREF, and then calls sofree. Sofree calls sorflush 1791 * to free up the rights that are queued in messages on the socket A, 1792 * i.e., the reference on B. The sorflush calls via the dom_dispose 1793 * switch unp_dispose, which unp_scans with unp_discard. This second 1794 * instance of unp_discard just calls closef on B. 1795 * 1796 * Well, a similar chain occurs on B, resulting in a sorflush on B, 1797 * which results in another closef on A. Unfortunately, A is already 1798 * being closed, and the descriptor has already been marked with 1799 * SS_NOFDREF, and soclose panics at this point. 1800 * 1801 * Here, we first take an extra reference to each inaccessible 1802 * descriptor. Then, we call sorflush ourself, since we know 1803 * it is a Unix domain socket anyhow. After we destroy all the 1804 * rights carried in messages, we do a last closef to get rid 1805 * of our extra reference. This is the last close, and the 1806 * unp_detach etc will shut down the socket. 
static void
unp_gc(void)
{
	struct unp_gc_info info;
	static boolean_t unp_gcing;
	struct file **fpp;
	int i;

	/*
	 * Only one gc can be in-progress at any given moment
	 */
	spin_lock(&unp_spin);
	if (unp_gcing) {
		spin_unlock(&unp_spin);
		return;
	}
	unp_gcing = TRUE;
	spin_unlock(&unp_spin);

	lwkt_gettoken(&unp_token);

	/*
	 * Before going through all this, set all FDs to be NOT deferred
	 * and NOT externally accessible (not marked).  During the scan
	 * a fd can be marked externally accessible but we may or may not
	 * be able to immediately process it (controlled by FDEFER).
	 *
	 * If we loop sleep a bit.  The complexity of the topology can cause
	 * multiple loops.  Also failure to acquire the socket's so_rcv
	 * token can cause us to loop.
	 */
	allfiles_scan_exclusive(unp_gc_clearmarks, NULL);
	do {
		info.defer = 0;
		allfiles_scan_exclusive(unp_gc_checkmarks, &info);
		if (info.defer)
			tsleep(&info, 0, "gcagain", 1);
	} while (info.defer);

	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights
	 * (unp_rights) is non-zero.  If during the sweep phase the gc code
	 * unp_discards, we end up doing a (full) closef on the descriptor.
	 * A closef on A results in the following chain.  Closef calls
	 * soo_close, which calls soclose.  Soclose calls first (through the
	 * switch uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc
	 * simply returns because the previous instance had set unp_gcing,
	 * and we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  The sorflush calls via the dom_dispose
	 * switch unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the descriptor has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourself, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	info.extra_ref = kmalloc(256 * sizeof(struct file *), M_FILE,
				 M_WAITOK);
	info.maxindex = 256;

	do {
		/*
		 * Look for matches
		 */
		info.index = 0;
		allfiles_scan_exclusive(unp_gc_checkrefs, &info);

		/*
		 * For each FD on our hit list, do the following two things
		 */
		for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp) {
			struct file *tfp = *fpp;

			if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL)
				sorflush((struct socket *)(tfp->f_data));
		}
		for (i = info.index, fpp = info.extra_ref; --i >= 0; ++fpp)
			closef(*fpp, NULL);
	} while (info.index == info.maxindex);

	lwkt_reltoken(&unp_token);

	kfree((caddr_t)info.extra_ref, M_FILE);
	unp_gcing = FALSE;
}
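/*
 * Illustrative sketch, not part of the kernel build: the unreachable
 * cycle unp_gc() exists to reclaim can be produced from userland by
 * sending a socket over itself and then closing every descriptor:
 *
 *	int sv[2];
 *
 *	socketpair(AF_LOCAL, SOCK_STREAM, 0, sv);
 *	(send sv[0] across sv[0] in an SCM_RIGHTS message, as in the
 *	 sendmsg() sketch after unp_internalize() above)
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * The in-flight message now holds the only reference to sv[0]'s file
 * (f_count == f_msgcount), so the mark pass can never reach it and the
 * sweep above reclaims it.
 */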
/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_checkrefs(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;

	if (fp->f_count == 0)
		return(0);
	if (info->index == info->maxindex)
		return(-1);

	/*
	 * If all refs are from msgs, and it's not marked accessible
	 * then it must be referenced from some unreachable cycle
	 * of (shut-down) FDs, so include it in our
	 * list of FDs to remove.
	 */
	if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
		info->extra_ref[info->index++] = fp;
		fhold(fp);
	}
	return(0);
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_clearmarks(struct file *fp, void *data __unused)
{
	atomic_clear_int(&fp->f_flag, FMARK | FDEFER);
	return(0);
}

/*
 * MPSAFE - NOTE: filehead list and file pointer spinlocked on entry
 */
static int
unp_gc_checkmarks(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;
	struct socket *so;

	/*
	 * If the file is not open, skip it.  Make sure it isn't marked
	 * deferred or we could loop forever, in case we somehow race
	 * something.
	 */
	if (fp->f_count == 0) {
		if (fp->f_flag & FDEFER)
			atomic_clear_int(&fp->f_flag, FDEFER);
		return(0);
	}
	/*
	 * If we already marked it as 'defer' in a
	 * previous pass, then try to process it this time
	 * and un-mark it.
	 */
	if (fp->f_flag & FDEFER) {
		atomic_clear_int(&fp->f_flag, FDEFER);
	} else {
		/*
		 * If it's not deferred, then check if it's
		 * already marked.. if so skip it.
		 */
		if (fp->f_flag & FMARK)
			return(0);
		/*
		 * If all references are from messages
		 * in transit, then skip it.  It's not
		 * externally accessible.
		 */
		if (fp->f_count == fp->f_msgcount)
			return(0);
		/*
		 * If it got this far then it must be
		 * externally accessible.
		 */
		atomic_set_int(&fp->f_flag, FMARK);
	}

	/*
	 * Either it was deferred, or it is externally
	 * accessible and not already marked so.
	 * Now check if it is possibly one of OUR sockets.
	 */
	if (fp->f_type != DTYPE_SOCKET ||
	    (so = (struct socket *)fp->f_data) == NULL) {
		return(0);
	}
	if (so->so_proto->pr_domain != &localdomain ||
	    !(so->so_proto->pr_flags & PR_RIGHTS)) {
		return(0);
	}

	/*
	 * So, Ok, it's one of our sockets and it IS externally accessible
	 * (or was deferred).  Now we look to see if we hold any file
	 * descriptors in its message buffers.  Follow those links and mark
	 * them as accessible too.
	 *
	 * We are holding multiple spinlocks here, if we cannot get the
	 * token non-blocking defer until the next loop.
	 */
	info->locked_fp = fp;
	if (lwkt_trytoken(&so->so_rcv.ssb_token)) {
		unp_scan(so->so_rcv.ssb_mb, unp_mark, info);
		lwkt_reltoken(&so->so_rcv.ssb_token);
	} else {
		atomic_set_int(&fp->f_flag, FDEFER);
		++info->defer;
	}
	return (0);
}
/*
 * Dispose of the fps stored in an mbuf.
 *
 * The dds loop can cause additional fps to be entered onto the
 * list while it is running, flattening out the operation and avoiding
 * a deep kernel stack recursion.
 */
void
unp_dispose(struct mbuf *m)
{
	lwkt_gettoken(&unp_token);
	if (m)
		unp_scan(m, unp_discard, NULL);
	lwkt_reltoken(&unp_token);
}

static int
unp_listen(struct unpcb *unp, struct thread *td)
{
	struct proc *p = td->td_proc;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);

	KKASSERT(p);
	cru2x(p->p_ucred, &unp->unp_peercred);
	unp_setflags(unp, UNP_HAVEPCCACHED);
	return (0);
}

static void
unp_scan(struct mbuf *m0, void (*op)(struct file *, void *), void *data)
{
	struct mbuf *m;
	struct file **rp;
	struct cmsghdr *cm;
	int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - CMSG_LEN(0)) /
				       sizeof(void *);
				rp = (struct file **)CMSG_DATA(cm);
				for (i = 0; i < qfds; i++)
					(*op)(*rp++, data);
				break;		/* XXX, but saves time */
			}
		}
		m0 = m0->m_nextpkt;
	}
}

/*
 * Mark visibility.  info->defer is recalculated on every pass.
 */
static void
unp_mark(struct file *fp, void *data)
{
	struct unp_gc_info *info = data;

	if ((fp->f_flag & FMARK) == 0) {
		++info->defer;
		atomic_set_int(&fp->f_flag, FMARK | FDEFER);
	} else if (fp->f_flag & FDEFER) {
		++info->defer;
	}
}

/*
 * Discard an fp previously held in a unix domain socket mbuf.  To
 * avoid blowing out the kernel stack due to contrived chain-reactions
 * we may have to defer the operation to a higher procedural level.
 *
 * Caller holds unp_token.
 */
static void
unp_discard(struct file *fp, void *data __unused)
{
	struct unp_defdiscard *d;

	spin_lock(&unp_spin);
	fp->f_msgcount--;
	unp_rights--;
	spin_unlock(&unp_spin);

	d = kmalloc(sizeof(*d), M_UNPCB, M_WAITOK);
	d->fp = fp;

	spin_lock(&unp_defdiscard_spin);
	SLIST_INSERT_HEAD(&unp_defdiscard_head, d, next);
	spin_unlock(&unp_defdiscard_spin);

	taskqueue_enqueue(unp_taskqueue, &unp_defdiscard_task);
}

static int
unp_find_lockref(struct sockaddr *nam, struct thread *td, short type,
    struct unpcb **unp_ret)
{
	struct proc *p = td->td_proc;
	struct sockaddr_un *soun = (struct sockaddr_un *)nam;
	struct vnode *vp = NULL;
	struct socket *so;
	struct unpcb *unp;
	int error, len;
	struct nlookupdata nd;
	char buf[SOCK_MAXADDRLEN];

	*unp_ret = NULL;

	len = nam->sa_len - offsetof(struct sockaddr_un, sun_path);
	if (len <= 0) {
		error = EINVAL;
		goto failed;
	}
	strncpy(buf, soun->sun_path, len);
	buf[len] = 0;

	error = nlookup_init(&nd, buf, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	nlookup_done(&nd);
	if (error) {
		vp = NULL;
		goto failed;
	}

	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto failed;
	}
	error = VOP_EACCESS(vp, VWRITE, p->p_ucred);
	if (error)
		goto failed;
	so = vp->v_socket;
	if (so == NULL) {
		error = ECONNREFUSED;
		goto failed;
	}
	if (so->so_type != type) {
		error = EPROTOTYPE;
		goto failed;
	}

	/* Lock this unp. */
	unp = unp_getsocktoken(so);
	if (!UNP_ISATTACHED(unp)) {
		unp_reltoken(unp);
		error = ECONNREFUSED;
		goto failed;
	}
	/* And keep this unp referenced. */
	unp_reference(unp);

	/* Done! */
	*unp_ret = unp;
	error = 0;
failed:
	if (vp != NULL)
		vput(vp);
	return error;
}
static int
unp_connect_pair(struct unpcb *unp, struct unpcb *unp2)
{
	struct socket *so = unp->unp_socket;
	struct socket *so2 = unp2->unp_socket;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);
	UNP_ASSERT_TOKEN_HELD(unp2);

	KASSERT(so->so_type == so2->so_type,
	    ("socket type mismatch, so %d, so2 %d",
	     so->so_type, so2->so_type));

	if (!UNP_ISATTACHED(unp))
		return EINVAL;
	if (!UNP_ISATTACHED(unp2))
		return ECONNREFUSED;

	KASSERT(unp->unp_conn == NULL, ("unp is already connected"));
	unp->unp_conn = unp2;

	switch (so->so_type) {
	case SOCK_DGRAM:
		LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink);
		soisconnected(so);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		KASSERT(unp2->unp_conn == NULL, ("unp2 is already connected"));
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect_pair: unknown socket type %d", so->so_type);
	}
	return 0;
}

static void
unp_drop(struct unpcb *unp, int error)
{
	struct unpcb *unp2;

	ASSERT_LWKT_TOKEN_HELD(&unp_token);
	UNP_ASSERT_TOKEN_HELD(unp);
	KASSERT(unp->unp_flags & UNP_DETACHED, ("unp is not detached"));

	unp_disconnect(unp, error);

	while ((unp2 = LIST_FIRST(&unp->unp_refs)) != NULL) {
		lwkt_getpooltoken(unp2);
		unp_disconnect(unp2, ECONNRESET);
		lwkt_relpooltoken(unp2);
	}
	unp_setflags(unp, UNP_DROPPED);
}

static void
unp_defdiscard_taskfunc(void *arg __unused, int pending __unused)
{
	struct unp_defdiscard *d;

	spin_lock(&unp_defdiscard_spin);
	while ((d = SLIST_FIRST(&unp_defdiscard_head)) != NULL) {
		SLIST_REMOVE_HEAD(&unp_defdiscard_head, next);
		spin_unlock(&unp_defdiscard_spin);

		closef(d->fp, NULL);
		kfree(d, M_UNPCB);

		spin_lock(&unp_defdiscard_spin);
	}
	spin_unlock(&unp_defdiscard_spin);
}