/*	$NetBSD: uipc_usrreq.c,v 1.171 2014/09/05 09:20:59 matt Exp $	*/

/*-
 * Copyright (c) 1998, 2000, 2004, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_usrreq.c	8.9 (Berkeley) 5/14/95
 */

/*
 * Copyright (c) 1997 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_usrreq.c	8.9 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uipc_usrreq.c,v 1.171 2014/09/05 09:20:59 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/unpcb.h>
#include <sys/un.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/mbuf.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/uidinfo.h>
#include <sys/kernel.h>
#include <sys/kthread.h>

/*
 * Unix communications domain.
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 *
 * Notes on locking:
 *
 * The generic rules noted in uipc_socket2.c apply.  In addition:
 *
 * o We have a global lock, uipc_lock.
 *
 * o All datagram sockets are locked by uipc_lock.
 *
 * o For stream socketpairs, the two endpoints are created sharing the same
 *   independent lock.  Sockets presented to PRU_CONNECT2 must already have
 *   matching locks.
 *
 * o Stream sockets created via socket() start life with their own
 *   independent lock.
 *
 * o Stream connections to a named endpoint are slightly more complicated.
 *   Sockets that have called listen() have their lock pointer mutated to
 *   the global uipc_lock.  When establishing a connection, the connecting
 *   socket also has its lock mutated to uipc_lock, which matches the head
 *   (listening socket).  We create a new socket for accept() to return,
 *   and that also shares the head's lock.  Until the connection is
 *   completely done on both ends, all three sockets are locked by
 *   uipc_lock.  Once the connection is complete, the association with the
 *   head's lock is broken.  The connecting socket and the socket returned
 *   from accept() have their lock pointers mutated away from uipc_lock,
 *   and back to the connecting socket's original, independent lock.  The
 *   head continues to be locked by uipc_lock.
 *
 * o If uipc_lock is determined to be a significant source of contention,
 *   it could easily be hashed out.  It is difficult to simply make it an
 *   independent lock because of visibility / garbage collection issues:
 *   if a socket has been associated with a lock at any point, that lock
 *   must remain valid until the socket is no longer visible in the system.
 *   The lock must not be freed or otherwise destroyed until any sockets
 *   that had referenced it have also been destroyed.
 */
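
/*
 * Illustrative sketch (not compiled): from userland, the locking regimes
 * above correspond to how the sockets are created.  A SOCK_DGRAM socket
 * is locked by the domain-wide uipc_lock; the two ends of a SOCK_STREAM
 * socketpair share one independent lock from birth.  The function name
 * example_make_pairs() is hypothetical, for illustration only.
 */
#ifdef notdef
#include <sys/socket.h>
#include <err.h>

static void
example_make_pairs(void)
{
	int dg, sv[2];

	/* Datagram socket: locked by the domain-wide uipc_lock. */
	if ((dg = socket(AF_LOCAL, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	/* Stream pair: both ends share one independent lock. */
	if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sv) == -1)
		err(1, "socketpair");
}
#endif /* notdef */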
const struct sockaddr_un sun_noname = {
	.sun_len = offsetof(struct sockaddr_un, sun_path),
	.sun_family = AF_LOCAL,
};
ino_t	unp_ino;			/* prototype for fake inode numbers */

static struct mbuf * unp_addsockcred(struct lwp *, struct mbuf *);
static void unp_discard_later(file_t *);
static void unp_discard_now(file_t *);
static void unp_disconnect1(struct unpcb *);
static bool unp_drop(struct unpcb *, int);
static int  unp_internalize(struct mbuf **);
static void unp_mark(file_t *);
static void unp_scan(struct mbuf *, void (*)(file_t *), int);
static void unp_shutdown1(struct unpcb *);
static void unp_thread(void *);
static void unp_thread_kick(void);

static kmutex_t *uipc_lock;

static kcondvar_t unp_thread_cv;
static lwp_t *unp_thread_lwp;
static SLIST_HEAD(,file) unp_thread_discard;
static int unp_defer;

/*
 * Initialize Unix protocols.
 */
void
uipc_init(void)
{
	int error;

	uipc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	cv_init(&unp_thread_cv, "unpgc");

	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL, unp_thread,
	    NULL, &unp_thread_lwp, "unpgc");
	if (error != 0)
		panic("uipc_init %d", error);
}

/*
 * A connection succeeded: disassociate both endpoints from the head's
 * lock, and make them share their own lock.  There is a race here: for
 * a very brief time one endpoint will be locked by a different lock
 * than the other end.  However, since the current thread holds the old
 * lock (the listening socket's lock, the head), access can still only
 * be made to one side of the connection.
 */
static void
unp_setpeerlocks(struct socket *so, struct socket *so2)
{
	struct unpcb *unp;
	kmutex_t *lock;

	KASSERT(solocked2(so, so2));

	/*
	 * Bail out if either end of the socket is not yet fully
	 * connected or accepted.  We only break the lock association
	 * with the head when the pair of sockets stand completely
	 * on their own.
	 */
	KASSERT(so->so_head == NULL);
	if (so2->so_head != NULL)
		return;

	/*
	 * Drop references to old lock.  A third reference (from the
	 * queue head) must be held as we still hold its lock.  Bonus:
	 * we don't need to worry about garbage collecting the lock.
	 */
	lock = so->so_lock;
	KASSERT(lock == uipc_lock);
	mutex_obj_free(lock);
	mutex_obj_free(lock);

	/*
	 * Grab stream lock from the initiator and share between the two
	 * endpoints.  Issue memory barrier to ensure all modifications
	 * become globally visible before the lock change.  so2 is
	 * assumed not to have a stream lock, because it was created
	 * purely for the server side to accept this connection and
	 * started out life using the domain-wide lock.
	 */
	unp = sotounpcb(so);
	KASSERT(unp->unp_streamlock != NULL);
	KASSERT(sotounpcb(so2)->unp_streamlock == NULL);
	lock = unp->unp_streamlock;
	unp->unp_streamlock = NULL;
	mutex_obj_hold(lock);
	membar_exit();
	/*
	 * possible race if lock is not held - see comment in
	 * uipc_usrreq(PRU_ACCEPT).
	 */
	KASSERT(mutex_owned(lock));
	solockreset(so, lock);
	solockreset(so2, lock);
}

/*
 * Reset a socket's lock back to the domain-wide lock.
 */
static void
unp_resetlock(struct socket *so)
{
	kmutex_t *olock, *nlock;
	struct unpcb *unp;

	KASSERT(solocked(so));

	olock = so->so_lock;
	nlock = uipc_lock;
	if (olock == nlock)
		return;
	unp = sotounpcb(so);
	KASSERT(unp->unp_streamlock == NULL);
	unp->unp_streamlock = olock;
	mutex_obj_hold(nlock);
	mutex_enter(nlock);
	solockreset(so, nlock);
	mutex_exit(olock);
}

static void
unp_free(struct unpcb *unp)
{
	if (unp->unp_addr)
		free(unp->unp_addr, M_SONAME);
	if (unp->unp_streamlock != NULL)
		mutex_obj_free(unp->unp_streamlock);
	kmem_free(unp, sizeof(*unp));
}

static int
unp_output(struct mbuf *m, struct mbuf *control, struct unpcb *unp)
{
	struct socket *so2;
	const struct sockaddr_un *sun;

	/* XXX: server side closed the socket */
	if (unp->unp_conn == NULL)
		return ECONNREFUSED;
	so2 = unp->unp_conn->unp_socket;

	KASSERT(solocked(so2));

	if (unp->unp_addr)
		sun = unp->unp_addr;
	else
		sun = &sun_noname;
	if (unp->unp_conn->unp_flags & UNP_WANTCRED)
		control = unp_addsockcred(curlwp, control);
	if (sbappendaddr(&so2->so_rcv, (const struct sockaddr *)sun, m,
	    control) == 0) {
		so2->so_rcv.sb_overflowed++;
		unp_dispose(control);
		m_freem(control);
		m_freem(m);
		return (ENOBUFS);
	} else {
		sorwakeup(so2);
		return (0);
	}
}
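
/*
 * Illustrative sketch (not compiled): each sendto() on an unconnected
 * AF_LOCAL datagram socket reaches unp_output() via a transient
 * connection -- unp_send() below connects to the named peer, delivers
 * one message here, and disconnects again.  example_dgram_send() is a
 * hypothetical userland helper; the caller supplies a filled-in
 * sockaddr_un.
 */
#ifdef notdef
#include <sys/socket.h>
#include <sys/un.h>

static ssize_t
example_dgram_send(int s, const struct sockaddr_un *sun, const void *msg,
    size_t len)
{
	/* An ENOBUFS return corresponds to sbappendaddr() failing above. */
	return sendto(s, msg, len, 0, (const struct sockaddr *)sun,
	    (socklen_t)sun->sun_len);
}
#endif /* notdef */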
static void
unp_setaddr(struct socket *so, struct mbuf *nam, bool peeraddr)
{
	const struct sockaddr_un *sun;
	struct unpcb *unp;
	bool ext;

	KASSERT(solocked(so));
	unp = sotounpcb(so);
	ext = false;

	for (;;) {
		sun = NULL;
		if (peeraddr) {
			if (unp->unp_conn && unp->unp_conn->unp_addr)
				sun = unp->unp_conn->unp_addr;
		} else {
			if (unp->unp_addr)
				sun = unp->unp_addr;
		}
		if (sun == NULL)
			sun = &sun_noname;
		nam->m_len = sun->sun_len;
		if (nam->m_len > MLEN && !ext) {
			sounlock(so);
			MEXTMALLOC(nam, MAXPATHLEN * 2, M_WAITOK);
			solock(so);
			ext = true;
		} else {
			KASSERT(nam->m_len <= MAXPATHLEN * 2);
			memcpy(mtod(nam, void *), sun, (size_t)nam->m_len);
			break;
		}
	}
}

static int
unp_rcvd(struct socket *so, int flags, struct lwp *l)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	u_int newhiwat;

	KASSERT(solocked(so));
	KASSERT(unp != NULL);

	switch (so->so_type) {

	case SOCK_DGRAM:
		panic("uipc 1");
		/*NOTREACHED*/

	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
#define	rcv (&so->so_rcv)
#define	snd (&so2->so_snd)
		if (unp->unp_conn == 0)
			break;
		so2 = unp->unp_conn->unp_socket;
		KASSERT(solocked2(so, so2));
		/*
		 * Adjust backpressure on sender and wake up
		 * anyone waiting to write.
		 */
		snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt;
		unp->unp_mbcnt = rcv->sb_mbcnt;
		newhiwat = snd->sb_hiwat + unp->unp_cc - rcv->sb_cc;
		(void)chgsbsize(so2->so_uidinfo,
		    &snd->sb_hiwat, newhiwat, RLIM_INFINITY);
		unp->unp_cc = rcv->sb_cc;
		sowwakeup(so2);
#undef snd
#undef rcv
		break;

	default:
		panic("uipc 2");
	}

	return 0;
}
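
/*
 * Worked example of the adjustment above, assuming the stream defaults
 * below (PIPSIZ == 4096): after a writer has sent 1024 bytes that still
 * sit unread in the peer's receive buffer, unp_send() will have lowered
 * the writer's snd->sb_hiwat from 4096 to 3072 and recorded unp_cc ==
 * 1024.  Once the reader consumes the data, rcv->sb_cc drops to 0 and
 * the computation here (newhiwat = 3072 + 1024 - 0) restores sb_hiwat
 * to 4096 and wakes the writer.
 */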
static int
unp_recvoob(struct socket *so, struct mbuf *m, int flags)
{
	KASSERT(solocked(so));

	return EOPNOTSUPP;
}

static int
unp_send(struct socket *so, struct mbuf *m, struct mbuf *nam,
    struct mbuf *control, struct lwp *l)
{
	struct unpcb *unp = sotounpcb(so);
	int error = 0;
	u_int newhiwat;
	struct socket *so2;

	KASSERT(solocked(so));
	KASSERT(unp != NULL);
	KASSERT(m != NULL);

	/*
	 * Note: unp_internalize() rejects any control message
	 * other than SCM_RIGHTS, and only allows one.  This
	 * has the side-effect of preventing a caller from
	 * forging SCM_CREDS.
	 */
	if (control) {
		sounlock(so);
		error = unp_internalize(&control);
		solock(so);
		if (error != 0) {
			m_freem(control);
			m_freem(m);
			return error;
		}
	}

	switch (so->so_type) {

	case SOCK_DGRAM: {
		KASSERT(so->so_lock == uipc_lock);
		if (nam) {
			if ((so->so_state & SS_ISCONNECTED) != 0)
				error = EISCONN;
			else {
				/*
				 * Note: once connected, the
				 * socket's lock must not be
				 * dropped until we have sent
				 * the message and disconnected.
				 * This is necessary to prevent
				 * intervening control ops, like
				 * another connection.
				 */
				error = unp_connect(so, nam, l);
			}
		} else {
			if ((so->so_state & SS_ISCONNECTED) == 0)
				error = ENOTCONN;
		}
		if (error) {
			unp_dispose(control);
			m_freem(control);
			m_freem(m);
			return error;
		}
		error = unp_output(m, control, unp);
		if (nam)
			unp_disconnect1(unp);
		break;
	}

	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
#define	rcv (&so2->so_rcv)
#define	snd (&so->so_snd)
		if (unp->unp_conn == NULL) {
			error = ENOTCONN;
			break;
		}
		so2 = unp->unp_conn->unp_socket;
		KASSERT(solocked2(so, so2));
		if (unp->unp_conn->unp_flags & UNP_WANTCRED) {
			/*
			 * Credentials are passed only once on
			 * SOCK_STREAM and SOCK_SEQPACKET.
			 */
			unp->unp_conn->unp_flags &= ~UNP_WANTCRED;
			control = unp_addsockcred(l, control);
		}
		/*
		 * Send to paired receive port, and then reduce
		 * send buffer hiwater marks to maintain backpressure.
		 * Wake up readers.
		 */
		if (control) {
			if (sbappendcontrol(rcv, m, control) != 0)
				control = NULL;
		} else {
			switch (so->so_type) {
			case SOCK_SEQPACKET:
				sbappendrecord(rcv, m);
				break;
			case SOCK_STREAM:
				sbappend(rcv, m);
				break;
			default:
				panic("uipc_usrreq");
				break;
			}
		}
		snd->sb_mbmax -=
		    rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt;
		unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt;
		newhiwat = snd->sb_hiwat -
		    (rcv->sb_cc - unp->unp_conn->unp_cc);
		(void)chgsbsize(so->so_uidinfo,
		    &snd->sb_hiwat, newhiwat, RLIM_INFINITY);
		unp->unp_conn->unp_cc = rcv->sb_cc;
		sorwakeup(so2);
#undef snd
#undef rcv
		if (control != NULL) {
			unp_dispose(control);
			m_freem(control);
		}
		break;

	default:
		panic("uipc 4");
	}

	return error;
}

static int
unp_sendoob(struct socket *so, struct mbuf *m, struct mbuf * control)
{
	KASSERT(solocked(so));

	m_freem(m);
	m_freem(control);

	return EOPNOTSUPP;
}

static int
unp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
    struct mbuf *control, struct lwp *l)
{

	KASSERT(req != PRU_ATTACH);
	KASSERT(req != PRU_DETACH);
	KASSERT(req != PRU_ACCEPT);
	KASSERT(req != PRU_BIND);
	KASSERT(req != PRU_LISTEN);
	KASSERT(req != PRU_CONNECT);
	KASSERT(req != PRU_CONNECT2);
	KASSERT(req != PRU_DISCONNECT);
	KASSERT(req != PRU_SHUTDOWN);
	KASSERT(req != PRU_ABORT);
	KASSERT(req != PRU_CONTROL);
	KASSERT(req != PRU_SENSE);
	KASSERT(req != PRU_PEERADDR);
	KASSERT(req != PRU_SOCKADDR);
	KASSERT(req != PRU_RCVD);
	KASSERT(req != PRU_RCVOOB);
	KASSERT(req != PRU_SEND);
	KASSERT(req != PRU_SENDOOB);
	KASSERT(req != PRU_PURGEIF);

	KASSERT(solocked(so));

	if (sotounpcb(so) == NULL)
		return EINVAL;

	panic("piusrreq");

	return 0;
}
/*
 * Unix domain socket option processing.
 */
int
uipc_ctloutput(int op, struct socket *so, struct sockopt *sopt)
{
	struct unpcb *unp = sotounpcb(so);
	int optval = 0, error = 0;

	KASSERT(solocked(so));

	if (sopt->sopt_level != 0) {
		error = ENOPROTOOPT;
	} else switch (op) {

	case PRCO_SETOPT:
		switch (sopt->sopt_name) {
		case LOCAL_CREDS:
		case LOCAL_CONNWAIT:
			error = sockopt_getint(sopt, &optval);
			if (error)
				break;
			switch (sopt->sopt_name) {
#define	OPTSET(bit) \
	if (optval) \
		unp->unp_flags |= (bit); \
	else \
		unp->unp_flags &= ~(bit);

			case LOCAL_CREDS:
				OPTSET(UNP_WANTCRED);
				break;
			case LOCAL_CONNWAIT:
				OPTSET(UNP_CONNWAIT);
				break;
			}
			break;
#undef OPTSET

		default:
			error = ENOPROTOOPT;
			break;
		}
		break;

	case PRCO_GETOPT:
		sounlock(so);
		switch (sopt->sopt_name) {
		case LOCAL_PEEREID:
			if (unp->unp_flags & UNP_EIDSVALID) {
				error = sockopt_set(sopt,
				    &unp->unp_connid, sizeof(unp->unp_connid));
			} else {
				error = EINVAL;
			}
			break;
		case LOCAL_CREDS:
#define	OPTBIT(bit)	(unp->unp_flags & (bit) ? 1 : 0)

			optval = OPTBIT(UNP_WANTCRED);
			error = sockopt_setint(sopt, optval);
			break;
#undef OPTBIT

		default:
			error = ENOPROTOOPT;
			break;
		}
		solock(so);
		break;
	}
	return (error);
}
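
/*
 * Illustrative sketch (not compiled): querying the options handled above
 * from userland.  The option level is 0, matching the sopt_level check in
 * uipc_ctloutput().  example_peereid() is a hypothetical helper.
 */
#ifdef notdef
#include <sys/socket.h>
#include <sys/un.h>
#include <stdio.h>

static void
example_peereid(int s)
{
	struct unpcbid id;	/* filled from unp_connid */
	socklen_t len = sizeof(id);
	int on = 1;

	/* Request SCM_CREDS on the next receive (sets UNP_WANTCRED). */
	(void)setsockopt(s, 0, LOCAL_CREDS, &on, sizeof(on));

	/* Valid once UNP_EIDSVALID is set, i.e. after connect/accept. */
	if (getsockopt(s, 0, LOCAL_PEEREID, &id, &len) == 0)
		printf("peer: pid %ld euid %ld egid %ld\n",
		    (long)id.unp_pid, (long)id.unp_euid, (long)id.unp_egid);
}
#endif /* notdef */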
/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#define	PIPSIZ	4096
u_long	unpst_sendspace = PIPSIZ;
u_long	unpst_recvspace = PIPSIZ;
u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
u_long	unpdg_recvspace = 4*1024;

u_int	unp_rights;			/* files in flight */
u_int	unp_rights_ratio = 2;		/* limit, fraction of maxfiles */

static int
unp_attach(struct socket *so, int proto)
{
	struct unpcb *unp = sotounpcb(so);
	u_long sndspc, rcvspc;
	int error;

	KASSERT(unp == NULL);

	switch (so->so_type) {
	case SOCK_SEQPACKET:
		/* FALLTHROUGH */
	case SOCK_STREAM:
		if (so->so_lock == NULL) {
			so->so_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
			solock(so);
		}
		sndspc = unpst_sendspace;
		rcvspc = unpst_recvspace;
		break;

	case SOCK_DGRAM:
		if (so->so_lock == NULL) {
			mutex_obj_hold(uipc_lock);
			so->so_lock = uipc_lock;
			solock(so);
		}
		sndspc = unpdg_sendspace;
		rcvspc = unpdg_recvspace;
		break;

	default:
		panic("unp_attach");
	}

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		error = soreserve(so, sndspc, rcvspc);
		if (error) {
			return error;
		}
	}

	unp = kmem_zalloc(sizeof(*unp), KM_SLEEP);
	nanotime(&unp->unp_ctime);
	unp->unp_socket = so;
	so->so_pcb = unp;

	KASSERT(solocked(so));
	return 0;
}

static void
unp_detach(struct socket *so)
{
	struct unpcb *unp;
	vnode_t *vp;

	unp = sotounpcb(so);
	KASSERT(unp != NULL);
	KASSERT(solocked(so));
 retry:
	if ((vp = unp->unp_vnode) != NULL) {
		sounlock(so);
		/* Acquire v_interlock to protect against unp_connect(). */
		/* XXXAD racy */
		mutex_enter(vp->v_interlock);
		vp->v_socket = NULL;
		mutex_exit(vp->v_interlock);
		vrele(vp);
		solock(so);
		unp->unp_vnode = NULL;
	}
	if (unp->unp_conn)
		unp_disconnect1(unp);
	while (unp->unp_refs) {
		KASSERT(solocked2(so, unp->unp_refs->unp_socket));
		if (unp_drop(unp->unp_refs, ECONNRESET)) {
			solock(so);
			goto retry;
		}
	}
	soisdisconnected(so);
	so->so_pcb = NULL;
	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later, in sofree,
		 * but if our receive buffer holds references to files that
		 * are now garbage, we will enqueue those file references to
		 * the garbage collector and kick it into action.
		 */
		sorflush(so);
		unp_free(unp);
		unp_thread_kick();
	} else
		unp_free(unp);
}

static int
unp_accept(struct socket *so, struct mbuf *nam)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;

	KASSERT(solocked(so));
	KASSERT(nam != NULL);

	/* XXX code review required to determine if unp can ever be NULL */
	if (unp == NULL)
		return EINVAL;

	KASSERT(so->so_lock == uipc_lock);
	/*
	 * Mark the initiating STREAM socket as connected *ONLY*
	 * after it's been accepted.  This prevents a client from
	 * overrunning a server and receiving ECONNREFUSED.
	 */
	if (unp->unp_conn == NULL) {
		/*
		 * This will use the empty socket and will not
		 * allocate.
		 */
		unp_setaddr(so, nam, true);
		return 0;
	}
	so2 = unp->unp_conn->unp_socket;
	if (so2->so_state & SS_ISCONNECTING) {
		KASSERT(solocked2(so, so->so_head));
		KASSERT(solocked2(so2, so->so_head));
		soisconnected(so2);
	}
	/*
	 * If the connection is fully established, break the
	 * association with uipc_lock and give the connected
	 * pair a separate lock to share.
	 * There is a race here: sotounpcb(so2)->unp_streamlock
	 * is not locked, so when changing so2->so_lock
	 * another thread can grab it while so->so_lock is still
	 * pointing to the (locked) uipc_lock.
	 * This should be harmless, except that it makes
	 * solocked2() and solocked() unreliable.
	 * Another problem is that unp_setaddr() expects the
	 * socket locked.  Grabbing sotounpcb(so2)->unp_streamlock
	 * fixes both issues.
	 */
	mutex_enter(sotounpcb(so2)->unp_streamlock);
	unp_setpeerlocks(so2, so);
	/*
	 * Only now return peer's address, as we may need to
	 * block in order to allocate memory.
	 *
	 * XXX Minor race: connection can be broken while
	 * lock is dropped in unp_setaddr().  We will return
	 * error == 0 and sun_noname as the peer address.
	 */
	unp_setaddr(so, nam, true);
	/* so_lock now points to unp_streamlock */
	mutex_exit(so2->so_lock);
	return 0;
}

static int
unp_ioctl(struct socket *so, u_long cmd, void *nam, struct ifnet *ifp)
{
	return EOPNOTSUPP;
}

static int
unp_stat(struct socket *so, struct stat *ub)
{
	struct unpcb *unp;
	struct socket *so2;

	KASSERT(solocked(so));

	unp = sotounpcb(so);
	if (unp == NULL)
		return EINVAL;

	ub->st_blksize = so->so_snd.sb_hiwat;
	switch (so->so_type) {
	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		if (unp->unp_conn == 0)
			break;

		so2 = unp->unp_conn->unp_socket;
		KASSERT(solocked2(so, so2));
		ub->st_blksize += so2->so_rcv.sb_cc;
		break;
	default:
		break;
	}
	ub->st_dev = NODEV;
	if (unp->unp_ino == 0)
		unp->unp_ino = unp_ino++;
	ub->st_atimespec = ub->st_mtimespec = ub->st_ctimespec = unp->unp_ctime;
	ub->st_ino = unp->unp_ino;
	return (0);
}

static int
unp_peeraddr(struct socket *so, struct mbuf *nam)
{
	KASSERT(solocked(so));
	KASSERT(sotounpcb(so) != NULL);
	KASSERT(nam != NULL);

	unp_setaddr(so, nam, true);
	return 0;
}

static int
unp_sockaddr(struct socket *so, struct mbuf *nam)
{
	KASSERT(solocked(so));
	KASSERT(sotounpcb(so) != NULL);
	KASSERT(nam != NULL);

	unp_setaddr(so, nam, false);
	return 0;
}

/*
 * Allocate the new sockaddr.  We have to allocate one
 * extra byte so that we can ensure that the pathname
 * is nul-terminated.  Note that, unlike Linux, we don't
 * include in the address length the NUL in the path
 * component, because doing so would exceed sizeof(sockaddr_un)
 * for fully occupied pathnames.  Linux is also inconsistent,
 * because it does not include the NUL in the length of
 * what it calls "abstract" unix sockets.
 */
static struct sockaddr_un *
makeun(struct mbuf *nam, size_t *addrlen)
{
	struct sockaddr_un *sun;

	*addrlen = nam->m_len + 1;
	sun = malloc(*addrlen, M_SONAME, M_WAITOK);
	m_copydata(nam, 0, nam->m_len, (void *)sun);
	*(((char *)sun) + nam->m_len) = '\0';
	sun->sun_len = strlen(sun->sun_path) +
	    offsetof(struct sockaddr_un, sun_path);
	return sun;
}
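
/*
 * Illustrative sketch (not compiled): the userland mirror of the length
 * convention above.  SUN_LEN() computes the same value; the terminating
 * NUL is not counted.  example_makeun() is a hypothetical helper reused
 * by the later sketches.
 */
#ifdef notdef
#include <sys/socket.h>
#include <sys/un.h>
#include <stddef.h>
#include <string.h>

static struct sockaddr_un
example_makeun(const char *path)
{
	struct sockaddr_un sun;

	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_LOCAL;
	strlcpy(sun.sun_path, path, sizeof(sun.sun_path));
	/* Equivalent to SUN_LEN(&sun): the NUL is not counted. */
	sun.sun_len = offsetof(struct sockaddr_un, sun_path) +
	    strlen(sun.sun_path);
	return sun;
}
#endif /* notdef */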
static int
unp_bind(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	struct sockaddr_un *sun;
	struct unpcb *unp;
	vnode_t *vp;
	struct vattr vattr;
	size_t addrlen;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;
	proc_t *p;

	unp = sotounpcb(so);

	KASSERT(solocked(so));
	KASSERT(unp != NULL);
	KASSERT(nam != NULL);

	if (unp->unp_vnode != NULL)
		return (EINVAL);
	if ((unp->unp_flags & UNP_BUSY) != 0) {
		/*
		 * EALREADY may not be strictly accurate, but since this
		 * is a major application error it's hardly a big deal.
		 */
		return (EALREADY);
	}
	unp->unp_flags |= UNP_BUSY;
	sounlock(so);

	p = l->l_proc;
	sun = makeun(nam, &addrlen);

	pb = pathbuf_create(sun->sun_path);
	if (pb == NULL) {
		error = ENOMEM;
		goto bad;
	}
	NDINIT(&nd, CREATE, FOLLOW | LOCKPARENT | TRYEMULROOT, pb);

	/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	if ((error = namei(&nd)) != 0) {
		pathbuf_destroy(pb);
		goto bad;
	}
	vp = nd.ni_vp;
	if (vp != NULL) {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(vp);
		pathbuf_destroy(pb);
		error = EADDRINUSE;
		goto bad;
	}
	vattr_null(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = ACCESSPERMS & ~(p->p_cwdi->cwdi_cmask);
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	if (error) {
		vput(nd.ni_dvp);
		pathbuf_destroy(pb);
		goto bad;
	}
	vp = nd.ni_vp;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	solock(so);
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_addrlen = addrlen;
	unp->unp_addr = sun;
	unp->unp_connid.unp_pid = p->p_pid;
	unp->unp_connid.unp_euid = kauth_cred_geteuid(l->l_cred);
	unp->unp_connid.unp_egid = kauth_cred_getegid(l->l_cred);
	unp->unp_flags |= UNP_EIDSBIND;
	VOP_UNLOCK(vp);
	vput(nd.ni_dvp);
	unp->unp_flags &= ~UNP_BUSY;
	pathbuf_destroy(pb);
	return (0);

 bad:
	free(sun, M_SONAME);
	solock(so);
	unp->unp_flags &= ~UNP_BUSY;
	return (error);
}
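
/*
 * Illustrative sketch (not compiled): the userland counterpart of
 * unp_bind().  Because the VOP_CREATE() path above fails with
 * EADDRINUSE when the pathname already exists, servers conventionally
 * unlink() any stale socket file first.  example_listen() is a
 * hypothetical helper; the path name is supplied by the caller, and
 * example_makeun() is the sketch shown above.
 */
#ifdef notdef
#include <sys/socket.h>
#include <sys/un.h>
#include <err.h>
#include <unistd.h>

static int
example_listen(const char *path)
{
	struct sockaddr_un sun = example_makeun(path);
	int s;

	if ((s = socket(AF_LOCAL, SOCK_STREAM, 0)) == -1)
		err(1, "socket");
	(void)unlink(path);	/* avoid EADDRINUSE from a stale file */
	if (bind(s, (struct sockaddr *)&sun, sizeof(sun)) == -1)
		err(1, "bind");
	if (listen(s, 5) == -1)	/* unp_listen() below moves it to uipc_lock */
		err(1, "listen");
	return s;
}
#endif /* notdef */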
static int
unp_listen(struct socket *so, struct lwp *l)
{
	struct unpcb *unp = sotounpcb(so);

	KASSERT(solocked(so));
	KASSERT(unp != NULL);

	/*
	 * If the socket can accept a connection, it must be
	 * locked by uipc_lock.
	 */
	unp_resetlock(so);
	if (unp->unp_vnode == NULL)
		return EINVAL;

	return 0;
}

static int
unp_disconnect(struct socket *so)
{
	KASSERT(solocked(so));
	KASSERT(sotounpcb(so) != NULL);

	unp_disconnect1(sotounpcb(so));
	return 0;
}

static int
unp_shutdown(struct socket *so)
{
	KASSERT(solocked(so));
	KASSERT(sotounpcb(so) != NULL);

	socantsendmore(so);
	unp_shutdown1(sotounpcb(so));
	return 0;
}

static int
unp_abort(struct socket *so)
{
	KASSERT(solocked(so));
	KASSERT(sotounpcb(so) != NULL);

	(void)unp_drop(sotounpcb(so), ECONNABORTED);
	KASSERT(so->so_head == NULL);
	KASSERT(so->so_pcb != NULL);
	unp_detach(so);
	return 0;
}

static int
unp_connect1(struct socket *so, struct socket *so2)
{
	struct unpcb *unp = sotounpcb(so);
	struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return EPROTOTYPE;

	/*
	 * All three sockets involved must be locked by same lock:
	 *
	 * local endpoint (so)
	 * remote endpoint (so2)
	 * queue head (so2->so_head, only if PR_CONNREQUIRED)
	 */
	KASSERT(solocked2(so, so2));
	KASSERT(so->so_head == NULL);
	if (so2->so_head != NULL) {
		KASSERT(so2->so_lock == uipc_lock);
		KASSERT(solocked2(so2, so2->so_head));
	}

	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		unp->unp_nextref = unp2->unp_refs;
		unp2->unp_refs = unp;
		soisconnected(so);
		break;

	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:

		/*
		 * SOCK_SEQPACKET and SOCK_STREAM cases are handled by callers
		 * which are unp_connect() or unp_connect2().
		 */

		break;

	default:
		panic("unp_connect1");
	}

	return 0;
}
int
unp_connect(struct socket *so, struct mbuf *nam, struct lwp *l)
{
	struct sockaddr_un *sun;
	vnode_t *vp;
	struct socket *so2, *so3;
	struct unpcb *unp, *unp2, *unp3;
	size_t addrlen;
	int error;
	struct pathbuf *pb;
	struct nameidata nd;

	unp = sotounpcb(so);
	if ((unp->unp_flags & UNP_BUSY) != 0) {
		/*
		 * EALREADY may not be strictly accurate, but since this
		 * is a major application error it's hardly a big deal.
		 */
		return (EALREADY);
	}
	unp->unp_flags |= UNP_BUSY;
	sounlock(so);

	sun = makeun(nam, &addrlen);
	pb = pathbuf_create(sun->sun_path);
	if (pb == NULL) {
		error = ENOMEM;
		goto bad2;
	}

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | TRYEMULROOT, pb);

	if ((error = namei(&nd)) != 0) {
		pathbuf_destroy(pb);
		goto bad2;
	}
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	pathbuf_destroy(pb);
	if ((error = VOP_ACCESS(vp, VWRITE, l->l_cred)) != 0)
		goto bad;
	/* Acquire v_interlock to protect against unp_detach(). */
	mutex_enter(vp->v_interlock);
	so2 = vp->v_socket;
	if (so2 == NULL) {
		mutex_exit(vp->v_interlock);
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		mutex_exit(vp->v_interlock);
		error = EPROTOTYPE;
		goto bad;
	}
	solock(so);
	unp_resetlock(so);
	mutex_exit(vp->v_interlock);
	if ((so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) {
		/*
		 * This may seem somewhat fragile but is OK: if we can
		 * see SO_ACCEPTCONN set on the endpoint, then it must
		 * be locked by the domain-wide uipc_lock.
		 */
		KASSERT((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    so2->so_lock == uipc_lock);
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, false)) == NULL) {
			error = ECONNREFUSED;
			sounlock(so);
			goto bad;
		}
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr) {
			unp3->unp_addr = malloc(unp2->unp_addrlen,
			    M_SONAME, M_WAITOK);
			memcpy(unp3->unp_addr, unp2->unp_addr,
			    unp2->unp_addrlen);
			unp3->unp_addrlen = unp2->unp_addrlen;
		}
		unp3->unp_flags = unp2->unp_flags;
		unp3->unp_connid.unp_pid = l->l_proc->p_pid;
		unp3->unp_connid.unp_euid = kauth_cred_geteuid(l->l_cred);
		unp3->unp_connid.unp_egid = kauth_cred_getegid(l->l_cred);
		unp3->unp_flags |= UNP_EIDSVALID;
		if (unp2->unp_flags & UNP_EIDSBIND) {
			unp->unp_connid = unp2->unp_connid;
			unp->unp_flags |= UNP_EIDSVALID;
		}
		so2 = so3;
	}
	error = unp_connect1(so, so2);
	if (error) {
		sounlock(so);
		goto bad;
	}
	unp2 = sotounpcb(so2);
	switch (so->so_type) {

	/*
	 * SOCK_DGRAM and default cases are handled in prior call to
	 * unp_connect1(), do not add a default case without fixing
	 * unp_connect1().
	 */

	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		unp2->unp_conn = unp;
		if ((unp->unp_flags | unp2->unp_flags) & UNP_CONNWAIT)
			soisconnecting(so);
		else
			soisconnected(so);
		soisconnected(so2);
		/*
		 * If the connection is fully established, break the
		 * association with uipc_lock and give the connected
		 * pair a separate lock to share.
		 */
		KASSERT(so2->so_head != NULL);
		unp_setpeerlocks(so, so2);
		break;

	}
	sounlock(so);
 bad:
	vput(vp);
 bad2:
	free(sun, M_SONAME);
	solock(so);
	unp->unp_flags &= ~UNP_BUSY;
	return (error);
}
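
/*
 * Illustrative sketch (not compiled): the client side of the path above.
 * ECONNREFUSED is returned both when no socket is bound at the path
 * (v_socket == NULL) and when the listener's queue is full (sonewconn()
 * fails).  example_connect() is a hypothetical helper reusing
 * example_makeun() from the earlier sketch.
 */
#ifdef notdef
#include <sys/socket.h>
#include <sys/un.h>
#include <err.h>

static int
example_connect(const char *path)
{
	struct sockaddr_un sun = example_makeun(path);
	int s;

	if ((s = socket(AF_LOCAL, SOCK_STREAM, 0)) == -1)
		err(1, "socket");
	if (connect(s, (struct sockaddr *)&sun, sizeof(sun)) == -1)
		err(1, "connect");	/* e.g. ECONNREFUSED, EPROTOTYPE */
	return s;
}
#endif /* notdef */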
int
unp_connect2(struct socket *so, struct socket *so2)
{
	struct unpcb *unp = sotounpcb(so);
	struct unpcb *unp2;
	int error = 0;

	KASSERT(solocked2(so, so2));

	error = unp_connect1(so, so2);
	if (error)
		return error;

	unp2 = sotounpcb(so2);
	switch (so->so_type) {

	/*
	 * SOCK_DGRAM and default cases are handled in prior call to
	 * unp_connect1(), do not add a default case without fixing
	 * unp_connect1().
	 */

	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	}
	return error;
}

static void
unp_disconnect1(struct unpcb *unp)
{
	struct unpcb *unp2 = unp->unp_conn;
	struct socket *so;

	if (unp2 == 0)
		return;
	unp->unp_conn = 0;
	so = unp->unp_socket;
	switch (so->so_type) {
	case SOCK_DGRAM:
		if (unp2->unp_refs == unp)
			unp2->unp_refs = unp->unp_nextref;
		else {
			unp2 = unp2->unp_refs;
			for (;;) {
				KASSERT(solocked2(so, unp2->unp_socket));
				if (unp2 == 0)
					panic("unp_disconnect1");
				if (unp2->unp_nextref == unp)
					break;
				unp2 = unp2->unp_nextref;
			}
			unp2->unp_nextref = unp->unp_nextref;
		}
		unp->unp_nextref = 0;
		so->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		KASSERT(solocked2(so, unp2->unp_socket));
		soisdisconnected(so);
		unp2->unp_conn = 0;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}

static void
unp_shutdown1(struct unpcb *unp)
{
	struct socket *so;

	switch (unp->unp_socket->so_type) {
	case SOCK_SEQPACKET: /* FALLTHROUGH */
	case SOCK_STREAM:
		if (unp->unp_conn && (so = unp->unp_conn->unp_socket))
			socantrcvmore(so);
		break;
	default:
		break;
	}
}

static bool
unp_drop(struct unpcb *unp, int errno)
{
	struct socket *so = unp->unp_socket;

	KASSERT(solocked(so));

	so->so_error = errno;
	unp_disconnect1(unp);
	if (so->so_head) {
		so->so_pcb = NULL;
		/* sofree() drops the socket lock */
		sofree(so);
		unp_free(unp);
		return true;
	}
	return false;
}

#ifdef notdef
unp_drain(void)
{

}
#endif

int
unp_externalize(struct mbuf *rights, struct lwp *l, int flags)
{
	struct cmsghdr * const cm = mtod(rights, struct cmsghdr *);
	struct proc * const p = l->l_proc;
	file_t **rp;
	int error = 0;

	const size_t nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) /
	    sizeof(file_t *);
	if (nfds == 0)
		goto noop;

	int * const fdp = kmem_alloc(nfds * sizeof(int), KM_SLEEP);
	rw_enter(&p->p_cwdi->cwdi_lock, RW_READER);

	/* Make sure the recipient should be able to see the files.. */
	rp = (file_t **)CMSG_DATA(cm);
	for (size_t i = 0; i < nfds; i++) {
		file_t * const fp = *rp++;
		if (fp == NULL) {
			error = EINVAL;
			goto out;
		}
		/*
		 * If we are in a chroot'ed directory, and
		 * someone wants to pass us a directory, make
		 * sure it's inside the subtree we're allowed
		 * to access.
		 */
		if (p->p_cwdi->cwdi_rdir != NULL && fp->f_type == DTYPE_VNODE) {
			vnode_t *vp = fp->f_vnode;
			if ((vp->v_type == VDIR) &&
			    !vn_isunder(vp, p->p_cwdi->cwdi_rdir, l)) {
				error = EPERM;
				goto out;
			}
		}
	}

 restart:
	/*
	 * First loop -- allocate file descriptor table slots for the
	 * new files.
	 */
	for (size_t i = 0; i < nfds; i++) {
		if ((error = fd_alloc(p, 0, &fdp[i])) != 0) {
			/*
			 * Back out what we've done so far.
			 */
			while (i-- > 0) {
				fd_abort(p, NULL, fdp[i]);
			}
			if (error == ENOSPC) {
				fd_tryexpand(p);
				error = 0;
				goto restart;
			}
			/*
			 * This is the error that has historically
			 * been returned, and some callers may
			 * expect it.
			 */
			error = EMSGSIZE;
			goto out;
		}
	}

	/*
	 * Now that adding them has succeeded, update all of the
	 * file passing state and affix the descriptors.
	 */
	rp = (file_t **)CMSG_DATA(cm);
	int *ofdp = (int *)CMSG_DATA(cm);
	for (size_t i = 0; i < nfds; i++) {
		file_t * const fp = *rp++;
		const int fd = fdp[i];
		atomic_dec_uint(&unp_rights);
		fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
		fd_affix(p, fp, fd);
		/*
		 * Done with this file pointer, replace it with an fd.
		 */
		*ofdp++ = fd;
		mutex_enter(&fp->f_lock);
		fp->f_msgcount--;
		mutex_exit(&fp->f_lock);
		/*
		 * Note that fd_affix() adds a reference to the file.
		 * The file may already have been closed by another
		 * LWP in the process, so we must drop the reference
		 * added by unp_internalize() with closef().
		 */
		closef(fp);
	}

	/*
	 * Adjust length, in case of transition from large file_t
	 * pointers to ints.
	 */
	if (sizeof(file_t *) != sizeof(int)) {
		cm->cmsg_len = CMSG_LEN(nfds * sizeof(int));
		rights->m_len = CMSG_SPACE(nfds * sizeof(int));
	}
 out:
	if (__predict_false(error != 0)) {
		file_t **const fpp = (file_t **)CMSG_DATA(cm);
		for (size_t i = 0; i < nfds; i++)
			unp_discard_now(fpp[i]);
		/*
		 * Truncate the array so that nobody will try to interpret
		 * what is now garbage in it.
		 */
		cm->cmsg_len = CMSG_LEN(0);
		rights->m_len = CMSG_SPACE(0);
	}
	rw_exit(&p->p_cwdi->cwdi_lock);
	kmem_free(fdp, nfds * sizeof(int));

 noop:
	/*
	 * Don't disclose kernel memory in the alignment space.
	 */
	KASSERT(cm->cmsg_len <= rights->m_len);
	memset(&mtod(rights, char *)[cm->cmsg_len], 0, rights->m_len -
	    cm->cmsg_len);
	return error;
}
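
/*
 * Illustrative sketch (not compiled): passing a descriptor from userland.
 * sendmsg() drives unp_internalize() below (fds become file_t pointers in
 * transit); recvmsg() on the peer drives unp_externalize() above (file_t
 * pointers become fresh fds in the receiver).  example_send_fd() and
 * example_recv_fd() are hypothetical helpers.
 */
#ifdef notdef
#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

static int
example_send_fd(int s, int fd)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } u;
	struct iovec iov;
	char c = 0;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &c;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;	/* the only type unp_internalize() accepts */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	return sendmsg(s, &msg, 0) == 1 ? 0 : -1;
}

static int
example_recv_fd(int s)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } u;
	struct iovec iov;
	char c;
	int fd = -1;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &c;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);
	if (recvmsg(s, &msg, 0) == -1)
		return -1;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_RIGHTS)
			memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
	}
	return fd;	/* a fresh descriptor affixed by unp_externalize() */
}
#endif /* notdef */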
static int
unp_internalize(struct mbuf **controlp)
{
	filedesc_t *fdescp = curlwp->l_fd;
	struct mbuf *control = *controlp;
	struct cmsghdr *newcm, *cm = mtod(control, struct cmsghdr *);
	file_t **rp, **files;
	file_t *fp;
	int i, fd, *fdp;
	int nfds, error;
	u_int maxmsg;

	error = 0;
	newcm = NULL;

	/* Sanity check the control message header. */
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    cm->cmsg_len > control->m_len ||
	    cm->cmsg_len < CMSG_ALIGN(sizeof(*cm)))
		return (EINVAL);

	/*
	 * Verify that the file descriptors are valid, and acquire
	 * a reference to each.
	 */
	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof(int);
	fdp = (int *)CMSG_DATA(cm);
	maxmsg = maxfiles / unp_rights_ratio;
	for (i = 0; i < nfds; i++) {
		fd = *fdp++;
		if (atomic_inc_uint_nv(&unp_rights) > maxmsg) {
			atomic_dec_uint(&unp_rights);
			nfds = i;
			error = EAGAIN;
			goto out;
		}
		if ((fp = fd_getfile(fd)) == NULL
		    || fp->f_type == DTYPE_KQUEUE) {
			if (fp)
				fd_putfile(fd);
			atomic_dec_uint(&unp_rights);
			nfds = i;
			error = EBADF;
			goto out;
		}
	}

	/* Allocate new space and copy header into it. */
	newcm = malloc(CMSG_SPACE(nfds * sizeof(file_t *)), M_MBUF, M_WAITOK);
	if (newcm == NULL) {
		error = E2BIG;
		goto out;
	}
	memcpy(newcm, cm, sizeof(struct cmsghdr));
	files = (file_t **)CMSG_DATA(newcm);

	/*
	 * Transform the file descriptors into file_t pointers, in
	 * reverse order so that if pointers are bigger than ints, the
	 * int won't get overwritten until we're done.  No need to lock,
	 * as we have already validated the descriptors with fd_getfile().
	 */
	fdp = (int *)CMSG_DATA(cm) + nfds;
	rp = files + nfds;
	for (i = 0; i < nfds; i++) {
		fp = fdescp->fd_dt->dt_ff[*--fdp]->ff_file;
		KASSERT(fp != NULL);
		mutex_enter(&fp->f_lock);
		*--rp = fp;
		fp->f_count++;
		fp->f_msgcount++;
		mutex_exit(&fp->f_lock);
	}

 out:
	/* Release descriptor references. */
	fdp = (int *)CMSG_DATA(cm);
	for (i = 0; i < nfds; i++) {
		fd_putfile(*fdp++);
		if (error != 0) {
			atomic_dec_uint(&unp_rights);
		}
	}

	if (error == 0) {
		if (control->m_flags & M_EXT) {
			m_freem(control);
			*controlp = control = m_get(M_WAIT, MT_CONTROL);
		}
		MEXTADD(control, newcm, CMSG_SPACE(nfds * sizeof(file_t *)),
		    M_MBUF, NULL, NULL);
		cm = newcm;
		/*
		 * Adjust message & mbuf to note amount of space
		 * actually used.
		 */
		cm->cmsg_len = CMSG_LEN(nfds * sizeof(file_t *));
		control->m_len = CMSG_SPACE(nfds * sizeof(file_t *));
	}

	return error;
}

struct mbuf *
unp_addsockcred(struct lwp *l, struct mbuf *control)
{
	struct sockcred *sc;
	struct mbuf *m;
	void *p;

	m = sbcreatecontrol1(&p, SOCKCREDSIZE(kauth_cred_ngroups(l->l_cred)),
	    SCM_CREDS, SOL_SOCKET, M_WAITOK);
	if (m == NULL)
		return control;

	sc = p;
	sc->sc_uid = kauth_cred_getuid(l->l_cred);
	sc->sc_euid = kauth_cred_geteuid(l->l_cred);
	sc->sc_gid = kauth_cred_getgid(l->l_cred);
	sc->sc_egid = kauth_cred_getegid(l->l_cred);
	sc->sc_ngroups = kauth_cred_ngroups(l->l_cred);

	for (int i = 0; i < sc->sc_ngroups; i++)
		sc->sc_groups[i] = kauth_cred_group(l->l_cred, i);

	return m_add(control, m);
}
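
/*
 * Illustrative sketch (not compiled): receiving the SCM_CREDS message
 * built by unp_addsockcred() above.  The receiver must first request it
 * with LOCAL_CREDS (see uipc_ctloutput()); the credentials arrive as a
 * struct sockcred whose size varies with sc_ngroups.  The helper name
 * and header set are assumptions for illustration.
 */
#ifdef notdef
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

static void
example_recv_creds(int s)
{
	union {
		struct cmsghdr hdr;
		char buf[CMSG_SPACE(SOCKCREDSIZE(NGROUPS_MAX))];
	} u;
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct iovec iov;
	char c;

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = &c;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);
	if (recvmsg(s, &msg, 0) == -1)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_CREDS) {
			const struct sockcred *sc =
			    (const void *)CMSG_DATA(cmsg);
			printf("peer euid %ld, %d groups\n",
			    (long)sc->sc_euid, sc->sc_ngroups);
		}
	}
}
#endif /* notdef */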
/*
 * Do a mark-sweep GC of files in the system, to free up any which are
 * caught in flight to an about-to-be-closed socket.  Additionally,
 * process deferred file closures.
 */
static void
unp_gc(file_t *dp)
{
	extern struct domain unixdomain;
	file_t *fp, *np;
	struct socket *so, *so1;
	u_int i, oflags, rflags;
	bool didwork;

	KASSERT(curlwp == unp_thread_lwp);
	KASSERT(mutex_owned(&filelist_lock));

	/*
	 * First, process deferred file closures.
	 */
	while (!SLIST_EMPTY(&unp_thread_discard)) {
		fp = SLIST_FIRST(&unp_thread_discard);
		KASSERT(fp->f_unpcount > 0);
		KASSERT(fp->f_count > 0);
		KASSERT(fp->f_msgcount > 0);
		KASSERT(fp->f_count >= fp->f_unpcount);
		KASSERT(fp->f_count >= fp->f_msgcount);
		KASSERT(fp->f_msgcount >= fp->f_unpcount);
		SLIST_REMOVE_HEAD(&unp_thread_discard, f_unplist);
		i = fp->f_unpcount;
		fp->f_unpcount = 0;
		mutex_exit(&filelist_lock);
		for (; i != 0; i--) {
			unp_discard_now(fp);
		}
		mutex_enter(&filelist_lock);
	}

	/*
	 * Clear mark bits.  Ensure that we don't consider new files
	 * entering the file table during this loop (they will not have
	 * FSCAN set).
	 */
	unp_defer = 0;
	LIST_FOREACH(fp, &filehead, f_list) {
		for (oflags = fp->f_flag;; oflags = rflags) {
			rflags = atomic_cas_uint(&fp->f_flag, oflags,
			    (oflags | FSCAN) & ~(FMARK|FDEFER));
			if (__predict_true(oflags == rflags)) {
				break;
			}
		}
	}

	/*
	 * Iterate over the set of sockets, marking ones believed (based on
	 * refcount) to be referenced from a process, and marking for rescan
	 * sockets which are queued on a socket.  Rescan continues descending
	 * and searching for sockets referenced by sockets (FDEFER), until
	 * there are no more socket->socket references to be discovered.
	 */
	do {
		didwork = false;
		for (fp = LIST_FIRST(&filehead); fp != NULL; fp = np) {
			KASSERT(mutex_owned(&filelist_lock));
			np = LIST_NEXT(fp, f_list);
			mutex_enter(&fp->f_lock);
			if ((fp->f_flag & FDEFER) != 0) {
				atomic_and_uint(&fp->f_flag, ~FDEFER);
				unp_defer--;
				KASSERT(fp->f_count != 0);
			} else {
				if (fp->f_count == 0 ||
				    (fp->f_flag & FMARK) != 0 ||
				    fp->f_count == fp->f_msgcount ||
				    fp->f_unpcount != 0) {
					mutex_exit(&fp->f_lock);
					continue;
				}
			}
			atomic_or_uint(&fp->f_flag, FMARK);

			if (fp->f_type != DTYPE_SOCKET ||
			    (so = fp->f_socket) == NULL ||
			    so->so_proto->pr_domain != &unixdomain ||
			    (so->so_proto->pr_flags & PR_RIGHTS) == 0) {
				mutex_exit(&fp->f_lock);
				continue;
			}

			/* Gain file ref, mark our position, and unlock. */
			didwork = true;
			LIST_INSERT_AFTER(fp, dp, f_list);
			fp->f_count++;
			mutex_exit(&fp->f_lock);
			mutex_exit(&filelist_lock);

			/*
			 * Mark files referenced from sockets queued on the
			 * accept queue as well.
			 */
			solock(so);
			unp_scan(so->so_rcv.sb_mb, unp_mark, 0);
			if ((so->so_options & SO_ACCEPTCONN) != 0) {
				TAILQ_FOREACH(so1, &so->so_q0, so_qe) {
					unp_scan(so1->so_rcv.sb_mb, unp_mark, 0);
				}
				TAILQ_FOREACH(so1, &so->so_q, so_qe) {
					unp_scan(so1->so_rcv.sb_mb, unp_mark, 0);
				}
			}
			sounlock(so);

			/* Re-lock and restart from where we left off. */
			closef(fp);
			mutex_enter(&filelist_lock);
			np = LIST_NEXT(dp, f_list);
			LIST_REMOVE(dp, f_list);
		}
		/*
		 * Bail early if we did nothing in the loop above.  Could
		 * happen because of concurrent activity causing unp_defer
		 * to get out of sync.
		 */
	} while (unp_defer != 0 && didwork);

	/*
	 * Sweep pass.
	 *
	 * We grab an extra reference to each of the files that are
	 * not otherwise accessible and then free the rights that are
	 * stored in messages on them.
	 */
	for (fp = LIST_FIRST(&filehead); fp != NULL; fp = np) {
		KASSERT(mutex_owned(&filelist_lock));
		np = LIST_NEXT(fp, f_list);
		mutex_enter(&fp->f_lock);

		/*
		 * Ignore non-sockets.
		 * Ignore dead sockets, or sockets with pending close.
		 * Ignore sockets obviously referenced elsewhere.
		 * Ignore sockets marked as referenced by our scan.
		 * Ignore new sockets that did not exist during the scan.
		 */
		if (fp->f_type != DTYPE_SOCKET ||
		    fp->f_count == 0 || fp->f_unpcount != 0 ||
		    fp->f_count != fp->f_msgcount ||
		    (fp->f_flag & (FMARK | FSCAN)) != FSCAN) {
			mutex_exit(&fp->f_lock);
			continue;
		}

		/* Gain file ref, mark our position, and unlock. */
		LIST_INSERT_AFTER(fp, dp, f_list);
		fp->f_count++;
		mutex_exit(&fp->f_lock);
		mutex_exit(&filelist_lock);

		/*
		 * Flush all data from the socket's receive buffer.
		 * This will cause files referenced only by the
		 * socket to be queued for close.
		 */
		so = fp->f_socket;
		solock(so);
		sorflush(so);
		sounlock(so);

		/* Re-lock and restart from where we left off. */
		closef(fp);
		mutex_enter(&filelist_lock);
		np = LIST_NEXT(dp, f_list);
		LIST_REMOVE(dp, f_list);
	}
}

/*
 * Garbage collector thread.  While SCM_RIGHTS messages are in transit,
 * wake once per second to garbage collect.  Run continually while we
 * have deferred closes to process.
 */
static void
unp_thread(void *cookie)
{
	file_t *dp;

	/* Allocate a dummy file for our scans. */
	if ((dp = fgetdummy()) == NULL) {
		panic("unp_thread");
	}

	mutex_enter(&filelist_lock);
	for (;;) {
		KASSERT(mutex_owned(&filelist_lock));
		if (SLIST_EMPTY(&unp_thread_discard)) {
			if (unp_rights != 0) {
				(void)cv_timedwait(&unp_thread_cv,
				    &filelist_lock, hz);
			} else {
				cv_wait(&unp_thread_cv, &filelist_lock);
			}
		}
		unp_gc(dp);
	}
	/* NOTREACHED */
}

/*
 * Kick the garbage collector into action if there is something for
 * it to process.
 */
static void
unp_thread_kick(void)
{

	if (!SLIST_EMPTY(&unp_thread_discard) || unp_rights != 0) {
		mutex_enter(&filelist_lock);
		cv_signal(&unp_thread_cv);
		mutex_exit(&filelist_lock);
	}
}

void
unp_dispose(struct mbuf *m)
{

	if (m)
		unp_scan(m, unp_discard_later, 1);
}

void
unp_scan(struct mbuf *m0, void (*op)(file_t *), int discard)
{
	struct mbuf *m;
	file_t **rp, *fp;
	struct cmsghdr *cm;
	int i, qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type != MT_CONTROL ||
			    m->m_len < sizeof(*cm)) {
				continue;
			}
			cm = mtod(m, struct cmsghdr *);
			if (cm->cmsg_level != SOL_SOCKET ||
			    cm->cmsg_type != SCM_RIGHTS)
				continue;
			qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm)))
			    / sizeof(file_t *);
			rp = (file_t **)CMSG_DATA(cm);
			for (i = 0; i < qfds; i++) {
				fp = *rp;
				if (discard) {
					*rp = 0;
				}
				(*op)(fp);
				rp++;
			}
		}
		m0 = m0->m_nextpkt;
	}
}
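
/*
 * Illustrative sketch (not compiled): how garbage ends up in flight.
 * If one end of a socketpair is passed across the pair and both userland
 * descriptors are then closed, the only remaining reference to that end's
 * file is the SCM_RIGHTS message sitting in its own receive buffer
 * (f_count == f_msgcount), which only the mark-sweep pass above can find
 * and reclaim.  example_send_fd() is the sketch shown after
 * unp_externalize(); example_make_garbage() is hypothetical.
 */
#ifdef notdef
#include <sys/socket.h>
#include <unistd.h>

static void
example_make_garbage(void)
{
	int sv[2];

	if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sv) == -1)
		return;
	/* The message lands in sv[0]'s receive buffer, carrying sv[0]. */
	(void)example_send_fd(sv[1], sv[0]);
	close(sv[0]);
	close(sv[1]);
	/* The in-flight reference is now unreachable; unp_gc() frees it. */
}
#endif /* notdef */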
void
unp_mark(file_t *fp)
{

	if (fp == NULL)
		return;

	/* If we're already deferred, don't screw up the defer count */
	mutex_enter(&fp->f_lock);
	if (fp->f_flag & (FMARK | FDEFER)) {
		mutex_exit(&fp->f_lock);
		return;
	}

	/*
	 * Minimize the number of deferrals...  Sockets are the only type
	 * of file which can hold references to another file, so just mark
	 * other files, and defer unmarked sockets for the next pass.
	 */
	if (fp->f_type == DTYPE_SOCKET) {
		unp_defer++;
		KASSERT(fp->f_count != 0);
		atomic_or_uint(&fp->f_flag, FDEFER);
	} else {
		atomic_or_uint(&fp->f_flag, FMARK);
	}
	mutex_exit(&fp->f_lock);
}

static void
unp_discard_now(file_t *fp)
{

	if (fp == NULL)
		return;

	KASSERT(fp->f_count > 0);
	KASSERT(fp->f_msgcount > 0);

	mutex_enter(&fp->f_lock);
	fp->f_msgcount--;
	mutex_exit(&fp->f_lock);
	atomic_dec_uint(&unp_rights);
	(void)closef(fp);
}

static void
unp_discard_later(file_t *fp)
{

	if (fp == NULL)
		return;

	KASSERT(fp->f_count > 0);
	KASSERT(fp->f_msgcount > 0);

	mutex_enter(&filelist_lock);
	if (fp->f_unpcount++ == 0) {
		SLIST_INSERT_HEAD(&unp_thread_discard, fp, f_unplist);
	}
	mutex_exit(&filelist_lock);
}

const struct pr_usrreqs unp_usrreqs = {
	.pr_attach	= unp_attach,
	.pr_detach	= unp_detach,
	.pr_accept	= unp_accept,
	.pr_bind	= unp_bind,
	.pr_listen	= unp_listen,
	.pr_connect	= unp_connect,
	.pr_connect2	= unp_connect2,
	.pr_disconnect	= unp_disconnect,
	.pr_shutdown	= unp_shutdown,
	.pr_abort	= unp_abort,
	.pr_ioctl	= unp_ioctl,
	.pr_stat	= unp_stat,
	.pr_peeraddr	= unp_peeraddr,
	.pr_sockaddr	= unp_sockaddr,
	.pr_rcvd	= unp_rcvd,
	.pr_recvoob	= unp_recvoob,
	.pr_send	= unp_send,
	.pr_sendoob	= unp_sendoob,
	.pr_generic	= unp_usrreq,
};