1 /* $OpenBSD: uipc_usrreq.c,v 1.100 2016/07/19 05:30:48 tedu Exp $ */ 2 /* $NetBSD: uipc_usrreq.c,v 1.18 1996/02/09 19:00:50 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1982, 1986, 1989, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 *
 * @(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/unpcb.h>
#include <sys/un.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/mbuf.h>
#include <sys/task.h>
#include <sys/pledge.h>

void	uipc_setaddr(const struct unpcb *, struct mbuf *);

/* list of all UNIX domain sockets, for unp_gc() */
LIST_HEAD(unp_head, unpcb) unp_head = LIST_HEAD_INITIALIZER(&unp_head);

/*
 * Stack of sets of files that were passed over a socket but were
 * not received and need to be closed.  unp_discard() queues one of
 * these per discarded SCM_RIGHTS message; unp_gc() drains the list.
 */
struct unp_deferral {
	SLIST_ENTRY(unp_deferral) ud_link;	/* entry on unp_deferred */
	int	ud_n;				/* number of pointers that follow */
	/* followed by ud_n struct file * pointers */
	struct file *ud_fp[];
};

/* list of sets of files that were sent over sockets that are now closed */
SLIST_HEAD(,unp_deferral) unp_deferred = SLIST_HEAD_INITIALIZER(&unp_deferred);

/* runs unp_gc() on the system task queue (see unp_detach/unp_discard) */
struct task unp_gc_task = TASK_INITIALIZER(unp_gc, NULL);


/*
 * Unix communications domain.
 *
 * TODO:
 *	RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */
struct sockaddr sun_noname = { sizeof(sun_noname), AF_UNIX };
ino_t	unp_ino;		/* prototype for fake inode numbers */

/*
 * Copy the peer/local name of `unp' into the caller-supplied mbuf.
 * If the pcb is NULL or has no bound address, a blank AF_UNIX
 * sockaddr (sun_noname) is returned instead.
 * NOTE(review): assumes `nam' has room for a full sockaddr_un —
 * callers pass a freshly allocated MT_SONAME mbuf; confirm at call sites.
 */
void
uipc_setaddr(const struct unpcb *unp, struct mbuf *nam)
{
	if (unp != NULL && unp->unp_addr != NULL) {
		nam->m_len = unp->unp_addr->m_len;
		memcpy(mtod(nam, caddr_t), mtod(unp->unp_addr, caddr_t),
		    nam->m_len);
	} else {
		nam->m_len = sizeof(sun_noname);
		memcpy(mtod(nam, struct sockaddr *), &sun_noname,
		    nam->m_len);
	}
}

/*
 * Protocol user-request switch for the UNIX domain: dispatches each
 * PRU_* request from the socket layer to the matching unp_* routine.
 *
 * Ownership: on exit through `release', any mbufs still held in `m'
 * and `control' are freed here; handlers that consume them set the
 * pointers to NULL.  PRU_SENSE and PRU_RCVOOB return directly and
 * bypass that cleanup (for PRU_SENSE, `m' is actually a struct stat
 * pointer, not an mbuf).
 */
int
uipc_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
    struct mbuf *control, struct proc *p)
{
	struct unpcb *unp = sotounpcb(so);
	struct socket *so2;
	int error = 0;

	if (req == PRU_CONTROL)
		return (EOPNOTSUPP);
	/* only PRU_SEND may carry control data (SCM_RIGHTS etc.) */
	if (req != PRU_SEND && control && control->m_len) {
		error = EOPNOTSUPP;
		goto release;
	}
	/* every request except attach needs an existing pcb */
	if (unp == NULL && req != PRU_ATTACH) {
		error = EINVAL;
		goto release;
	}
	switch (req) {

	case PRU_ATTACH:
		if (unp) {
			error = EISCONN;
			break;
		}
		error = unp_attach(so);
		break;

	case PRU_DETACH:
		unp_detach(unp);
		break;

	case PRU_BIND:
		error = unp_bind(unp, nam, p);
		break;

	case PRU_LISTEN:
		/* listening requires a bound (filesystem-backed) socket */
		if (unp->unp_vnode == NULL)
			error = EINVAL;
		break;

	case PRU_CONNECT:
		error = unp_connect(so, nam, p);
		break;

	case PRU_CONNECT2:
		/* socketpair(2) path: `nam' smuggles the second socket */
		error = unp_connect2(so, (struct socket *)nam);
		break;

	case PRU_DISCONNECT:
		unp_disconnect(unp);
		break;

	case PRU_ACCEPT:
		/*
		 * Pass back name of connected socket,
		 * if it was bound and we are still connected
		 * (our peer may have closed already!).
		 */
		uipc_setaddr(unp->unp_conn, nam);
		break;

	case PRU_SHUTDOWN:
		socantsendmore(so);
		unp_shutdown(unp);
		break;

	case PRU_RCVD:
		/* receiver consumed data: propagate space back to sender */
		switch (so->so_type) {

		case SOCK_DGRAM:
			panic("uipc 1");
			/*NOTREACHED*/

		case SOCK_STREAM:
		case SOCK_SEQPACKET:
#define	rcv (&so->so_rcv)
#define	snd (&so2->so_snd)
			if (unp->unp_conn == NULL)
				break;
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Adjust backpressure on sender
			 * and wakeup any waiting to write.
			 */
			snd->sb_mbcnt = rcv->sb_mbcnt;
			snd->sb_cc = rcv->sb_cc;
			sowwakeup(so2);
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 2");
		}
		break;

	case PRU_SEND:
		/* convert fds in the control message to struct file *'s */
		if (control && (error = unp_internalize(control, p)))
			break;
		switch (so->so_type) {

		case SOCK_DGRAM: {
			struct sockaddr *from;

			if (nam) {
				/* sendto() with address: temporary connect */
				if (unp->unp_conn) {
					error = EISCONN;
					break;
				}
				error = unp_connect(so, nam, p);
				if (error)
					break;
			} else {
				if (unp->unp_conn == NULL) {
					error = ENOTCONN;
					break;
				}
			}
			so2 = unp->unp_conn->unp_socket;
			if (unp->unp_addr)
				from = mtod(unp->unp_addr, struct sockaddr *);
			else
				from = &sun_noname;
			/* on success the mbufs belong to so2's receive buffer */
			if (sbappendaddr(&so2->so_rcv, from, m, control)) {
				sorwakeup(so2);
				m = NULL;
				control = NULL;
			} else
				error = ENOBUFS;
			if (nam)
				unp_disconnect(unp);
			break;
		}

		case SOCK_STREAM:
		case SOCK_SEQPACKET:
#define	rcv (&so2->so_rcv)
#define	snd (&so->so_snd)
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				break;
			}
			if (unp->unp_conn == NULL) {
				error = ENOTCONN;
				break;
			}
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Send to paired receive port, and then raise
			 * send buffer counts to maintain backpressure.
			 * Wake up readers.
			 */
			if (control) {
				if (sbappendcontrol(rcv, m, control))
					control = NULL;
			} else if (so->so_type == SOCK_SEQPACKET)
				sbappendrecord(rcv, m);
			else
				sbappend(rcv, m);
			snd->sb_mbcnt = rcv->sb_mbcnt;
			snd->sb_cc = rcv->sb_cc;
			sorwakeup(so2);
			m = NULL;
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 4");
		}
		/* we need to undo unp_internalize in case of errors */
		if (control && error)
			unp_dispose(control);
		break;

	case PRU_ABORT:
		unp_drop(unp, ECONNABORTED);
		break;

	case PRU_SENSE: {
		/* fstat(2): `m' is a struct stat *, not an mbuf */
		struct stat *sb = (struct stat *)m;

		sb->st_blksize = so->so_snd.sb_hiwat;
		sb->st_dev = NODEV;
		/* hand out a fake inode number on first stat */
		if (unp->unp_ino == 0)
			unp->unp_ino = unp_ino++;
		sb->st_atim.tv_sec =
		    sb->st_mtim.tv_sec =
		    sb->st_ctim.tv_sec = unp->unp_ctime.tv_sec;
		sb->st_atim.tv_nsec =
		    sb->st_mtim.tv_nsec =
		    sb->st_ctim.tv_nsec = unp->unp_ctime.tv_nsec;
		sb->st_ino = unp->unp_ino;
		return (0);
	}

	case PRU_RCVOOB:
		return (EOPNOTSUPP);

	case PRU_SENDOOB:
		error = EOPNOTSUPP;
		break;

	case PRU_SOCKADDR:
		uipc_setaddr(unp, nam);
		break;

	case PRU_PEERADDR:
		uipc_setaddr(unp->unp_conn, nam);
		break;

	case PRU_SLOWTIMO:
		break;

	default:
		panic("piusrreq");
	}
release:
	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	return (error);
}

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#define	PIPSIZ	4096
u_long	unpst_sendspace = PIPSIZ;
u_long	unpst_recvspace = PIPSIZ;
u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
u_long	unpdg_recvspace = 4*1024;

int	unp_rights;			/* file descriptors in flight */

/*
 * Allocate and initialize a new UNIX-domain pcb for `so', reserving
 * default buffer space first if the caller hasn't set any.
 * Returns 0, ENOBUFS, or a soreserve() error.
 */
int
unp_attach(struct socket *so)
{
	struct unpcb *unp;
	int error;

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		switch (so->so_type) {

		case SOCK_STREAM:
		case SOCK_SEQPACKET:
			error = soreserve(so, unpst_sendspace, unpst_recvspace);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			return (error);
	}
	unp = malloc(sizeof(*unp), M_PCB, M_NOWAIT|M_ZERO);
	if (unp == NULL)
		return (ENOBUFS);
	unp->unp_socket = so;
	so->so_pcb = unp;
	/* creation time is reported by PRU_SENSE (fstat) */
	getnanotime(&unp->unp_ctime);
	LIST_INSERT_HEAD(&unp_head, unp, unp_link);
	return (0);
}

/*
 * Tear down a pcb: unhook its vnode, disconnect it and any datagram
 * peers still referencing it, then free it.  If descriptors are in
 * flight, schedule the garbage collector in case this pcb's receive
 * buffer held SCM_RIGHTS messages.
 */
void
unp_detach(struct unpcb *unp)
{
	struct vnode *vp;

	LIST_REMOVE(unp, unp_link);
	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = NULL;
		vp = unp->unp_vnode;
		unp->unp_vnode = NULL;
		vrele(vp);
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	/* reset every datagram sender still pointing at us */
	while (!SLIST_EMPTY(&unp->unp_refs))
		unp_drop(SLIST_FIRST(&unp->unp_refs), ECONNRESET);
	soisdisconnected(unp->unp_socket);
	unp->unp_socket->so_pcb = NULL;
	m_freem(unp->unp_addr);
	free(unp, M_PCB, sizeof *unp);
	if (unp_rights)
		task_add(systq, &unp_gc_task);
}

/*
 * bind(2): validate the sockaddr_un in `nam', take a private copy,
 * and create the filesystem node (VSOCK vnode) that names this socket.
 * On success the pcb holds a reference to the new vnode and records
 * the binder's credentials (UNP_FEIDSBIND) for getpeereid().
 */
int
unp_bind(struct unpcb *unp, struct mbuf *nam, struct proc *p)
{
	struct sockaddr_un *soun = mtod(nam, struct sockaddr_un *);
	struct mbuf *nam2;
	struct vnode *vp;
	struct vattr vattr;
	int error;
	struct nameidata nd;
	size_t pathlen;

	if (unp->unp_vnode != NULL)
		return (EINVAL);

	if (soun->sun_len > sizeof(struct sockaddr_un) ||
	    soun->sun_len < offsetof(struct sockaddr_un, sun_path))
		return (EINVAL);
	if (soun->sun_family != AF_UNIX)
		return (EAFNOSUPPORT);

	/* the path must leave room for a NUL within sun_path */
	pathlen = strnlen(soun->sun_path, soun->sun_len -
	    offsetof(struct sockaddr_un, sun_path));
	if (pathlen == sizeof(soun->sun_path))
		return (EINVAL);

	nam2 = m_getclr(M_WAITOK, MT_SONAME);
	nam2->m_len = sizeof(struct sockaddr_un);
	memcpy(mtod(nam2, struct sockaddr_un *), soun,
	    offsetof(struct sockaddr_un, sun_path) + pathlen);
	/* No need to NUL terminate: m_getclr() returns zero'd mbufs. */

	soun = mtod(nam2, struct sockaddr_un *);

	/* Fixup sun_len to keep it in sync with m_len. */
	soun->sun_len = nam2->m_len;

	NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE,
	    soun->sun_path, p);
	nd.ni_pledge = PLEDGE_UNIX;
	/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	if ((error = namei(&nd)) != 0) {
		m_freem(nam2);
		return (error);
	}
	vp = nd.ni_vp;
	if (vp != NULL) {
		/* name already exists: abort the pending create */
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(vp);
		m_freem(nam2);
		return (EADDRINUSE);
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	if (error) {
		m_freem(nam2);
		return (error);
	}
	unp->unp_addr = nam2;
	vp = nd.ni_vp;
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_connid.uid = p->p_ucred->cr_uid;
	unp->unp_connid.gid = p->p_ucred->cr_gid;
	unp->unp_connid.pid = p->p_p->ps_pid;
	unp->unp_flags |= UNP_FEIDSBIND;
	VOP_UNLOCK(vp, p);
	return (0);
}

/*
 * connect(2): look up the pathname in `nam', check it refers to a
 * VSOCK vnode we may write to, and connect to the socket bound there.
 * For connection-oriented types a fresh server socket is spawned with
 * sonewconn() and credentials are exchanged in both directions.
 */
int
unp_connect(struct socket *so, struct mbuf *nam, struct proc *p)
{
	struct sockaddr_un *soun = mtod(nam, struct sockaddr_un *);
	struct vnode *vp;
	struct socket *so2, *so3;
	struct unpcb *unp, *unp2, *unp3;
	int error;
	struct nameidata nd;

	if (soun->sun_family != AF_UNIX)
		return (EAFNOSUPPORT);

	/* ensure sun_path is NUL terminated within the mbuf */
	if (nam->m_len < sizeof(struct sockaddr_un))
		*(mtod(nam, caddr_t) + nam->m_len) = 0;
	else if (nam->m_len > sizeof(struct sockaddr_un))
		return (EINVAL);
	else if (memchr(soun->sun_path, '\0', sizeof(soun->sun_path)) == NULL)
		return (EINVAL);

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, soun->sun_path, p);
	nd.ni_pledge = PLEDGE_UNIX;
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	if ((error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) != 0)
		goto bad;
	so2 = vp->v_socket;
	if (so2 == NULL) {
		/* the binder has since closed its socket */
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto bad;
	}
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, 0)) == 0) {
			error = ECONNREFUSED;
			goto bad;
		}
		unp = sotounpcb(so);
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr)
			unp3->unp_addr =
			    m_copym(unp2->unp_addr, 0, M_COPYALL, M_NOWAIT);
		/* give the server our credentials ... */
		unp3->unp_connid.uid = p->p_ucred->cr_uid;
		unp3->unp_connid.gid = p->p_ucred->cr_gid;
		unp3->unp_connid.pid = p->p_p->ps_pid;
		unp3->unp_flags |= UNP_FEIDS;
		/* ... and connect to the spawned socket, not the listener */
		so2 = so3;
		if (unp2->unp_flags & UNP_FEIDSBIND) {
			/* and take the binder's credentials for ourselves */
			unp->unp_connid = unp2->unp_connid;
			unp->unp_flags |= UNP_FEIDS;
		}
	}
	error = unp_connect2(so, so2);
bad:
	vput(vp);
	return (error);
}

/*
 * Wire two sockets together.  Datagram sockets get a one-way link
 * (the peer keeps a list of senders in unp_refs); stream/seqpacket
 * sockets are linked symmetrically and both are marked connected.
 */
int
unp_connect2(struct socket *so, struct socket *so2)
{
	struct unpcb *unp = sotounpcb(so);
	struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return (EPROTOTYPE);
	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		SLIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_nextref);
		soisconnected(so);
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect2");
	}
	return (0);
}

/*
 * Undo unp_connect2(): unlink `unp' from its peer.  For stream types
 * both sides are fully disconnected and their send-buffer usage
 * counters (mirrored from the peer's receive buffer) are reset.
 */
void
unp_disconnect(struct unpcb *unp)
{
	struct unpcb *unp2 = unp->unp_conn;

	if (unp2 == NULL)
		return;
	unp->unp_conn = NULL;
	switch (unp->unp_socket->so_type) {

	case SOCK_DGRAM:
		SLIST_REMOVE(&unp2->unp_refs, unp, unpcb, unp_nextref);
		unp->unp_socket->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		unp->unp_socket->so_snd.sb_mbcnt = 0;
		unp->unp_socket->so_snd.sb_cc = 0;
		soisdisconnected(unp->unp_socket);
		unp2->unp_conn = NULL;
		unp2->unp_socket->so_snd.sb_mbcnt = 0;
		unp2->unp_socket->so_snd.sb_cc = 0;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}

/*
 * shutdown(2) on the send side: tell a stream/seqpacket peer that no
 * more data will arrive.  Datagram sockets have nothing to signal.
 */
void
unp_shutdown(struct unpcb *unp)
{
	struct socket *so;

	switch (unp->unp_socket->so_type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (unp->unp_conn && (so = unp->unp_conn->unp_socket))
			socantrcvmore(so);
		break;
	default:
		break;
	}
}

/*
 * Abort a connection with error `errno' (e.g. ECONNRESET when the
 * datagram peer detaches).  If the socket is still on a listen queue
 * (so_head set), it has no file descriptor owner, so free the pcb
 * here as well.
 */
void
unp_drop(struct unpcb *unp, int errno)
{
	struct socket *so = unp->unp_socket;

	so->so_error = errno;
	unp_disconnect(unp);
	if (so->so_head) {
		so->so_pcb = NULL;
		sofree(so);
		m_freem(unp->unp_addr);
		free(unp, M_PCB, sizeof *unp);
	}
}

#ifdef notdef
unp_drain(void)
{

}
#endif

extern struct domain unixdomain;

/*
 * Map a struct file back to its UNIX-domain pcb, or NULL if the file
 * is not a UNIX-domain socket.  Used by the fd-passing accounting and
 * the garbage collector.
 */
static struct unpcb *
fptounp(struct file *fp)
{
	struct socket *so;

	if (fp->f_type != DTYPE_SOCKET)
		return (NULL);
	if ((so = fp->f_data) == NULL)
		return (NULL);
	if (so->so_proto->pr_domain != &unixdomain)
		return (NULL);
	return (sotounpcb(so));
}

/*
 * Receive side of fd passing: convert the struct file pointers inside
 * an SCM_RIGHTS message back into file descriptors in the receiving
 * process.  On any error every file in the message is handed to
 * unp_discard() so the garbage collector closes it.
 * `controllen' bounds how many fds the receiver's buffer can take;
 * `flags' may carry MSG_CMSG_CLOEXEC.
 */
int
unp_externalize(struct mbuf *rights, socklen_t controllen, int flags)
{
	struct proc *p = curproc;		/* XXX */
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	int i, *fdp = NULL;
	struct file **rp;
	struct file *fp;
	int nfds, error = 0;

	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) /
	    sizeof(struct file *);
	if (controllen < CMSG_ALIGN(sizeof(struct cmsghdr)))
		controllen = 0;
	else
		controllen -= CMSG_ALIGN(sizeof(struct cmsghdr));
	if (nfds > controllen / sizeof(int)) {
		/* fdp stays NULL; the restart path only discards */
		error = EMSGSIZE;
		goto restart;
	}

	/* Make sure the recipient should be able to see the descriptors.. */
	rp = (struct file **)CMSG_DATA(cm);
	for (i = 0; i < nfds; i++) {
		fp = *rp++;
		error = pledge_recvfd(p, fp);
		if (error)
			break;

		/*
		 * No to block devices.  If passing a directory,
		 * make sure that it is underneath the root.
		 */
		if (p->p_fd->fd_rdir != NULL && fp->f_type == DTYPE_VNODE) {
			struct vnode *vp = (struct vnode *)fp->f_data;

			if (vp->v_type == VBLK ||
			    (vp->v_type == VDIR &&
			    !vn_isunder(vp, p->p_fd->fd_rdir, p))) {
				error = EPERM;
				break;
			}
		}
	}

	fdp = mallocarray(nfds, sizeof(int), M_TEMP, M_WAITOK);

restart:
	fdplock(p->p_fd);
	if (error != 0) {
		/* on error, ditch every file in the message */
		if (nfds > 0) {
			rp = ((struct file **)CMSG_DATA(cm));
			unp_discard(rp, nfds);
		}
		goto out;
	}

	/*
	 * First loop -- allocate file descriptor table slots for the
	 * new descriptors.
	 */
	rp = ((struct file **)CMSG_DATA(cm));
	for (i = 0; i < nfds; i++) {
		if ((error = fdalloc(p, 0, &fdp[i])) != 0) {
			/*
			 * Back out what we've done so far.
			 */
			for (--i; i >= 0; i--)
				fdremove(p->p_fd, fdp[i]);

			if (error == ENOSPC) {
				/* grow the table and retry from scratch */
				fdexpand(p);
				error = 0;
			} else {
				/*
				 * This is the error that has historically
				 * been returned, and some callers may
				 * expect it.
				 */
				error = EMSGSIZE;
			}
			fdpunlock(p->p_fd);
			goto restart;
		}

		/*
		 * Make the slot reference the descriptor so that
		 * fdalloc() works properly.. We finalize it all
		 * in the loop below.
		 */
		p->p_fd->fd_ofiles[fdp[i]] = *rp++;

		if (flags & MSG_CMSG_CLOEXEC)
			p->p_fd->fd_ofileflags[fdp[i]] |= UF_EXCLOSE;
	}

	/*
	 * Now that adding them has succeeded, update all of the
	 * descriptor passing state.
	 */
	rp = (struct file **)CMSG_DATA(cm);
	for (i = 0; i < nfds; i++) {
		struct unpcb *unp;

		fp = *rp++;
		if ((unp = fptounp(fp)) != NULL)
			unp->unp_msgcount--;
		unp_rights--;
	}

	/*
	 * Copy temporary array to message and adjust length, in case of
	 * transition from large struct file pointers to ints.
	 */
	memcpy(CMSG_DATA(cm), fdp, nfds * sizeof(int));
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(int));
	rights->m_len = CMSG_LEN(nfds * sizeof(int));
out:
	fdpunlock(p->p_fd);
	if (fdp)
		free(fdp, M_TEMP, nfds * sizeof(int));
	return (error);
}

/*
 * Send side of fd passing: replace the int file descriptors in an
 * SCM_RIGHTS message with struct file pointers, taking a reference on
 * each file and bumping the in-flight accounting (unp_rights,
 * unp_msgcount).  The conversion is done in place, growing the mbuf
 * to a cluster if the pointers need more room than the ints did.
 * On failure all references taken so far are released.
 */
int
unp_internalize(struct mbuf *control, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct file **rp, *fp;
	struct unpcb *unp;
	int i, error;
	int nfds, *ip, fd, neededspace;

	/*
	 * Check for two potential msg_controllen values because
	 * IETF stuck their nose in a place it does not belong.
	 */
	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    !(cm->cmsg_len == control->m_len ||
	    control->m_len == CMSG_ALIGN(cm->cmsg_len)))
		return (EINVAL);
	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) / sizeof (int);

	/* global cap: at most 10% of all files may be in flight */
	if (unp_rights + nfds > maxfiles / 10)
		return (EMFILE);

	/* Make sure we have room for the struct file pointers */
morespace:
	neededspace = CMSG_SPACE(nfds * sizeof(struct file *)) -
	    control->m_len;
	if (neededspace > M_TRAILINGSPACE(control)) {
		char *tmp;
		/* if we already have a cluster, the message is just too big */
		if (control->m_flags & M_EXT)
			return (E2BIG);

		/* copy cmsg data temporarily out of the mbuf */
		tmp = malloc(control->m_len, M_TEMP, M_WAITOK);
		memcpy(tmp, mtod(control, caddr_t), control->m_len);

		/* allocate a cluster and try again */
		MCLGET(control, M_WAIT);
		if ((control->m_flags & M_EXT) == 0) {
			free(tmp, M_TEMP, control->m_len);
			return (ENOBUFS);       /* allocation failed */
		}

		/* copy the data back into the cluster */
		cm = mtod(control, struct cmsghdr *);
		memcpy(cm, tmp, control->m_len);
		free(tmp, M_TEMP, control->m_len);
		goto morespace;
	}

	/* adjust message & mbuf to note amount of space actually used. */
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(struct file *));
	control->m_len = CMSG_SPACE(nfds * sizeof(struct file *));

	/*
	 * Walk backwards so the wider pointers never overwrite ints
	 * that haven't been read yet.
	 */
	ip = ((int *)CMSG_DATA(cm)) + nfds - 1;
	rp = ((struct file **)CMSG_DATA(cm)) + nfds - 1;
	for (i = 0; i < nfds; i++) {
		memcpy(&fd, ip, sizeof fd);
		ip--;
		if ((fp = fd_getfile(fdp, fd)) == NULL) {
			error = EBADF;
			goto fail;
		}
		if (fp->f_count == LONG_MAX-2) {
			error = EDEADLK;
			goto fail;
		}
		error = pledge_sendfd(p, fp);
		if (error)
			goto fail;

		/* kqueue descriptors cannot be copied */
		if (fp->f_type == DTYPE_KQUEUE) {
			error = EINVAL;
			goto fail;
		}
		memcpy(rp, &fp, sizeof fp);
		rp--;
		fp->f_count++;
		if ((unp = fptounp(fp)) != NULL) {
			unp->unp_file = fp;
			unp->unp_msgcount++;
		}
		unp_rights++;
	}
	return (0);
fail:
	/* Back out what we just did. */
	for ( ; i > 0; i--) {
		rp++;
		memcpy(&fp, rp, sizeof(fp));
		fp->f_count--;
		if ((unp = fptounp(fp)) != NULL)
			unp->unp_msgcount--;
		unp_rights--;
	}

	return (error);
}

int	unp_defer, unp_gcing;

/*
 * Garbage collector for descriptors in flight.  First closes any file
 * sets queued by unp_discard(), then runs a mark-and-sweep over all
 * UNIX-domain pcbs to find sockets that are referenced only by
 * in-flight SCM_RIGHTS messages (f_count == unp_msgcount) and are
 * therefore part of an unreachable cycle; their receive buffers are
 * disposed of, which breaks the cycle.  Re-entry is prevented with
 * the unp_gcing flag.
 */
void
unp_gc(void *arg __unused)
{
	struct unp_deferral *defer;
	struct file *fp;
	struct socket *so;
	struct unpcb *unp;
	int nunref, i;

	if (unp_gcing)
		return;
	unp_gcing = 1;

	/* close any fds on the deferred list */
	while ((defer = SLIST_FIRST(&unp_deferred)) != NULL) {
		SLIST_REMOVE_HEAD(&unp_deferred, ud_link);
		for (i = 0; i < defer->ud_n; i++) {
			fp = defer->ud_fp[i];
			if (fp == NULL)
				continue;
			FREF(fp);
			if ((unp = fptounp(fp)) != NULL)
				unp->unp_msgcount--;
			unp_rights--;
			(void) closef(fp, NULL);
		}
		free(defer, M_TEMP, sizeof(*defer) + sizeof(fp) * defer->ud_n);
	}

	/* clear all GC state and iterate until no deferrals remain */
	unp_defer = 0;
	LIST_FOREACH(unp, &unp_head, unp_link)
		unp->unp_flags &= ~(UNP_GCMARK | UNP_GCDEFER | UNP_GCDEAD);
	do {
		nunref = 0;
		LIST_FOREACH(unp, &unp_head, unp_link) {
			if (unp->unp_flags & UNP_GCDEFER) {
				/*
				 * This socket is referenced by another
				 * socket which is known to be live,
				 * so it's certainly live.
				 */
				unp->unp_flags &= ~UNP_GCDEFER;
				unp_defer--;
			} else if (unp->unp_flags & UNP_GCMARK) {
				/* marked as live in previous pass */
				continue;
			} else if ((fp = unp->unp_file) == NULL) {
				/* not being passed, so can't be in loop */
			} else if (fp->f_count == 0) {
				/*
				 * Already being closed, let normal close
				 * path take its course
				 */
			} else {
				/*
				 * Unreferenced by other sockets so far,
				 * so if all the references (f_count) are
				 * from passing (unp_msgcount) then this
				 * socket is prospectively dead
				 */
				if (fp->f_count == unp->unp_msgcount) {
					nunref++;
					unp->unp_flags |= UNP_GCDEAD;
					continue;
				}
			}

			/*
			 * This is the first time we've seen this socket on
			 * the mark pass and known it has a live reference,
			 * so mark it, then scan its receive buffer for
			 * sockets and note them as deferred (== referenced,
			 * but not yet marked).
			 */
			unp->unp_flags |= UNP_GCMARK;

			so = unp->unp_socket;
#ifdef notdef
			if (so->so_rcv.sb_flags & SB_LOCK) {
				/*
				 * This is problematical; it's not clear
				 * we need to wait for the sockbuf to be
				 * unlocked (on a uniprocessor, at least),
				 * and it's also not clear what to do
				 * if sbwait returns an error due to receipt
				 * of a signal.  If sbwait does return
				 * an error, we'll go into an infinite
				 * loop.  Delete all of this for now.
				 */
				(void) sbwait(&so->so_rcv);
				goto restart;
			}
#endif
			unp_scan(so->so_rcv.sb_mb, unp_mark);
		}
	} while (unp_defer);

	/*
	 * If there are any unreferenced sockets, then for each dispose
	 * of files in its receive buffer and then close it.
	 */
	if (nunref) {
		LIST_FOREACH(unp, &unp_head, unp_link) {
			if (unp->unp_flags & UNP_GCDEAD)
				unp_scan(unp->unp_socket->so_rcv.sb_mb,
				    unp_discard);
		}
	}
	unp_gcing = 0;
}

/*
 * Discard all files referenced by SCM_RIGHTS messages in mbuf chain
 * `m' — called when control data is dropped before being received.
 */
void
unp_dispose(struct mbuf *m)
{

	if (m)
		unp_scan(m, unp_discard);
}

/*
 * Walk a receive-buffer mbuf chain, applying `op' (unp_mark or
 * unp_discard) to the file pointer array of each SCM_RIGHTS control
 * message found.  Only the first control mbuf of each record is
 * examined (see the XXX break below).
 */
void
unp_scan(struct mbuf *m0, void (*op)(struct file **, int))
{
	struct mbuf *m;
	struct file **rp;
	struct cmsghdr *cm;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next) {
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - CMSG_ALIGN(sizeof *cm))
				    / sizeof(struct file *);
				if (qfds > 0) {
					rp = (struct file **)CMSG_DATA(cm);
					op(rp, qfds);
				}
				break;		/* XXX, but saves time */
			}
		}
		m0 = m0->m_nextpkt;
	}
}

/*
 * GC mark callback: note that each UNIX-domain socket in `rp' is
 * reachable — defer it for the next mark pass and clear any
 * provisional dead flag.
 */
void
unp_mark(struct file **rp, int nfds)
{
	struct unpcb *unp;
	int i;

	for (i = 0; i < nfds; i++) {
		if (rp[i] == NULL)
			continue;

		unp = fptounp(rp[i]);
		if (unp == NULL)
			continue;

		if (unp->unp_flags & (UNP_GCMARK|UNP_GCDEFER))
			continue;

		unp_defer++;
		unp->unp_flags |= UNP_GCDEFER;
		unp->unp_flags &= ~UNP_GCDEAD;
	}
}

/*
 * Queue a set of in-flight files for closing by unp_gc().  The
 * pointers are copied to a deferral record and zeroed in place so
 * the message can no longer reference them, then the GC task is
 * scheduled.
 */
void
unp_discard(struct file **rp, int nfds)
{
	struct unp_deferral *defer;

	/* copy the file pointers to a deferral structure */
	defer = malloc(sizeof(*defer) + sizeof(*rp) * nfds, M_TEMP, M_WAITOK);
	defer->ud_n = nfds;
	memcpy(&defer->ud_fp[0], rp, sizeof(*rp) * nfds);
	memset(rp, 0, sizeof(*rp) * nfds);
	SLIST_INSERT_HEAD(&unp_deferred, defer, ud_link);

	task_add(systq, &unp_gc_task);
}