/*	$OpenBSD: uipc_usrreq.c,v 1.15 2001/10/26 12:03:27 art Exp $	*/
/*	$NetBSD: uipc_usrreq.c,v 1.18 1996/02/09 19:00:50 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_usrreq.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/unpcb.h>
#include <sys/un.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/mbuf.h>

/*
 * Unix communications domain.
 *
 * TODO:
 *	SEQPACKET, RDM
 *	rethink name space problems
 *	need a proper out-of-band
 */
struct	sockaddr sun_noname = { sizeof(sun_noname), AF_UNIX };
ino_t	unp_ino;			/* prototype for fake inode numbers */

/*ARGSUSED*/
int
uipc_usrreq(so, req, m, nam, control)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *control;
{
	struct unpcb *unp = sotounpcb(so);
	register struct socket *so2;
	register int error = 0;
	struct proc *p = curproc;	/* XXX */

	if (req == PRU_CONTROL)
		return (EOPNOTSUPP);
	if (req != PRU_SEND && control && control->m_len) {
		error = EOPNOTSUPP;
		goto release;
	}
	if (unp == 0 && req != PRU_ATTACH) {
		error = EINVAL;
		goto release;
	}
	switch (req) {

	case PRU_ATTACH:
		if (unp) {
			error = EISCONN;
			break;
		}
		error = unp_attach(so);
		break;

	case PRU_DETACH:
		unp_detach(unp);
		break;

	case PRU_BIND:
		error = unp_bind(unp, nam, p);
		break;

	case PRU_LISTEN:
		if (unp->unp_vnode == 0)
			error = EINVAL;
		break;

	case PRU_CONNECT:
		error = unp_connect(so, nam, p);
		break;

	case PRU_CONNECT2:
		error = unp_connect2(so, (struct socket *)nam);
		break;

	case PRU_DISCONNECT:
		unp_disconnect(unp);
		break;

	case PRU_ACCEPT:
		/*
		 * Pass back name of connected socket,
		 * if it was bound and we are still connected
		 * (our peer may have closed already!).
		 */
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			nam->m_len = unp->unp_conn->unp_addr->m_len;
			bcopy(mtod(unp->unp_conn->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else {
			nam->m_len = sizeof(sun_noname);
			*(mtod(nam, struct sockaddr *)) = sun_noname;
		}
		break;

	case PRU_SHUTDOWN:
		socantsendmore(so);
		unp_shutdown(unp);
		break;

	case PRU_RCVD:
		switch (so->so_type) {

		case SOCK_DGRAM:
			panic("uipc 1");
			/*NOTREACHED*/

		case SOCK_STREAM:
#define	rcv (&so->so_rcv)
#define	snd (&so2->so_snd)
			if (unp->unp_conn == 0)
				break;
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Adjust backpressure on sender
			 * and wakeup any waiting to write.
			 */
			snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt;
			unp->unp_mbcnt = rcv->sb_mbcnt;
			snd->sb_hiwat += unp->unp_cc - rcv->sb_cc;
			unp->unp_cc = rcv->sb_cc;
			sowwakeup(so2);
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 2");
		}
		break;

	case PRU_SEND:
		if (control && (error = unp_internalize(control, p)))
			break;
		switch (so->so_type) {

		case SOCK_DGRAM: {
			struct sockaddr *from;

			if (nam) {
				if (unp->unp_conn) {
					error = EISCONN;
					break;
				}
				error = unp_connect(so, nam, p);
				if (error)
					break;
			} else {
				if (unp->unp_conn == 0) {
					error = ENOTCONN;
					break;
				}
			}
			so2 = unp->unp_conn->unp_socket;
			if (unp->unp_addr)
				from = mtod(unp->unp_addr, struct sockaddr *);
			else
				from = &sun_noname;
			if (sbappendaddr(&so2->so_rcv, from, m, control)) {
				sorwakeup(so2);
				m = 0;
				control = 0;
			} else
				error = ENOBUFS;
			if (nam)
				unp_disconnect(unp);
			break;
		}

		case SOCK_STREAM:
#define	rcv (&so2->so_rcv)
#define	snd (&so->so_snd)
			if (so->so_state & SS_CANTSENDMORE) {
				error = EPIPE;
				break;
			}
			if (unp->unp_conn == 0)
				panic("uipc 3");
			so2 = unp->unp_conn->unp_socket;
			/*
			 * Send to paired receive port, and then reduce
			 * send buffer hiwater marks to maintain backpressure.
			 * Wake up readers.
			 */
			if (control) {
				if (sbappendcontrol(rcv, m, control))
					control = 0;
			} else
				sbappend(rcv, m);
			snd->sb_mbmax -=
			    rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt;
			unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt;
			snd->sb_hiwat -= rcv->sb_cc - unp->unp_conn->unp_cc;
			unp->unp_conn->unp_cc = rcv->sb_cc;
			sorwakeup(so2);
			m = 0;
#undef snd
#undef rcv
			break;

		default:
			panic("uipc 4");
		}
		break;

	case PRU_ABORT:
		unp_drop(unp, ECONNABORTED);
		break;

	case PRU_SENSE:
		((struct stat *) m)->st_blksize = so->so_snd.sb_hiwat;
		if (so->so_type == SOCK_STREAM && unp->unp_conn != 0) {
			so2 = unp->unp_conn->unp_socket;
			((struct stat *) m)->st_blksize += so2->so_rcv.sb_cc;
		}
		((struct stat *) m)->st_dev = NODEV;
		if (unp->unp_ino == 0)
			unp->unp_ino = unp_ino++;
		((struct stat *) m)->st_atimespec =
		    ((struct stat *) m)->st_mtimespec =
		    ((struct stat *) m)->st_ctimespec = unp->unp_ctime;
		((struct stat *) m)->st_ino = unp->unp_ino;
		return (0);

	case PRU_RCVOOB:
		return (EOPNOTSUPP);

	case PRU_SENDOOB:
		error = EOPNOTSUPP;
		break;

	case PRU_SOCKADDR:
		if (unp->unp_addr) {
			nam->m_len = unp->unp_addr->m_len;
			bcopy(mtod(unp->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else
			nam->m_len = 0;
		break;

	case PRU_PEERADDR:
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			nam->m_len = unp->unp_conn->unp_addr->m_len;
			bcopy(mtod(unp->unp_conn->unp_addr, caddr_t),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else
			nam->m_len = 0;
		break;

	case PRU_PEEREID:
		if (unp->unp_flags & UNP_FEIDS) {
			nam->m_len = sizeof(struct unpcbid);
			bcopy((caddr_t)(&(unp->unp_connid)),
			    mtod(nam, caddr_t), (unsigned)nam->m_len);
		} else
			nam->m_len = 0;
		break;

	case PRU_SLOWTIMO:
		break;

	default:
		panic("piusrreq");
	}
release:
	if (control)
		m_freem(control);
	if (m)
		m_freem(m);
	return (error);
}

/*
 * Both send and receive buffers are allocated PIPSIZ bytes of buffering
 * for stream sockets, although the total for sender and receiver is
 * actually only PIPSIZ.
 * Datagram sockets really use the sendspace as the maximum datagram size,
 * and don't really want to reserve the sendspace.  Their recvspace should
 * be large enough for at least one max-size datagram plus address.
 */
#define	PIPSIZ	4096
u_long	unpst_sendspace = PIPSIZ;
u_long	unpst_recvspace = PIPSIZ;
u_long	unpdg_sendspace = 2*1024;	/* really max datagram size */
u_long	unpdg_recvspace = 4*1024;

int	unp_rights;			/* file descriptors in flight */

int
unp_attach(so)
	struct socket *so;
{
	register struct unpcb *unp;
	struct timeval tv;
	int error;

	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
		switch (so->so_type) {

		case SOCK_STREAM:
			error = soreserve(so, unpst_sendspace, unpst_recvspace);
			break;

		case SOCK_DGRAM:
			error = soreserve(so, unpdg_sendspace, unpdg_recvspace);
			break;

		default:
			panic("unp_attach");
		}
		if (error)
			return (error);
	}
	unp = malloc(sizeof(*unp), M_PCB, M_NOWAIT);
	if (unp == NULL)
		return (ENOBUFS);
	bzero((caddr_t)unp, sizeof(*unp));
	unp->unp_socket = so;
	so->so_pcb = unp;
	microtime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv, &unp->unp_ctime);
	return (0);
}

void
unp_detach(unp)
	register struct unpcb *unp;
{

	if (unp->unp_vnode) {
		unp->unp_vnode->v_socket = 0;
		vrele(unp->unp_vnode);
		unp->unp_vnode = 0;
	}
	if (unp->unp_conn)
		unp_disconnect(unp);
	while (unp->unp_refs)
		unp_drop(unp->unp_refs, ECONNRESET);
	soisdisconnected(unp->unp_socket);
	unp->unp_socket->so_pcb = 0;
	m_freem(unp->unp_addr);
	if (unp_rights) {
		/*
		 * Normally the receive buffer is flushed later,
		 * in sofree, but if our receive buffer holds references
		 * to descriptors that are now garbage, we will dispose
		 * of those descriptor references after the garbage collector
		 * gets them (resulting in a "panic: closef: count < 0").
		 */
		sorflush(unp->unp_socket);
		free(unp, M_PCB);
		unp_gc();
	} else
		free(unp, M_PCB);
}

int
unp_bind(unp, nam, p)
	struct unpcb *unp;
	struct mbuf *nam;
	struct proc *p;
{
	struct sockaddr_un *soun = mtod(nam, struct sockaddr_un *);
	register struct vnode *vp;
	struct vattr vattr;
	int error, namelen;
	struct nameidata nd;
	char buf[MLEN];

	if (unp->unp_vnode != NULL)
		return (EINVAL);
	namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path);
	if (namelen <= 0 || namelen >= MLEN)
		return EINVAL;
	strncpy(buf, soun->sun_path, namelen);
	buf[namelen] = 0;	/* null-terminate the string */
	NDINIT(&nd, CREATE, NOFOLLOW | LOCKPARENT, UIO_SYSSPACE, buf, p);
	/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;
	if (vp != NULL) {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(vp);
		return (EADDRINUSE);
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VSOCK;
	vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
	VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
	error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
	if (error)
		return (error);
	vp = nd.ni_vp;
	vp->v_socket = unp->unp_socket;
	unp->unp_vnode = vp;
	unp->unp_addr = m_copy(nam, 0, (int)M_COPYALL);
	VOP_UNLOCK(vp, 0, p);
	return (0);
}

int
unp_connect(so, nam, p)
	struct socket *so;
	struct mbuf *nam;
	struct proc *p;
{
	register struct sockaddr_un *soun = mtod(nam, struct sockaddr_un *);
	register struct vnode *vp;
	register struct socket *so2, *so3;
	struct unpcb *unp2, *unp3;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, soun->sun_path, p);
	if (nam->m_data + nam->m_len == &nam->m_dat[MLEN]) {	/* XXX */
		if (*(mtod(nam, caddr_t) + nam->m_len - 1) != 0)
			return (EMSGSIZE);
	} else
		*(mtod(nam, caddr_t) + nam->m_len) = 0;
	if ((error = namei(&nd)) != 0)
		return (error);
	vp = nd.ni_vp;
	if (vp->v_type != VSOCK) {
		error = ENOTSOCK;
		goto bad;
	}
	if ((error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) != 0)
		goto bad;
	so2 = vp->v_socket;
	if (so2 == 0) {
		error = ECONNREFUSED;
		goto bad;
	}
	if (so->so_type != so2->so_type) {
		error = EPROTOTYPE;
		goto bad;
	}
	if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
		if ((so2->so_options & SO_ACCEPTCONN) == 0 ||
		    (so3 = sonewconn(so2, 0)) == 0) {
			error = ECONNREFUSED;
			goto bad;
		}
		unp2 = sotounpcb(so2);
		unp3 = sotounpcb(so3);
		if (unp2->unp_addr)
			unp3->unp_addr =
			    m_copy(unp2->unp_addr, 0, (int)M_COPYALL);
		unp3->unp_connid.unp_euid = p->p_ucred->cr_uid;
		unp3->unp_connid.unp_egid = p->p_ucred->cr_gid;
		unp3->unp_flags |= UNP_FEIDS;
		so2 = so3;
	}
	error = unp_connect2(so, so2);
bad:
	vput(vp);
	return (error);
}

int
unp_connect2(so, so2)
	register struct socket *so;
	register struct socket *so2;
{
	register struct unpcb *unp = sotounpcb(so);
	register struct unpcb *unp2;

	if (so2->so_type != so->so_type)
		return (EPROTOTYPE);
	unp2 = sotounpcb(so2);
	unp->unp_conn = unp2;
	switch (so->so_type) {

	case SOCK_DGRAM:
		unp->unp_nextref = unp2->unp_refs;
		unp2->unp_refs = unp;
		soisconnected(so);
		break;

	case SOCK_STREAM:
		unp2->unp_conn = unp;
		soisconnected(so);
		soisconnected(so2);
		break;

	default:
		panic("unp_connect2");
	}
	return (0);
}

void
unp_disconnect(unp)
	struct unpcb *unp;
{
	register struct unpcb *unp2 = unp->unp_conn;

	if (unp2 == 0)
		return;
	unp->unp_conn = 0;
	switch (unp->unp_socket->so_type) {

	case SOCK_DGRAM:
		if (unp2->unp_refs == unp)
			unp2->unp_refs = unp->unp_nextref;
		else {
			unp2 = unp2->unp_refs;
			for (;;) {
				if (unp2 == 0)
					panic("unp_disconnect");
				if (unp2->unp_nextref == unp)
					break;
				unp2 = unp2->unp_nextref;
			}
			unp2->unp_nextref = unp->unp_nextref;
		}
		unp->unp_nextref = 0;
		unp->unp_socket->so_state &= ~SS_ISCONNECTED;
		break;

	case SOCK_STREAM:
		soisdisconnected(unp->unp_socket);
		unp2->unp_conn = 0;
		soisdisconnected(unp2->unp_socket);
		break;
	}
}

#ifdef notdef
unp_abort(unp)
	struct unpcb *unp;
{

	unp_detach(unp);
}
#endif

void
unp_shutdown(unp)
	struct unpcb *unp;
{
	struct socket *so;

	if (unp->unp_socket->so_type == SOCK_STREAM && unp->unp_conn &&
	    (so = unp->unp_conn->unp_socket))
		socantrcvmore(so);
}

void
unp_drop(unp, errno)
	struct unpcb *unp;
	int errno;
{
	struct socket *so = unp->unp_socket;

	so->so_error = errno;
	unp_disconnect(unp);
	if (so->so_head) {
		so->so_pcb = 0;
		sofree(so);
		m_freem(unp->unp_addr);
		free(unp, M_PCB);
	}
}

#ifdef notdef
unp_drain()
{

}
#endif

int
unp_externalize(rights)
	struct mbuf *rights;
{
	struct proc *p = curproc;		/* XXX */
	struct cmsghdr *cm = mtod(rights, struct cmsghdr *);
	int i, *fdp;
	struct file **rp;
	struct file *fp;
	int nfds, error = 0;

	nfds = (cm->cmsg_len - CMSG_ALIGN(sizeof(*cm))) /
	    sizeof(struct file *);
	rp = (struct file **)CMSG_DATA(cm);

	fdp = malloc(nfds * sizeof(int), M_TEMP, M_WAITOK);

#ifdef notyet
	/* Make sure the recipient should be able to see the descriptors.. */
	if (p->p_cwdi->cwdi_rdir != NULL) {
		rp = (struct file **)CMSG_DATA(cm);
		for (i = 0; i < nfds; i++) {
			fp = *rp++;
			/*
			 * If we are in a chroot'ed directory, and
			 * someone wants to pass us a directory, make
			 * sure it's inside the subtree we're allowed
			 * to access.
			 */
			if (fp->f_type == DTYPE_VNODE) {
				struct vnode *vp = (struct vnode *)fp->f_data;
				if ((vp->v_type == VDIR) &&
				    !vn_isunder(vp, p->p_cwdi->cwdi_rdir, p)) {
					error = EPERM;
					break;
				}
			}
		}
	}
#endif

restart:
	rp = (struct file **)CMSG_DATA(cm);
	if (error != 0) {
		for (i = 0; i < nfds; i++) {
			fp = *rp;
			/*
			 * zero the pointer before calling unp_discard,
			 * since it may end up in unp_gc()..
			 */
			*rp++ = 0;
			unp_discard(fp);
		}
		goto out;
	}

	/*
	 * First loop -- allocate file descriptor table slots for the
	 * new descriptors.
	 */
	for (i = 0; i < nfds; i++) {
		fp = *rp++;
		if ((error = fdalloc(p, 0, &fdp[i])) != 0) {
			/*
			 * Back out what we've done so far.
			 */
			for (--i; i >= 0; i--)
				fdremove(p->p_fd, fdp[i]);

			if (error == ENOSPC) {
				fdexpand(p);
				error = 0;
			} else {
				/*
				 * This is the error that has historically
				 * been returned, and some callers may
				 * expect it.
				 */
				error = EMSGSIZE;
			}
			goto restart;
		}

		/*
		 * Make the slot reference the descriptor so that
		 * fdalloc() works properly.. We finalize it all
		 * in the loop below.
		 */
		p->p_fd->fd_ofiles[fdp[i]] = fp;
	}

	/*
	 * Now that adding them has succeeded, update all of the
	 * descriptor passing state.
	 */
	rp = (struct file **)CMSG_DATA(cm);
	for (i = 0; i < nfds; i++) {
		fp = *rp++;
		fp->f_msgcount--;
		unp_rights--;
	}

	/*
	 * Copy temporary array to message and adjust length, in case of
	 * transition from large struct file pointers to ints.
	 */
	memcpy(CMSG_DATA(cm), fdp, nfds * sizeof(int));
	cm->cmsg_len = CMSG_LEN(nfds * sizeof(int));
	rights->m_len = CMSG_SPACE(nfds * sizeof(int));
out:
	free(fdp, M_TEMP);
	return (error);
}

int
unp_internalize(control, p)
	struct mbuf *control;
	struct proc *p;
{
	struct filedesc *fdp = p->p_fd;
	register struct cmsghdr *cm = mtod(control, struct cmsghdr *);
	struct file **rp, *fp;
	register int i;
	struct mbuf *n = NULL;
	int oldfds, *ip, fd;

	if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET ||
	    cm->cmsg_len != control->m_len)
		return (EINVAL);
	oldfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int);
	ip = (int *)(cm + 1);
	for (i = 0; i < oldfds; i++) {
		fd = *ip++;
		if (fd_getfile(fdp, fd) == NULL)
			return (EBADF);
		if (fdp->fd_ofiles[fd]->f_count == LONG_MAX-2 ||
		    fdp->fd_ofiles[fd]->f_msgcount == LONG_MAX-2)
			return (EDEADLK);
	}
	ip = (int *)(cm + 1);
	if (sizeof(int) != sizeof(struct file *)) {
		MGET(n, M_WAIT, MT_DATA);
		rp = (struct file **)mtod(n, caddr_t);
	} else
		rp = (struct file **)ip;
	for (i = 0; i < oldfds; i++) {
		bcopy(ip, &fd, sizeof fd);
		ip++;
		fp = fdp->fd_ofiles[fd];
		bcopy(&fp, rp, sizeof fp);
		rp++;
		fp->f_count++;
		fp->f_msgcount++;
		unp_rights++;
	}
	if (n) {
		m_adj(control, -(oldfds * sizeof(int)));
		n->m_len = oldfds * sizeof(struct file *);
		m_cat(control, n);
	}
	return (0);
}

int	unp_defer, unp_gcing;
extern	struct domain unixdomain;

void
unp_gc()
{
	register struct file *fp, *nextfp;
	register struct socket *so;
	struct file **extra_ref, **fpp;
	int nunref, i;

	if (unp_gcing)
		return;
	unp_gcing = 1;
	unp_defer = 0;
	for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next)
		fp->f_flag &= ~(FMARK|FDEFER);
	do {
		for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next) {
			if (fp->f_count == 0)
				continue;
			if (fp->f_flag & FDEFER) {
				fp->f_flag &= ~FDEFER;
				unp_defer--;
			} else {
				if (fp->f_flag & FMARK)
					continue;
				if (fp->f_count == fp->f_msgcount)
					continue;
				fp->f_flag |= FMARK;
			}
			if (fp->f_type != DTYPE_SOCKET ||
			    (so = (struct socket *)fp->f_data) == 0)
				continue;
			if (so->so_proto->pr_domain != &unixdomain ||
			    (so->so_proto->pr_flags&PR_RIGHTS) == 0)
				continue;
#ifdef notdef
			if (so->so_rcv.sb_flags & SB_LOCK) {
				/*
				 * This is problematical; it's not clear
				 * we need to wait for the sockbuf to be
				 * unlocked (on a uniprocessor, at least),
				 * and it's also not clear what to do
				 * if sbwait returns an error due to receipt
				 * of a signal.  If sbwait does return
				 * an error, we'll go into an infinite
				 * loop.  Delete all of this for now.
				 */
				(void) sbwait(&so->so_rcv);
				goto restart;
			}
#endif
			unp_scan(so->so_rcv.sb_mb, unp_mark);
		}
	} while (unp_defer);
	/*
	 * We grab an extra reference to each of the file table entries
	 * that are not otherwise accessible and then free the rights
	 * that are stored in messages on them.
	 *
	 * The bug in the original code is a little tricky, so I'll describe
	 * what's wrong with it here.
	 *
	 * It is incorrect to simply unp_discard each entry for f_msgcount
	 * times -- consider the case of sockets A and B that contain
	 * references to each other.  On a last close of some other socket,
	 * we trigger a gc since the number of outstanding rights (unp_rights)
	 * is non-zero.  If during the sweep phase the gc code un_discards,
	 * we end up doing a (full) closef on the descriptor.  A closef on A
	 * results in the following chain.  Closef calls soo_close, which
	 * calls soclose.   Soclose calls first (through the switch
	 * uipc_usrreq) unp_detach, which re-invokes unp_gc.  Unp_gc simply
	 * returns because the previous instance had set unp_gcing, and
	 * we return all the way back to soclose, which marks the socket
	 * with SS_NOFDREF, and then calls sofree.  Sofree calls sorflush
	 * to free up the rights that are queued in messages on the socket A,
	 * i.e., the reference on B.  The sorflush calls via the dom_dispose
	 * switch unp_dispose, which unp_scans with unp_discard.  This second
	 * instance of unp_discard just calls closef on B.
	 *
	 * Well, a similar chain occurs on B, resulting in a sorflush on B,
	 * which results in another closef on A.  Unfortunately, A is already
	 * being closed, and the descriptor has already been marked with
	 * SS_NOFDREF, and soclose panics at this point.
	 *
	 * Here, we first take an extra reference to each inaccessible
	 * descriptor.  Then, we call sorflush ourself, since we know
	 * it is a Unix domain socket anyhow.  After we destroy all the
	 * rights carried in messages, we do a last closef to get rid
	 * of our extra reference.  This is the last close, and the
	 * unp_detach etc will shut down the socket.
	 *
	 * 91/09/19, bsy@cs.cmu.edu
	 */
	extra_ref = malloc(nfiles * sizeof(struct file *), M_FILE, M_WAITOK);
	for (nunref = 0, fp = filehead.lh_first, fpp = extra_ref; fp != 0;
	    fp = nextfp) {
		nextfp = fp->f_list.le_next;
		if (fp->f_count == 0)
			continue;
		if (fp->f_count == fp->f_msgcount && !(fp->f_flag & FMARK)) {
			*fpp++ = fp;
			nunref++;
			fp->f_count++;
		}
	}
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
		if ((*fpp)->f_type == DTYPE_SOCKET && (*fpp)->f_data != NULL)
			sorflush((struct socket *)(*fpp)->f_data);
	for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp)
		(void) closef(*fpp, (struct proc *)0);
	free((caddr_t)extra_ref, M_FILE);
	unp_gcing = 0;
}

void
unp_dispose(m)
	struct mbuf *m;
{

	if (m)
		unp_scan(m, unp_discard);
}

void
unp_scan(m0, op)
	register struct mbuf *m0;
	void (*op) __P((struct file *));
{
	register struct mbuf *m;
	struct file **rp, *fp;
	register struct cmsghdr *cm;
	register int i;
	int qfds;

	while (m0) {
		for (m = m0; m; m = m->m_next)
			if (m->m_type == MT_CONTROL &&
			    m->m_len >= sizeof(*cm)) {
				cm = mtod(m, struct cmsghdr *);
				if (cm->cmsg_level != SOL_SOCKET ||
				    cm->cmsg_type != SCM_RIGHTS)
					continue;
				qfds = (cm->cmsg_len - sizeof *cm) / sizeof (int);
				rp = (struct file **)(cm + 1);
				for (i = 0; i < qfds; i++) {
					bcopy(rp, &fp, sizeof fp);
					rp++;
					(*op)(fp);
				}
				break;		/* XXX, but saves time */
			}
		m0 = m0->m_act;
	}
}

void
unp_mark(fp)
	struct file *fp;
{

	if (fp->f_flag & FMARK)
		return;
	unp_defer++;
	fp->f_flag |= (FMARK|FDEFER);
}

void
unp_discard(fp)
	struct file *fp;
{

	fp->f_msgcount--;
	unp_rights--;
	(void) closef(fp, (struct proc *)0);
}
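
/*
 * Illustration (not part of the kernel proper): a minimal userland sketch,
 * kept inside this comment so the file still compiles, showing the
 * SCM_RIGHTS control-message layout that unp_internalize() and
 * unp_externalize() above translate to and from in-kernel struct file
 * pointers.  The helper names (send_fd, recv_fd) are hypothetical; the only
 * assumptions are the standard POSIX socket and CMSG_* interfaces.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <string.h>
 *
 *	// Pass one descriptor over a connected Unix-domain socket.
 *	static int
 *	send_fd(int sock, int fd)
 *	{
 *		struct msghdr msg;
 *		struct cmsghdr *cm;
 *		struct iovec iov;
 *		char cbuf[CMSG_SPACE(sizeof(int))];
 *		char byte = 0;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		iov.iov_base = &byte;		// carry at least one data byte
 *		iov.iov_len = 1;
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *		cm = CMSG_FIRSTHDR(&msg);
 *		cm->cmsg_level = SOL_SOCKET;	// checked by unp_internalize()
 *		cm->cmsg_type = SCM_RIGHTS;
 *		cm->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cm), &fd, sizeof(int));
 *		return (sendmsg(sock, &msg, 0) == -1 ? -1 : 0);
 *	}
 *
 *	// Receive one descriptor; returns the new fd, or -1 on error.
 *	static int
 *	recv_fd(int sock)
 *	{
 *		struct msghdr msg;
 *		struct cmsghdr *cm;
 *		struct iovec iov;
 *		char cbuf[CMSG_SPACE(sizeof(int))];
 *		char byte;
 *		int fd = -1;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		iov.iov_base = &byte;
 *		iov.iov_len = 1;
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *		if (recvmsg(sock, &msg, 0) == -1)
 *			return (-1);
 *		for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *		    cm = CMSG_NXTHDR(&msg, cm))
 *			if (cm->cmsg_level == SOL_SOCKET &&
 *			    cm->cmsg_type == SCM_RIGHTS)
 *				memcpy(&fd, CMSG_DATA(cm), sizeof(int));
 *		return (fd);
 *	}
 */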