/*
 * Copyright (c) 1992, 1993, 1994 The Regents of the University of California.
 * Copyright (c) 1992, 1993, 1994 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_vnops.c	1.3 (Berkeley) 02/01/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include "union.h"

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 */
static int
union_mkshadow(dvp, cnp, vpp)
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vattr va;
	struct proc *p = cnp->cn_proc;

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the current user,
	 * group from parent directory, and mode 777 modified
	 * by umask (i.e. mostly identical to the mkdir syscall).
	 * (jsp, kb)
	 * TODO: create the directory owned by the user who
	 * did the mount (um->um_cred).
	 */

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = UN_DIRMODE &~ p->p_fd->fd_cmask;
	VOP_UNLOCK(dvp);
	LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);
	VREF(dvp);
	VOP_LOCK(dvp);
	error = VOP_MKDIR(dvp, vpp, cnp, &va);
	VOP_LOCK(dvp);
	return (error);
}

static int
union_lookup1(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct vnode *tdvp;
	struct mount *mp;

	if (cnp->cn_flags & ISDOTDOT) {
		for (;;) {
			if ((dvp->v_flag & VROOT) == 0 ||
			    (cnp->cn_flags & NOCROSSMOUNT))
				break;

			tdvp = dvp;
			dvp = dvp->v_mount->mnt_vnodecovered;
			vput(tdvp);
			VREF(dvp);
			VOP_LOCK(dvp);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);

	dvp = tdvp;
	while ((dvp->v_type == VDIR) && (mp = dvp->v_mountedhere) &&
	       (cnp->cn_flags & NOCROSSMOUNT) == 0) {

		if (mp->mnt_flag & MNT_MLOCK) {
			mp->mnt_flag |= MNT_MWAIT;
			sleep((caddr_t) mp, PVFS);
			continue;
		}

		if (error = VFS_ROOT(mp, &tdvp)) {
			vput(dvp);
			return (error);
		}

		vput(dvp);
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}

int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	int lockparent = cnp->cn_flags & LOCKPARENT;

	cnp->cn_flags |= LOCKPARENT;

	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;
	uppervp = 0;
	lowervp = 0;

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	uppervp = 0;
	if (upperdvp) {
		VOP_LOCK(upperdvp);
		uerror = union_lookup1(upperdvp, &uppervp, cnp);
		VOP_UNLOCK(upperdvp);

		if (cnp->cn_consume != 0) {
			*ap->a_vpp = uppervp;
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			return (uerror);
		}
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	lowervp = 0;
	if (lowerdvp) {
		VOP_LOCK(lowerdvp);
		lerror = union_lookup1(lowerdvp, &lowervp, cnp);
		VOP_UNLOCK(lowerdvp);

		if (cnp->cn_consume != 0) {
			if (uppervp) {
				vput(uppervp);
				uppervp = 0;
			}
			*ap->a_vpp = lowervp;
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			return (lerror);
		}
	} else {
		lerror = ENOENT;
	}

	if (!lockparent)
		cnp->cn_flags &= ~LOCKPARENT;

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a. the bottom vnode is not a directory, in which
	 *       case just return a new union vnode referencing
	 *       an empty top layer and the existing bottom layer.
	 *    b. the bottom vnode is a directory, in which case
	 *       create a new directory in the top-level and
	 *       continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		*ap->a_vpp = 0;
		return (uerror);
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) {	/* case 2b. */
			VOP_LOCK(upperdvp);
			uerror = union_mkshadow(upperdvp, cnp, &uppervp);
			VOP_UNLOCK(upperdvp);
			if (uerror) {
				if (lowervp) {
					vput(lowervp);
					lowervp = 0;
				}
				return (uerror);
			}
		}
	}

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, cnp,
			uppervp, lowervp);

	if (uppervp)
		VOP_UNLOCK(uppervp);
	if (lowervp)
		VOP_UNLOCK(lowervp);

	if (error) {
		if (uppervp)
			vrele(uppervp);
		if (lowervp)
			vrele(lowervp);
	} else {
		if (!lockparent)
			VOP_UNLOCK(*ap->a_vpp);
	}

	return (error);
}

int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;

	if (dvp) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		error = VOP_CREATE(dvp, &vp, ap->a_cnp, ap->a_vap);
		if (error)
			return (error);

		error = union_allocvp(
				ap->a_vpp,
				mp,
				NULLVP,
				ap->a_cnp,
				vp,
				NULLVP);
		VOP_UNLOCK(vp);
		if (error)
			vrele(vp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;

	if (dvp) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		error = VOP_MKNOD(dvp, &vp, ap->a_cnp, ap->a_vap);
		if (error)
			return (error);

		if (vp) {
			error = union_allocvp(
					ap->a_vpp,
					mp,
					NULLVP,
					ap->a_cnp,
					vp,
					NULLVP);
			VOP_UNLOCK(vp);
			if (error)
				vrele(vp);
		}
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
static int
union_copyfile(p, cred, fvp, tvp)
	struct proc *p;
	struct ucred *cred;
	struct vnode *fvp;
	struct vnode *tvp;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;
	off_t offset;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */
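
	/*
	 * The single uio/iovec pair below is reused for both halves of
	 * each pass: a read of up to MAXBSIZE bytes from fvp, followed
	 * by writes of exactly the bytes obtained to tvp at the same
	 * offset.  "offset" is advanced only after the write half of a
	 * pass completes successfully.
	 */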

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	offset = 0;

	VOP_UNLOCK(fvp);			/* XXX */
	LEASE_CHECK(fvp, p, cred, LEASE_READ);
	VOP_LOCK(fvp);				/* XXX */
	VOP_UNLOCK(tvp);			/* XXX */
	LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
	VOP_LOCK(tvp);				/* XXX */

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
	do {
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_offset = offset;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;
			uio.uio_offset = offset;

			/* the read hit end of file; the copy is complete */
			if (iov.iov_len == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while (error == 0 && uio.uio_resid > 0);
			if (error == 0)
				offset = uio.uio_offset;
		}
	} while ((uio.uio_resid == 0) && (error == 0));

	free(buf, M_TEMP);
	return (error);
}

int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	int error;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	tvp = un->un_uppervp;
	if (tvp == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			struct nameidata nd;
			struct filedesc *fdp = p->p_fd;
			int fmode;
			int cmode;

			/*
			 * Open the named file in the upper layer.  Note that
			 * the file may have come into existence *since* the
			 * lookup was done, since the upper layer may really
			 * be a loopback mount of some other filesystem...
			 * so open the file with exclusive create and barf if
			 * it already exists.
			 * XXX - perhaps should re-lookup the node (once more
			 * with feeling) and simply open that.  Who knows.
			 */
			NDINIT(&nd, CREATE, 0, UIO_SYSSPACE, un->un_path, p);
			fmode = (O_CREAT|O_TRUNC|O_EXCL);
			cmode = UN_FILEMODE & ~fdp->fd_cmask;
			error = vn_open(&nd, fmode, cmode);
			if (error)
				return (error);
			un->un_uppervp = nd.ni_vp;	/* XXX */
			/* at this point, uppervp is locked */

			/*
			 * Now, if the file is being opened with truncation,
			 * then the (new) upper vnode is ready to fly,
			 * otherwise the data from the lower vnode must be
			 * copied to the upper layer first.  This only works
			 * for regular files (check is made above).
			 */
			if ((mode & O_TRUNC) == 0) {
				/*
				 * XXX - should not ignore errors
				 * from VOP_CLOSE
				 */
				VOP_LOCK(un->un_lowervp);
				error = VOP_OPEN(tvp, FREAD, cred, p);
				if (error == 0) {
					error = union_copyfile(p, cred,
						tvp, un->un_uppervp);
					VOP_UNLOCK(tvp);
					(void) VOP_CLOSE(tvp, FREAD, cred, p);
				} else {
					VOP_UNLOCK(tvp);
				}
				VOP_UNLOCK(un->un_uppervp);
				(void) VOP_CLOSE(un->un_uppervp, FWRITE,
					cred, p);
				VOP_LOCK(un->un_uppervp);
			}
			if (error == 0)
				error = VOP_OPEN(un->un_uppervp, mode, cred, p);
			VOP_UNLOCK(un->un_uppervp);
			return (error);
		}
	}

	VOP_LOCK(tvp);
	error = VOP_OPEN(tvp, mode, cred, p);
	VOP_UNLOCK(tvp);

	return (error);
}

int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (VOP_CLOSE(OTHERVP(ap->a_vp), ap->a_fflag, ap->a_cred, ap->a_p));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	int error = 0;
	struct vnode *vp;

	if (vp = un->un_lowervp) {
		VOP_LOCK(vp);
		error = VOP_ACCESS(vp, ap->a_mode, ap->a_cred, ap->a_p);
		VOP_UNLOCK(vp);
		if (error)
			return (error);
	}

	if (vp = un->un_uppervp) {
		VOP_LOCK(vp);
		error = VOP_ACCESS(vp, ap->a_mode, ap->a_cred, ap->a_p);
		VOP_UNLOCK(vp);
	}

	return (error);
}

/*
 * We handle getattr only to change the fsid.
 */
int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_GETATTR(vp, ap->a_vap, ap->a_cred, ap->a_p);
	VOP_UNLOCK(vp);
	if (error)
		return (error);

	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	int error;

	if (un->un_uppervp) {
		VOP_LOCK(un->un_uppervp);
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
				ap->a_cred, ap->a_p);
		VOP_UNLOCK(un->un_uppervp);
	} else {
		/*
		 * XXX should do a copyfile (perhaps only if
		 * the file permissions change, which would not
		 * track va_ctime correctly).
		 */
		error = EROFS;
	}

	return (error);
}

int
union_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	VOP_UNLOCK(vp);

	return (error);
}

int
union_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	VOP_UNLOCK(vp);

	return (error);
}

int
union_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (VOP_IOCTL(OTHERVP(ap->a_vp), ap->a_command, ap->a_data,
			ap->a_fflag, ap->a_cred, ap->a_p));
}

int
union_select(ap)
	struct vop_select_args /* {
		struct vnode *a_vp;
		int a_which;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (VOP_SELECT(OTHERVP(ap->a_vp), ap->a_which, ap->a_fflags,
			ap->a_cred, ap->a_p));
}

int
union_mmap(ap)
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (VOP_MMAP(OTHERVP(ap->a_vp), ap->a_fflags,
			ap->a_cred, ap->a_p));
}

int
union_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	int error = 0;
	struct vnode *targetvp = OTHERVP(ap->a_vp);

	if (targetvp) {
		VOP_LOCK(targetvp);
		error = VOP_FSYNC(targetvp, ap->a_cred,
				ap->a_waitfor, ap->a_p);
		VOP_UNLOCK(targetvp);
	}

	return (error);
}

int
union_seek(ap)
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_cred;
	} */ *ap;
{

	return (VOP_SEEK(OTHERVP(ap->a_vp), ap->a_oldoff, ap->a_newoff, ap->a_cred));
}

int
union_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);

	if (dun->un_uppervp && un->un_uppervp) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		VREF(vp);
		VOP_LOCK(vp);
		vput(ap->a_vp);

		error = VOP_REMOVE(dvp, vp, ap->a_cnp);
	} else {
		/*
		 * XXX: should create a whiteout here
		 */
		vput(ap->a_dvp);
		vput(ap->a_vp);
		error = EROFS;
	}

	return (error);
}

int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_vp;
		struct vnode *a_tdvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct union_node *dun = VTOUNION(ap->a_vp);
	struct union_node *un = VTOUNION(ap->a_tdvp);

	if (dun->un_uppervp && un->un_uppervp) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_vp);
		VREF(vp);
		vrele(ap->a_tdvp);

		error = VOP_LINK(dvp, vp, ap->a_cnp);
	} else {
		/*
		 * XXX: need to copy to upper layer
		 * and do the link there.
		 */
		vput(ap->a_vp);
		vrele(ap->a_tdvp);
		error = EROFS;
	}

	return (error);
}

int
union_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;

	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == 0) {
			error = EROFS;
			goto bad;
		}

		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		if (un->un_uppervp == 0) {
			error = EROFS;
			goto bad;
		}

		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);
		if (un->un_uppervp == 0) {
			error = EROFS;
			goto bad;
		}

		tdvp = un->un_uppervp;
		VREF(tdvp);
		VOP_LOCK(tdvp);
		vput(ap->a_tdvp);
	}

	if (tvp && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);
		if (un->un_uppervp == 0) {
			error = EROFS;
			goto bad;
		}

		tvp = un->un_uppervp;
		VREF(tvp);
		VOP_LOCK(tvp);
		vput(ap->a_tvp);
	}

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

bad:
	vrele(fdvp);
	vrele(fvp);
	vput(tdvp);
	if (tvp)
		vput(tvp);

	return (error);
}

int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;

	if (dvp) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		error = VOP_MKDIR(dvp, &vp, ap->a_cnp, ap->a_vap);
		if (error)
			return (error);

		error = union_allocvp(
				ap->a_vpp,
				mp,
				NULLVP,
				ap->a_cnp,
				vp,
				NULLVP);
		VOP_UNLOCK(vp);
		if (error)
			vrele(vp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);

	if (dun->un_uppervp && un->un_uppervp) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		VREF(vp);
		VOP_LOCK(vp);
		vput(ap->a_vp);

		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
	} else {
		/*
		 * XXX: should create a whiteout here
		 */
		vput(ap->a_dvp);
		vput(ap->a_vp);
		error = EROFS;
	}

	return (error);
}

int
union_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;

	if (dvp) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		error = VOP_SYMLINK(dvp, &vp, ap->a_cnp,
				ap->a_vap, ap->a_target);
		*ap->a_vpp = 0;
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * union_readdir works in concert with getdirentries and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error = 0;
	struct union_node *un = VTOUNION(ap->a_vp);

	if (un->un_uppervp) {
		struct vnode *vp = OTHERVP(ap->a_vp);

		VOP_LOCK(vp);
		error = VOP_READDIR(vp, ap->a_uio, ap->a_cred);
		VOP_UNLOCK(vp);
	}

	return (error);
}

int
union_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_READLINK(vp, ap->a_uio, ap->a_cred);
	VOP_UNLOCK(vp);

	return (error);
}

int
union_abortop(ap)
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_dvp);
	int islocked = un->un_flags & UN_LOCKED;

	if (islocked)
		VOP_LOCK(vp);
	error = VOP_ABORTOP(vp, ap->a_cnp);
	if (islocked)
		VOP_UNLOCK(vp);

	return (error);
}

int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	struct vnode *uppervp = un->un_uppervp;
	struct vnode *lowervp = un->un_lowervp;
	struct vnode *dirvp = un->un_dirvp;
	char *path = un->un_path;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	un->un_uppervp = 0;
	un->un_lowervp = 0;
	un->un_dirvp = 0;
	un->un_path = NULL;
	union_freevp(vp);
	if (uppervp)
		vrele(uppervp);
	if (lowervp)
		vrele(lowervp);
	if (dirvp)
		vrele(dirvp);
	if (path)
		free(path, M_TEMP);
	return (0);
}

int
union_lock(ap)
	struct vop_lock_args *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);

	while (un->un_flags & UN_LOCKED) {
#ifdef DIAGNOSTIC
		if (un->un_pid == curproc->p_pid)
			panic("union: locking against myself");
#endif
		un->un_flags |= UN_WANT;
		sleep((caddr_t) &un->un_flags, PINOD);
	}
	un->un_flags |= UN_LOCKED;
#ifdef DIAGNOSTIC
	un->un_pid = curproc->p_pid;
#endif
	return (0);
}

int
union_unlock(ap)
	struct vop_lock_args *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);

#ifdef DIAGNOSTIC
	if ((un->un_flags & UN_LOCKED) == 0)
		panic("union: unlock unlocked node");
	if (un->un_pid != curproc->p_pid)
		panic("union: unlocking other process's union node");
#endif

	un->un_flags &= ~UN_LOCKED;
	if (un->un_flags & UN_WANT) {
		un->un_flags &= ~UN_WANT;
		wakeup((caddr_t) &un->un_flags);
	}

#ifdef DIAGNOSTIC
	un->un_pid = 0;
#endif
	return (0);
}

int
union_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_BMAP(vp, ap->a_bn, ap->a_vpp, ap->a_bnp, ap->a_runp);
	VOP_UNLOCK(vp);

	return (error);
}

int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UNION, vp=%x, uppervp=%x, lowervp=%x\n",
			vp, UPPERVP(vp), LOWERVP(vp));
	return (0);
}

int
union_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
}

int
union_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_PATHCONF(vp, ap->a_name, ap->a_retval);
	VOP_UNLOCK(vp);

	return (error);
}

int
union_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{

	return (VOP_ADVLOCK(OTHERVP(ap->a_vp), ap->a_id, ap->a_op,
			ap->a_fl, ap->a_flags));
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = OTHERVP(bp->b_vp);

#ifdef DIAGNOSTIC
	if (bp->b_vp == 0)
		panic("union_strategy: nil vp");
	if (((bp->b_flags & B_READ) == 0) &&
	    (bp->b_vp == LOWERVP(savedvp)))
		panic("union_strategy: writing to lowervp");
#endif

	error = VOP_STRATEGY(bp);
	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
int (**union_vnodeop_p)();
struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, union_lookup },		/* lookup */
	{ &vop_create_desc, union_create },		/* create */
	{ &vop_mknod_desc, union_mknod },		/* mknod */
	{ &vop_open_desc, union_open },			/* open */
	{ &vop_close_desc, union_close },		/* close */
	{ &vop_access_desc, union_access },		/* access */
	{ &vop_getattr_desc, union_getattr },		/* getattr */
	{ &vop_setattr_desc, union_setattr },		/* setattr */
	{ &vop_read_desc, union_read },			/* read */
	{ &vop_write_desc, union_write },		/* write */
	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
	{ &vop_select_desc, union_select },		/* select */
	{ &vop_mmap_desc, union_mmap },			/* mmap */
	{ &vop_fsync_desc, union_fsync },		/* fsync */
	{ &vop_seek_desc, union_seek },			/* seek */
	{ &vop_remove_desc, union_remove },		/* remove */
	{ &vop_link_desc, union_link },			/* link */
	{ &vop_rename_desc, union_rename },		/* rename */
	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
	{ &vop_symlink_desc, union_symlink },		/* symlink */
	{ &vop_readdir_desc, union_readdir },		/* readdir */
	{ &vop_readlink_desc, union_readlink },		/* readlink */
	{ &vop_abortop_desc, union_abortop },		/* abortop */
	{ &vop_inactive_desc, union_inactive },		/* inactive */
	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
	{ &vop_lock_desc, union_lock },			/* lock */
	{ &vop_unlock_desc, union_unlock },		/* unlock */
	{ &vop_bmap_desc, union_bmap },			/* bmap */
	{ &vop_strategy_desc, union_strategy },		/* strategy */
	{ &vop_print_desc, union_print },		/* print */
	{ &vop_islocked_desc, union_islocked },		/* islocked */
	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
	{ &vop_advlock_desc, union_advlock },		/* advlock */
#ifdef notdef
	{ &vop_blkatoff_desc, union_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, union_valloc },		/* valloc */
	{ &vop_vfree_desc, union_vfree },		/* vfree */
	{ &vop_truncate_desc, union_truncate },		/* truncate */
	{ &vop_update_desc, union_update },		/* update */
	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
#endif
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };
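
/*
 * The operations vector above only takes effect once union_vnodeop_opv_desc
 * is registered with the VFS layer; in 4.4BSD that is normally done by
 * listing the descriptor in the kernel's vfs_opv_descs[] table, which
 * vfs_opv_init() walks at boot to allocate and fill in union_vnodeop_p.
 * The exact registration point lives outside this file and is noted here
 * only as a cross-reference.
 */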