/*
 * Copyright (c) 1992, 1993, 1994 The Regents of the University of California.
 * Copyright (c) 1992, 1993, 1994 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)union_vnops.c	1.2 (Berkeley) 02/01/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include "union.h"

static int
union_mkshadow(dvp, cnp, vpp)
	struct vnode *dvp;
	struct componentname *cnp;
	struct vnode **vpp;
{
	int error;
	struct vattr va;
	struct proc *p = cnp->cn_proc;
	int lockparent = (cnp->cn_flags & LOCKPARENT);

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the current user,
	 * group from parent directory, and mode 777 modified
	 * by umask (ie mostly identical to the mkdir syscall).
	 * (jsp, kb)
	 * TODO: create the directory owned by the user who
	 * did the mount (um->um_cred).
	 */

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = UN_DIRMODE &~ p->p_fd->fd_cmask;
	if (lockparent)
		VOP_UNLOCK(dvp);
	LEASE_CHECK(dvp, p, p->p_ucred, LEASE_WRITE);
	VOP_LOCK(dvp);
	error = VOP_MKDIR(dvp, vpp, cnp, &va);
	if (lockparent)
		VOP_LOCK(dvp);
	return (error);
}

static int
union_lookup1(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct vnode *tdvp;
	struct mount *mp;

	if (cnp->cn_flags & ISDOTDOT) {
		for (;;) {
			if ((dvp->v_flag & VROOT) == 0 ||
			    (cnp->cn_flags & NOCROSSMOUNT))
				break;

			tdvp = dvp;
			dvp = dvp->v_mount->mnt_vnodecovered;
			vput(tdvp);
			VREF(dvp);
			VOP_LOCK(dvp);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);

	dvp = tdvp;
	while ((dvp->v_type == VDIR) && (mp = dvp->v_mountedhere) &&
	       (cnp->cn_flags & NOCROSSMOUNT) == 0) {

		if (mp->mnt_flag & MNT_MLOCK) {
			mp->mnt_flag |= MNT_MWAIT;
			sleep((caddr_t) mp, PVFS);
			continue;
		}

		if (error = VFS_ROOT(mp, &tdvp)) {
			vput(dvp);
			return (error);
		}

		vput(dvp);
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}

int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	int lockparent = cnp->cn_flags & LOCKPARENT;

	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	uppervp = 0;
	if (upperdvp) {
		uerror = union_lookup1(upperdvp, &uppervp, cnp);
		if (cnp->cn_consume != 0) {
			*ap->a_vpp = uppervp;
			return (uerror);
		}
		if (!lockparent)
			VOP_LOCK(upperdvp);
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	lowervp = 0;
	if (lowerdvp) {
		lerror = union_lookup1(lowerdvp, &lowervp, cnp);
		if (cnp->cn_consume != 0) {
			if (uppervp) {
				vput(uppervp);
				uppervp = 0;
			}
			*ap->a_vpp = lowervp;
			return (lerror);
		}
		if (!lockparent)
			VOP_LOCK(lowerdvp);
	} else {
		lerror = ENOENT;
	}

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a.  the bottom vnode is not a directory, in which
	 *	  case just return a new union vnode referencing
	 *	  an empty top layer and the existing bottom layer.
	 *    b.  the bottom vnode is a directory, in which case
	 *	  create a new directory in the top-level and
	 *	  continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		*ap->a_vpp = 0;
		return (uerror);
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			uerror = union_mkshadow(upperdvp, cnp, &uppervp);
			if (uerror) {
				if (lowervp) {
					vput(lowervp);
					lowervp = 0;
				}
				return (uerror);
			}
		}
	}

	return (union_allocvp(ap->a_vpp, dvp->v_mount, dvp, cnp,
			      uppervp, lowervp));
}
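/*
 * For reference: union_allocvp() and union_freevp() are not defined in
 * this file (they are assumed to live in the union layer's support code,
 * e.g. union_subr.c).  The call sites in this file imply an interface
 * roughly like the following; this is an illustrative prototype only,
 * not the authoritative declaration:
 */
#ifdef notdef
int union_allocvp(struct vnode **vpp, struct mount *mp, struct vnode *dvp,
	struct componentname *cnp, struct vnode *uppervp,
	struct vnode *lowervp);
int union_freevp(struct vnode *vp);
#endif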

int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;

	if (dvp) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		error = VOP_CREATE(dvp, &vp, ap->a_cnp, ap->a_vap);
		if (error)
			return (error);

		error = union_allocvp(
				ap->a_vpp,
				mp,
				un->un_uppervp,
				ap->a_cnp,
				vp,
				NULLVP);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;

	if (dvp) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		error = VOP_MKNOD(dvp, &vp, ap->a_cnp, ap->a_vap);
		if (error)
			return (error);

		error = union_allocvp(
				ap->a_vpp,
				mp,
				un->un_uppervp,
				ap->a_cnp,
				vp,
				NULLVP);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
static int
union_copyfile(p, cred, fvp, tvp)
	struct proc *p;
	struct ucred *cred;
	struct vnode *fvp;
	struct vnode *tvp;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;
	off_t offset;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	offset = 0;

	VOP_UNLOCK(fvp);				/* XXX */
	LEASE_CHECK(fvp, p, cred, LEASE_READ);
	VOP_LOCK(fvp);					/* XXX */
	VOP_UNLOCK(tvp);				/* XXX */
	LEASE_CHECK(tvp, p, cred, LEASE_WRITE);
	VOP_LOCK(tvp);					/* XXX */

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
	do {
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_offset = offset;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;
			uio.uio_offset = offset;

			/*
			 * a read which returned no data means end of
			 * file, so stop before issuing empty writes.
			 */
			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while (error == 0 && uio.uio_resid > 0);
			if (error == 0)
				offset = uio.uio_offset;
		}
	} while ((uio.uio_resid == 0) && (error == 0));

	free(buf, M_TEMP);
	return (error);
}

int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	if (un->un_uppervp) {
		int error;

		VOP_LOCK(un->un_uppervp);
		error = VOP_OPEN(un->un_uppervp, mode, cred, p);
		VOP_UNLOCK(un->un_uppervp);

		return (error);
	}

	/*
	 * If the lower vnode is being opened for writing, then
	 * copy the file contents to the upper vnode and open that,
	 * otherwise we can simply open the lower vnode.
	 */
	if ((ap->a_mode & FWRITE) && (un->un_lowervp->v_type == VREG)) {
		int error;
		struct nameidata nd;
		struct filedesc *fdp = p->p_fd;
		int fmode;
		int cmode;

		/*
		 * Open the named file in the upper layer.  Note that
		 * the file may have come into existence *since* the lookup
		 * was done, since the upper layer may really be a
		 * loopback mount of some other filesystem... so open
		 * the file with exclusive create and barf if it already
		 * exists.
		 * XXX - perhaps should re-lookup the node (once more with
		 * feeling) and simply open that.  Who knows.
		 */
		NDINIT(&nd, CREATE, 0, UIO_SYSSPACE, un->un_path, p);
		fmode = (O_CREAT|O_TRUNC|O_EXCL);
		cmode = UN_FILEMODE & ~fdp->fd_cmask;
		error = vn_open(&nd, fmode, cmode);
		if (error)
			return (error);
		un->un_uppervp = nd.ni_vp;	/* XXX */
		/* at this point, uppervp is locked */

		/*
		 * Now, if the file is being opened with truncation, then
		 * the (new) upper vnode is ready to fly, otherwise the
		 * data from the lower vnode must be copied to the upper
		 * layer first.  This only works for regular files (check
		 * is made above).
434 */ 435 if ((mode & O_TRUNC) == 0) { 436 /* XXX - should not ignore errors from VOP_CLOSE */ 437 VOP_LOCK(un->un_lowervp); 438 error = VOP_OPEN(un->un_lowervp, FREAD, cred, p); 439 if (error == 0) { 440 error = union_copyfile(p, cred, 441 un->un_lowervp, un->un_uppervp); 442 (void) VOP_CLOSE(un->un_lowervp, FREAD); 443 } 444 VOP_UNLOCK(un->un_lowervp); 445 VOP_UNLOCK(un->un_uppervp); 446 (void) VOP_CLOSE(un->un_uppervp, FWRITE); 447 VOP_LOCK(un->un_uppervp); 448 } 449 if (error == 0) 450 error = VOP_OPEN(un->un_uppervp, FREAD, cred, p); 451 return (error); 452 } 453 454 return (VOP_OPEN(un->un_lowervp, mode, cred, p)); 455 } 456 457 int 458 union_close(ap) 459 struct vop_close_args /* { 460 struct vnode *a_vp; 461 int a_fflag; 462 struct ucred *a_cred; 463 struct proc *a_p; 464 } */ *ap; 465 { 466 467 return (VOP_CLOSE(OTHERVP(ap->a_vp), ap->a_fflag, ap->a_cred, ap->a_p)); 468 } 469 470 /* 471 * Check access permission on the union vnode. 472 * The access check being enforced is to check 473 * against both the underlying vnode, and any 474 * copied vnode. This ensures that no additional 475 * file permissions are given away simply because 476 * the user caused an implicit file copy. 477 */ 478 int 479 union_access(ap) 480 struct vop_access_args /* { 481 struct vnodeop_desc *a_desc; 482 struct vnode *a_vp; 483 int a_mode; 484 struct ucred *a_cred; 485 struct proc *a_p; 486 } */ *ap; 487 { 488 struct union_node *un = VTOUNION(ap->a_vp); 489 struct vnode *vp; 490 491 if (vp = un->un_lowervp) { 492 int error; 493 494 error = VOP_ACCESS(vp, ap->a_mode, ap->a_cred, ap->a_p); 495 if (error) 496 return (error); 497 } 498 499 if (vp = un->un_uppervp) 500 return (VOP_ACCESS(vp, ap->a_mode, ap->a_cred, ap->a_p)); 501 502 return (0); 503 } 504 505 /* 506 * We handle getattr only to change the fsid. 507 */ 508 int 509 union_getattr(ap) 510 struct vop_getattr_args /* { 511 struct vnode *a_vp; 512 struct vattr *a_vap; 513 struct ucred *a_cred; 514 struct proc *a_p; 515 } */ *ap; 516 { 517 int error; 518 519 if (error = union_bypass(ap)) 520 return (error); 521 /* Requires that arguments be restored. */ 522 ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; 523 return (0); 524 } 525 526 int 527 lofs_setattr(ap) 528 struct vop_setattr_args /* { 529 struct vnode *a_vp; 530 struct vattr *a_vap; 531 struct ucred *a_cred; 532 struct proc *a_p; 533 } */ *ap; 534 { 535 struct union_node *un = VTOUNION(ap->a_vp); 536 int error; 537 538 if (un->un_uppervp) { 539 VOP_LOCK(un->un_uppervp); 540 error = VOP_SETATTR(un->un_uppervp, ap->a_vap, 541 ap->a_cred, ap->a_p); 542 VOP_UNLOCK(un->un_uppervp); 543 } else { 544 /* 545 * XXX should do a copyfile (perhaps only if 546 * the file permission change, which would not 547 * track va_ctime correctly). 
548 */ 549 error = EROFS; 550 } 551 552 return (error); 553 } 554 555 int 556 union_read(ap) 557 struct vop_read_args /* { 558 struct vnode *a_vp; 559 struct uio *a_uio; 560 int a_ioflag; 561 struct ucred *a_cred; 562 } */ *ap; 563 { 564 int error; 565 struct vnode *vp = OTHERVP(ap->a_vp); 566 567 VOP_LOCKvp); 568 error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred); 569 VOP_UNLOCKvp); 570 571 return (error); 572 } 573 574 int 575 union_write(ap) 576 struct vop_read_args /* { 577 struct vnode *a_vp; 578 struct uio *a_uio; 579 int a_ioflag; 580 struct ucred *a_cred; 581 } */ *ap; 582 { 583 int error; 584 struct vnode *vp = OTHERVP(ap->a_vp); 585 586 VOP_LOCK(vp); 587 error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred); 588 VOP_UNLOCK(vp); 589 590 return (error); 591 } 592 593 int 594 union_ioctl(ap) 595 struct vop_ioctl_args /* { 596 struct vnode *a_vp; 597 int a_command; 598 caddr_t a_data; 599 int a_fflag; 600 struct ucred *a_cred; 601 struct proc *a_p; 602 } */ *ap; 603 { 604 605 return (VOP_IOCTL(OTHERVP(ap->a_vp), ap->a_command, ap->a_data, 606 ap->a_fflag, ap->a_cred, ap->a_p)); 607 } 608 609 int 610 union_select(ap) 611 struct vop_select_args /* { 612 struct vnode *a_vp; 613 int a_which; 614 int a_fflags; 615 struct ucred *a_cred; 616 struct proc *a_p; 617 } */ *ap; 618 { 619 620 return (VOP_SELECT(OTHERVP(ap->a_vp), ap->a_which, ap->a_fflags, 621 ap->a_cred, ap->a_p)); 622 } 623 624 int 625 union_mmap(ap) 626 struct vop_mmap_args /* { 627 struct vnode *a_vp; 628 int a_fflags; 629 struct ucred *a_cred; 630 struct proc *a_p; 631 } */ *ap; 632 { 633 634 return (VOP_MMAP(OTHERVP(ap->a_vp), ap->a_fflags, 635 ap->a_cred, ap->a_p)); 636 } 637 638 int 639 union_fsync(ap) 640 struct vop_fsync_args /* { 641 struct vnode *a_vp; 642 struct ucred *a_cred; 643 int a_waitfor; 644 struct proc *a_p; 645 } */ *ap; 646 { 647 int error = 0; 648 struct vnode *targetvp = OTHERVP(ap->a_vp); 649 650 if (targetvp) { 651 VOP_LOCK(targetvp); 652 error = VOP_FSYNC(targetvp, ap->a_cred, 653 ap->a_waitfor, ap->a_p); 654 VOP_UNLOCK(targetvp); 655 } 656 657 return (error); 658 } 659 660 int 661 union_seek(ap) 662 struct vop_seek_args /* { 663 struct vnode *a_vp; 664 off_t a_oldoff; 665 off_t a_newoff; 666 struct ucred *a_cred; 667 } */ *ap; 668 { 669 670 return (VOP_SEEK(OTHERVP(ap->a_vp), ap->a_oldoff, ap->a_newoff, ap->a_cred)); 671 } 672 673 int 674 union_remove(ap) 675 struct vop_remove_args /* { 676 struct vnode *a_dvp; 677 struct vnode *a_vp; 678 struct componentname *a_cnp; 679 } */ *ap; 680 { 681 int error; 682 struct union_node *dun = VTOUNION(ap->a_dvp); 683 struct union_node *un = VTOUNION(ap->a_vp); 684 685 if (dun->un_uppervp && un->un_uppervp) { 686 struct vnode *dvp = dun->un_uppervp; 687 struct vnode *vp = un->un_uppervp; 688 689 VREF(dvp); 690 VOP_LOCK(dvp); 691 vput(ap->a_dvp); 692 VREF(vp); 693 VOP_LOCK(vp); 694 vput(ap->a_vp); 695 696 error = VOP_REMOVE(dvp, vp, ap->a_cnp); 697 } else { 698 /* 699 * XXX: should create a whiteout here 700 */ 701 vput(ap->a_dvp); 702 vput(ap->a_vp); 703 error = EROFS; 704 } 705 706 return (error); 707 } 708 709 int 710 union_link(ap) 711 struct vop_link_args /* { 712 struct vnode *a_vp; 713 struct vnode *a_tdvp; 714 struct componentname *a_cnp; 715 } */ *ap; 716 { 717 int error; 718 struct union_node *dun = VTOUNION(ap->a_vp); 719 struct union_node *un = VTOUNION(ap->a_tdvp); 720 721 if (dun->un_uppervp && un->un_uppervp) { 722 struct vnode *dvp = dun->un_uppervp; 723 struct vnode *vp = un->un_uppervp; 724 725 VREF(dvp); 726 VOP_LOCK(dvp); 727 
		vput(ap->a_vp);
		VREF(vp);
		vrele(ap->a_tdvp);

		error = VOP_LINK(dvp, vp, ap->a_cnp);
	} else {
		/*
		 * XXX: need to copy to upper layer
		 * and do the link there.
		 */
		vput(ap->a_vp);
		vrele(ap->a_tdvp);
		error = EROFS;
	}

	return (error);
}

int
union_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;

	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == 0) {
			error = EROFS;
			goto bad;
		}

		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		if (un->un_uppervp == 0) {
			error = EROFS;
			goto bad;
		}

		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);
		if (un->un_uppervp == 0) {
			error = EROFS;
			goto bad;
		}

		tdvp = un->un_uppervp;
		VREF(tdvp);
		VOP_LOCK(tdvp);
		vput(ap->a_tdvp);
	}

	if (tvp && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);
		if (un->un_uppervp == 0) {
			error = EROFS;
			goto bad;
		}

		tvp = un->un_uppervp;
		VREF(tvp);
		VOP_LOCK(tvp);
		vput(ap->a_tvp);
	}

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

bad:
	vrele(fdvp);
	vrele(fvp);
	vput(tdvp);
	if (tvp)
		vput(tvp);

	return (error);
}

int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;

	if (dvp) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		error = VOP_MKDIR(dvp, &vp, ap->a_cnp, ap->a_vap);
		if (error)
			return (error);

		error = union_allocvp(
				ap->a_vpp,
				mp,
				un->un_uppervp,
				ap->a_cnp,
				vp,
				NULLVP);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);

	if (dun->un_uppervp && un->un_uppervp) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		VREF(vp);
		VOP_LOCK(vp);
		vput(ap->a_vp);

		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
	} else {
		/*
		 * XXX: should create a whiteout here
		 */
		vput(ap->a_dvp);
		vput(ap->a_vp);
		error = EROFS;
	}

	return (error);
}

int
union_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct vnode *dvp = un->un_uppervp;

	if (dvp) {
		int error;
		struct vnode *vp;
		struct mount *mp = ap->a_dvp->v_mount;

		VREF(dvp);
		VOP_LOCK(dvp);
		vput(ap->a_dvp);
		error = VOP_SYMLINK(dvp, &vp, ap->a_cnp,
				ap->a_vap, ap->a_target);
		if (error)
			return (error);

		error = union_allocvp(
				ap->a_vpp,
				mp,
				un->un_uppervp,
				ap->a_cnp,
				vp,
				NULLVP);
		vput(*ap->a_vpp);
		return (error);
	}

	vput(ap->a_dvp);
	return (EROFS);
}

/*
 * union_readdir works in concert with getdirentries and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error = 0;
	struct union_node *un = VTOUNION(ap->a_vp);

	if (un->un_uppervp) {
		struct vnode *vp = OTHERVP(ap->a_vp);

		VOP_LOCK(vp);
		error = VOP_READDIR(vp, ap->a_uio, ap->a_cred);
		VOP_UNLOCK(vp);
	}

	return (error);
}

int
union_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_READLINK(vp, ap->a_uio, ap->a_cred);
	VOP_UNLOCK(vp);

	return (error);
}

int
union_abortop(ap)
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_dvp);
	int islocked = un->un_flags & UN_LOCKED;

	if (islocked)
		VOP_LOCK(vp);
	error = VOP_ABORTOP(vp, ap->a_cnp);
	if (islocked)
		VOP_UNLOCK(vp);

	return (error);
}

int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */
	return (0);
}

int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct union_node *un = VTOUNION(vp);
	struct vnode *uppervp = un->un_uppervp;
	struct vnode *lowervp = un->un_lowervp;
	struct vnode *dirvp = un->un_dirvp;
	char *path = un->un_path;

	/*
	 * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p,
	 * so we can't call VOPs on ourself.
	 */
	/* After this assignment, this node will not be re-used. */
	un->un_uppervp = 0;
	un->un_lowervp = 0;
	un->un_dirvp = 0;
	un->un_path = NULL;
	union_freevp(vp);
	if (uppervp)
		vrele(uppervp);
	if (lowervp)
		vrele(lowervp);
	if (dirvp)
		vrele(dirvp);
	if (path)
		free(path, M_TEMP);
	return (0);
}

int
union_lock(ap)
	struct vop_lock_args *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);

#ifdef DIAGNOSTIC
	if (un->un_pid == curproc->p_pid)
		panic("union: locking against myself");
#endif
	while (un->un_flags & UN_LOCKED) {
		un->un_flags |= UN_WANT;
		sleep((caddr_t) &un->un_flags, PINOD);
	}
	un->un_flags |= UN_LOCKED;
#ifdef DIAGNOSTIC
	un->un_pid = curproc->p_pid;
#endif
}

int
union_unlock(ap)
	struct vop_lock_args *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);

#ifdef DIAGNOSTIC
	if (un->un_pid != curproc->p_pid)
		panic("union: unlocking other process's union node");
	if ((un->un_flags & UN_LOCKED) == 0)
		panic("union: unlock unlocked node");
#endif

	un->un_flags &= ~UN_LOCKED;
	if (un->un_flags & UN_WANT) {
		un->un_flags &= ~UN_WANT;
		wakeup((caddr_t) &un->un_flags);
	}

#ifdef DIAGNOSTIC
	un->un_pid = 0;
#endif
}

int
union_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_BMAP(vp, ap->a_bn, ap->a_vpp, ap->a_bnp, ap->a_runp);
	VOP_UNLOCK(vp);

	return (error);
}

int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UNION, vp=%x, uppervp=%x, lowervp=%x\n",
			vp, UPPERVP(vp), LOWERVP(vp));
	return (0);
}

int
union_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
}

int
union_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	int error;
	struct vnode *vp = OTHERVP(ap->a_vp);

	VOP_LOCK(vp);
	error = VOP_PATHCONF(vp, ap->a_name, ap->a_retval);
	VOP_UNLOCK(vp);

	return (error);
}

int
union_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{

	return (VOP_ADVLOCK(OTHERVP(ap->a_vp), ap->a_id, ap->a_op,
				ap->a_fl, ap->a_flags));
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	int error;
	struct vnode *savedvp;

	savedvp = bp->b_vp;
	bp->b_vp = OTHERVP(bp->b_vp);

#ifdef DIAGNOSTIC
	if (bp->b_vp == 0)
		panic("union_strategy: nil vp");
	if (((bp->b_flags & B_READ) == 0) &&
	    (bp->b_vp == LOWERVP(savedvp)))
		panic("union_strategy: writing to lowervp");
#endif

	error = VOP_STRATEGY(bp);
	bp->b_vp = savedvp;

	return (error);
}

/*
 * Global vfs data structures
 */
int (**union_vnodeop_p)();
struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, union_lookup },		/* lookup */
	{ &vop_create_desc, union_create },		/* create */
	{ &vop_mknod_desc, union_mknod },		/* mknod */
	{ &vop_open_desc, union_open },			/* open */
	{ &vop_close_desc, union_close },		/* close */
	{ &vop_access_desc, union_access },		/* access */
	{ &vop_getattr_desc, union_getattr },		/* getattr */
	{ &vop_setattr_desc, union_setattr },		/* setattr */
	{ &vop_read_desc, union_read },			/* read */
	{ &vop_write_desc, union_write },		/* write */
	{ &vop_ioctl_desc, union_ioctl },		/* ioctl */
	{ &vop_select_desc, union_select },		/* select */
	{ &vop_mmap_desc, union_mmap },			/* mmap */
	{ &vop_fsync_desc, union_fsync },		/* fsync */
	{ &vop_seek_desc, union_seek },			/* seek */
	{ &vop_remove_desc, union_remove },		/* remove */
	{ &vop_link_desc, union_link },			/* link */
	{ &vop_rename_desc, union_rename },		/* rename */
	{ &vop_mkdir_desc, union_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, union_rmdir },		/* rmdir */
	{ &vop_symlink_desc, union_symlink },		/* symlink */
	{ &vop_readdir_desc, union_readdir },		/* readdir */
	{ &vop_readlink_desc, union_readlink },		/* readlink */
	{ &vop_abortop_desc, union_abortop },		/* abortop */
	{ &vop_inactive_desc, union_inactive },		/* inactive */
	{ &vop_reclaim_desc, union_reclaim },		/* reclaim */
	{ &vop_lock_desc, union_lock },			/* lock */
	{ &vop_unlock_desc, union_unlock },		/* unlock */
	{ &vop_bmap_desc, union_bmap },			/* bmap */
	{ &vop_strategy_desc, union_strategy },		/* strategy */
	{ &vop_print_desc, union_print },		/* print */
	{ &vop_islocked_desc, union_islocked },		/* islocked */
	{ &vop_pathconf_desc, union_pathconf },		/* pathconf */
	{ &vop_advlock_desc, union_advlock },		/* advlock */
#ifdef notdef
	{ &vop_blkatoff_desc, union_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, union_valloc },		/* valloc */
	{ &vop_vfree_desc, union_vfree },		/* vfree */
	{ &vop_truncate_desc, union_truncate },		/* truncate */
	{ &vop_update_desc, union_update },		/* update */
	{ &vop_bwrite_desc, union_bwrite },		/* bwrite */
#endif
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };