/*-
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2005, 2006 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
 * Copyright (c) 2006 Daichi Goto <daichi@freebsd.org>
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD: src/sys/fs/unionfs/union_subr.c,v 1.99 2008/01/24 12:34:27 attilio Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/stat.h>
#include <sys/kauth.h>
#include <sys/resourcevar.h>

#include <fs/unionfs/unionfs.h>

MALLOC_DEFINE(M_UNIONFSPATH, "UNIONFS path", "UNIONFS path private part");

/*
 * Create a new unionfs node, or get the existing one.
 *
 * uppervp and lowervp should be unlocked, because locking the new unionfs
 * vnode also locks uppervp or lowervp.  To avoid deadlock, do not hold
 * several of these locks at the same time.
 */
int
unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
    struct vnode *lowervp, struct vnode *dvp,
    struct vnode **vpp, struct componentname *cnp)
{
	struct unionfs_mount *ump;
	struct unionfs_node *unp;
	struct vnode *vp;
	int error;
	const char *path;

	ump = MOUNTTOUNIONFSMOUNT(mp);
	path = (cnp ? cnp->cn_nameptr : NULL);

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("unionfs_nodeget: upper and lower is null");

	/* If the ISLASTCN flag is not set, skip recording the path name. */
	if (cnp && !(cnp->cn_flags & ISLASTCN))
		path = NULL;
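	/*
	 * Only the mount's root node may be created without a parent
	 * unionfs vnode (dvp).
	 */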
	if ((uppervp == NULLVP || ump->um_uppervp != uppervp) ||
	    (lowervp == NULLVP || ump->um_lowervp != lowervp)) {
		if (dvp == NULLVP)
			return (EINVAL);
	}

	unp = kmem_zalloc(sizeof(*unp), KM_SLEEP);
	if (unp == NULL)
		return (ENOMEM);
	error = getnewvnode(VT_UNION, mp, unionfs_vnodeop_p, &vp);
	if (error != 0) {
		kmem_free(unp, sizeof(*unp));
		return (error);
	}
	if (dvp != NULLVP)
		vref(dvp);
	if (uppervp != NULLVP)
		vref(uppervp);
	if (lowervp != NULLVP)
		vref(lowervp);

	unp->un_vnode = vp;
	unp->un_uppervp = uppervp;
	unp->un_lowervp = lowervp;
	unp->un_dvp = dvp;

	if (path != NULL) {
		unp->un_path = (char *)
		    malloc(cnp->cn_namelen + 1, M_UNIONFSPATH,
		    M_WAITOK | M_ZERO);
		memcpy(unp->un_path, cnp->cn_nameptr, cnp->cn_namelen);
		unp->un_path[cnp->cn_namelen] = '\0';
	}
	vp->v_type = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);
	vp->v_data = unp;
	uvm_vnp_setsize(vp, 0);

	if ((uppervp != NULLVP && ump->um_uppervp == uppervp) &&
	    (lowervp != NULLVP && ump->um_lowervp == lowervp))
		vp->v_vflag |= VV_ROOT;

	*vpp = vp;

	return (0);
}

/*
 * Clean up the unionfs node.
 */
void
unionfs_noderem(struct vnode *vp)
{
	struct unionfs_mount *ump;
	struct unionfs_node *unp;
	struct unionfs_node_status *unsp;
	struct vnode *lvp;
	struct vnode *uvp;

	ump = MOUNTTOUNIONFSMOUNT(vp->v_mount);

	/*
	 * Use the interlock to protect the clearing of v_data to
	 * prevent faults in unionfs_lock().
	 */
	unp = VTOUNIONFS(vp);
	lvp = unp->un_lowervp;
	uvp = unp->un_uppervp;
	unp->un_lowervp = unp->un_uppervp = NULLVP;
	vp->v_data = NULL;

	if (lvp != NULLVP)
		vrele(lvp);
	if (uvp != NULLVP)
		vrele(uvp);
	if (unp->un_dvp != NULLVP) {
		vrele(unp->un_dvp);
		unp->un_dvp = NULLVP;
	}
	if (unp->un_path) {
		free(unp->un_path, M_UNIONFSPATH);
		unp->un_path = NULL;
	}

	while ((unsp = LIST_FIRST(&(unp->un_unshead))) != NULL) {
		LIST_REMOVE(unsp, uns_list);
		kmem_free(unsp, sizeof(*unsp));
	}
	kmem_free(unp, sizeof(*unp));
}

/*
 * Get the unionfs node status.
 * The vnode must be locked exclusively.
 */
void
unionfs_get_node_status(struct unionfs_node *unp,
    struct unionfs_node_status **unspp)
{
	struct unionfs_node_status *unsp;
	pid_t pid;
	lwpid_t lid;

	KASSERT(NULL != unspp);
	KASSERT(VOP_ISLOCKED(UNIONFSTOV(unp)) == LK_EXCLUSIVE);

	pid = curproc->p_pid;
	lid = curlwp->l_lid;

	LIST_FOREACH(unsp, &(unp->un_unshead), uns_list) {
		if (unsp->uns_pid == pid && unsp->uns_lid == lid) {
			*unspp = unsp;
			return;
		}
	}

	/* create a new unionfs node status */
	unsp = kmem_zalloc(sizeof(*unsp), KM_SLEEP);
	unsp->uns_pid = pid;
	unsp->uns_lid = lid;
	LIST_INSERT_HEAD(&(unp->un_unshead), unsp, uns_list);

	*unspp = unsp;
}

/*
 * Remove the unionfs node status, if possible.
 * The vnode must be locked exclusively.
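 * The status is only freed once both the upper and lower open counts
 * have dropped to zero.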
 */
void
unionfs_tryrem_node_status(struct unionfs_node *unp,
    struct unionfs_node_status *unsp)
{
	KASSERT(NULL != unsp);
	KASSERT(VOP_ISLOCKED(UNIONFSTOV(unp)) == LK_EXCLUSIVE);

	if (0 < unsp->uns_lower_opencnt || 0 < unsp->uns_upper_opencnt)
		return;

	LIST_REMOVE(unsp, uns_list);
	kmem_free(unsp, sizeof(*unsp));
}

/*
 * Create upper node attr.
 */
void
unionfs_create_uppervattr_core(struct unionfs_mount *ump,
    struct vattr *lva,
    struct vattr *uva)
{
	vattr_null(uva);
	uva->va_type = lva->va_type;
	uva->va_atime = lva->va_atime;
	uva->va_mtime = lva->va_mtime;
	uva->va_ctime = lva->va_ctime;

	switch (ump->um_copymode) {
	case UNIONFS_TRANSPARENT:
		uva->va_mode = lva->va_mode;
		uva->va_uid = lva->va_uid;
		uva->va_gid = lva->va_gid;
		break;
	case UNIONFS_MASQUERADE:
		if (ump->um_uid == lva->va_uid) {
			uva->va_mode = lva->va_mode & 077077;
			uva->va_mode |= (lva->va_type == VDIR ?
			    ump->um_udir : ump->um_ufile) & 0700;
			uva->va_uid = lva->va_uid;
			uva->va_gid = lva->va_gid;
		} else {
			uva->va_mode = (lva->va_type == VDIR ?
			    ump->um_udir : ump->um_ufile);
			uva->va_uid = ump->um_uid;
			uva->va_gid = ump->um_gid;
		}
		break;
	default:		/* UNIONFS_TRADITIONAL */
		uva->va_mode = 0777 & ~curproc->p_cwdi->cwdi_cmask;
		uva->va_uid = ump->um_uid;
		uva->va_gid = ump->um_gid;
		break;
	}
}

/*
 * Create upper node attr.
 */
int
unionfs_create_uppervattr(struct unionfs_mount *ump,
    struct vnode *lvp,
    struct vattr *uva,
    kauth_cred_t cred)
{
	int error;
	struct vattr lva;

	if ((error = VOP_GETATTR(lvp, &lva, cred)))
		return (error);

	unionfs_create_uppervattr_core(ump, &lva, uva);

	return (error);
}

/*
 * relookup
 *
 * dvp should be locked on entry and will be locked on return.
 *
 * If an error is returned, *vpp will be invalid, otherwise it will hold a
 * locked, referenced vnode.  If *vpp == dvp then remember that only one
 * LK_EXCLUSIVE lock is held.
 */
static int
unionfs_relookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct componentname *cn,
    const char *path, int pathlen, u_long nameiop)
{
	int error;

	cn->cn_namelen = pathlen;
	cn->cn_pnbuf = PNBUF_GET();
	memcpy(cn->cn_pnbuf, path, pathlen);
	cn->cn_pnbuf[pathlen] = '\0';

	cn->cn_nameiop = nameiop;
	cn->cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
	cn->cn_cred = cnp->cn_cred;

	cn->cn_nameptr = cn->cn_pnbuf;
	cn->cn_consume = cnp->cn_consume;

	if (nameiop == DELETE)
		cn->cn_flags |= (cnp->cn_flags & (DOWHITEOUT | SAVESTART));
	else if (RENAME == nameiop)
		cn->cn_flags |= (cnp->cn_flags & SAVESTART);

	vref(dvp);
	VOP_UNLOCK(dvp);

	if ((error = relookup(dvp, vpp, cn))) {
		PNBUF_PUT(cn->cn_pnbuf);
		cn->cn_flags &= ~HASBUF;
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else
		vrele(dvp);

	return (error);
}

/*
 * relookup for CREATE namei operation.
 *
 * dvp is a unionfs vnode and should be locked.
 *
 * If unionfs_copyfile() has been called (by unionfs_link() etc.), the
 * original VOP_LOOKUP result is stale, so a relookup against the upper
 * directory is needed before the link etc. can be created.
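 * Returns EEXIST if the name already exists in the upper directory.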
 */
int
unionfs_relookup_for_create(struct vnode *dvp, struct componentname *cnp)
{
	int error;
	struct vnode *udvp;
	struct vnode *vp;
	struct componentname cn;

	udvp = UNIONFSVPTOUPPERVP(dvp);
	vp = NULLVP;

	error = unionfs_relookup(udvp, &vp, cnp, &cn, cnp->cn_nameptr,
	    strlen(cnp->cn_nameptr), CREATE);
	if (error)
		return (error);

	if (vp != NULLVP) {
		if (udvp == vp)
			vrele(vp);
		else
			vput(vp);

		error = EEXIST;
	}

	if (cn.cn_flags & HASBUF) {
		PNBUF_PUT(cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}

	if (!error) {
		cn.cn_flags |= (cnp->cn_flags & HASBUF);
		cnp->cn_flags = cn.cn_flags;
	}

	return (error);
}

/*
 * relookup for DELETE namei operation.
 *
 * dvp is a unionfs vnode and should be locked.
 */
int
unionfs_relookup_for_delete(struct vnode *dvp, struct componentname *cnp)
{
	int error;
	struct vnode *udvp;
	struct vnode *vp;
	struct componentname cn;

	udvp = UNIONFSVPTOUPPERVP(dvp);
	vp = NULLVP;

	error = unionfs_relookup(udvp, &vp, cnp, &cn, cnp->cn_nameptr,
	    strlen(cnp->cn_nameptr), DELETE);
	if (error)
		return (error);

	if (vp == NULLVP)
		error = ENOENT;
	else {
		if (udvp == vp)
			vrele(vp);
		else
			vput(vp);
	}

	if (cn.cn_flags & HASBUF) {
		PNBUF_PUT(cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}

	if (!error) {
		cn.cn_flags |= (cnp->cn_flags & HASBUF);
		cnp->cn_flags = cn.cn_flags;
	}

	return (error);
}

/*
 * relookup for RENAME namei operation.
 *
 * dvp is a unionfs vnode and should be locked.
 */
int
unionfs_relookup_for_rename(struct vnode *dvp, struct componentname *cnp)
{
	int error;
	struct vnode *udvp;
	struct vnode *vp;
	struct componentname cn;

	udvp = UNIONFSVPTOUPPERVP(dvp);
	vp = NULLVP;

	error = unionfs_relookup(udvp, &vp, cnp, &cn, cnp->cn_nameptr,
	    strlen(cnp->cn_nameptr), RENAME);
	if (error)
		return (error);

	if (vp != NULLVP) {
		if (udvp == vp)
			vrele(vp);
		else
			vput(vp);
	}

	if (cn.cn_flags & HASBUF) {
		PNBUF_PUT(cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}

	if (!error) {
		cn.cn_flags |= (cnp->cn_flags & HASBUF);
		cnp->cn_flags = cn.cn_flags;
	}

	return (error);
}

/*
 * Update the unionfs_node.
 *
 * uvp is the new, locked upper vnode.  The unionfs vnode's lock will be
 * exchanged for uvp's lock, and the lower vnode's lock will be released.
 */
static void
unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp)
{
	struct vnode *vp;
	struct vnode *lvp;

	vp = UNIONFSTOV(unp);
	lvp = unp->un_lowervp;

	/*
	 * lock update
	 */
	mutex_enter(&vp->v_interlock);
	unp->un_uppervp = uvp;
	KASSERT(VOP_ISLOCKED(lvp) == LK_EXCLUSIVE);
	mutex_exit(&vp->v_interlock);
}

/*
 * Create a new shadow dir.
 *
 * udvp should be locked on entry and will be locked on return.
 *
 * If no error is returned, unp will be updated.
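 * The new directory inherits its attributes from the lower directory,
 * subject to the mount's copy mode.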
 */
int
unionfs_mkshadowdir(struct unionfs_mount *ump, struct vnode *udvp,
    struct unionfs_node *unp, struct componentname *cnp)
{
	int error;
	struct vnode *lvp;
	struct vnode *uvp;
	struct vattr va;
	struct vattr lva;
	struct componentname cn;

	if (unp->un_uppervp != NULLVP)
		return (EEXIST);

	lvp = unp->un_lowervp;
	uvp = NULLVP;

	memset(&cn, 0, sizeof(cn));

	if ((error = VOP_GETATTR(lvp, &lva, cnp->cn_cred)))
		goto unionfs_mkshadowdir_abort;

	if ((error = unionfs_relookup(udvp, &uvp, cnp, &cn, cnp->cn_nameptr,
	    cnp->cn_namelen, CREATE)))
		goto unionfs_mkshadowdir_abort;
	if (uvp != NULLVP) {
		if (udvp == uvp)
			vrele(uvp);
		else
			vput(uvp);

		error = EEXIST;
		goto unionfs_mkshadowdir_free_out;
	}

	unionfs_create_uppervattr_core(ump, &lva, &va);

	error = VOP_MKDIR(udvp, &uvp, &cn, &va);

	if (!error) {
		unionfs_node_update(unp, uvp);

		/*
		 * XXX The bug which cannot set uid/gid was corrected.
		 * Ignore errors.
		 * XXXNETBSD Why is this done as root?
		 */
		va.va_type = VNON;
		VOP_SETATTR(uvp, &va, lwp0.l_cred);
	}

unionfs_mkshadowdir_free_out:
	if (cn.cn_flags & HASBUF) {
		PNBUF_PUT(cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}

unionfs_mkshadowdir_abort:

	return (error);
}

/*
 * Create a new whiteout.
 *
 * dvp should be locked on entry and will be locked on return.
 */
int
unionfs_mkwhiteout(struct vnode *dvp, struct componentname *cnp,
    const char *path)
{
	int error;
	struct vnode *wvp;
	struct componentname cn;

	if (path == NULL)
		path = cnp->cn_nameptr;

	wvp = NULLVP;
	if ((error = unionfs_relookup(dvp, &wvp, cnp, &cn, path,
	    strlen(path), CREATE)))
		return (error);
	if (wvp != NULLVP) {
		if (cn.cn_flags & HASBUF) {
			PNBUF_PUT(cn.cn_pnbuf);
			cn.cn_flags &= ~HASBUF;
		}
		if (dvp == wvp)
			vrele(wvp);
		else
			vput(wvp);

		return (EEXIST);
	}

	if (cn.cn_flags & HASBUF) {
		PNBUF_PUT(cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}

	return (error);
}

/*
 * Create a new vnode for a new shadow file.
 *
 * If an error is returned, *vpp will be invalid, otherwise it will hold a
 * locked, referenced and opened vnode.
 *
 * unp is never updated.
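 * The shadow file is created in udvp (the upper directory), named after
 * unp->un_path, with attributes derived from the lower vnode.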
 */
static int
unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
    struct unionfs_node *unp, struct vattr *uvap)
{
	struct unionfs_mount *ump;
	struct vnode *vp;
	struct vnode *lvp;
	kauth_cred_t cred;
	struct vattr lva;
	int fmode;
	int error;
	struct componentname cn;

	ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
	vp = NULLVP;
	lvp = unp->un_lowervp;
	cred = kauth_cred_get();
	fmode = FFLAGS(O_WRONLY | O_CREAT | O_TRUNC | O_EXCL);
	error = 0;

	if ((error = VOP_GETATTR(lvp, &lva, cred)) != 0)
		return (error);
	unionfs_create_uppervattr_core(ump, &lva, uvap);

	if (unp->un_path == NULL)
		panic("unionfs: un_path is null");

	cn.cn_namelen = strlen(unp->un_path);
	cn.cn_pnbuf = PNBUF_GET();
	memcpy(cn.cn_pnbuf, unp->un_path, cn.cn_namelen + 1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
	cn.cn_cred = cred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_consume = 0;

	vref(udvp);
	if ((error = relookup(udvp, &vp, &cn)) != 0)
		goto unionfs_vn_create_on_upper_free_out2;
	vrele(udvp);

	if (vp != NULLVP) {
		if (vp == udvp)
			vrele(vp);
		else
			vput(vp);
		error = EEXIST;
		goto unionfs_vn_create_on_upper_free_out1;
	}

	if ((error = VOP_CREATE(udvp, &vp, &cn, uvap)) != 0)
		goto unionfs_vn_create_on_upper_free_out1;

	if ((error = VOP_OPEN(vp, fmode, cred)) != 0) {
		vput(vp);
		goto unionfs_vn_create_on_upper_free_out1;
	}
	vp->v_writecount++;
	*vpp = vp;

unionfs_vn_create_on_upper_free_out1:
	VOP_UNLOCK(udvp);

unionfs_vn_create_on_upper_free_out2:
	if (cn.cn_flags & HASBUF) {
		PNBUF_PUT(cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}

	return (error);
}

/*
 * Copy from lvp to uvp.
 *
 * lvp and uvp should be locked and opened on entry and will be locked and
 * opened on return.
 */
static int
unionfs_copyfile_core(struct vnode *lvp, struct vnode *uvp,
    kauth_cred_t cred)
{
	int error;
	off_t offset;
	int count;
	int bufoffset;
	char *buf;
	struct uio uio;
	struct iovec iov;

	error = 0;
	memset(&uio, 0, sizeof(uio));
	UIO_SETUP_SYSSPACE(&uio);
	uio.uio_offset = 0;

	buf = kmem_alloc(MAXBSIZE, KM_SLEEP);
	if (buf == NULL)
		return ENOMEM;

	while (error == 0) {
		offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;

		if ((error = VOP_READ(lvp, &uio, 0, cred)) != 0)
			break;
		if ((count = MAXBSIZE - uio.uio_resid) == 0)
			break;

		bufoffset = 0;
		while (bufoffset < count) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf + bufoffset;
			iov.iov_len = count - bufoffset;
			uio.uio_offset = offset + bufoffset;
			uio.uio_resid = iov.iov_len;
			uio.uio_rw = UIO_WRITE;

			if ((error = VOP_WRITE(uvp, &uio, 0, cred)) != 0)
				break;

			bufoffset += (count - bufoffset) - uio.uio_resid;
		}

		uio.uio_offset = offset + bufoffset;
	}

	kmem_free(buf, MAXBSIZE);

	return (error);
}

/*
 * Copy a file from the lower layer to the upper layer.
 *
 * Set docopy to 1 to copy the file contents as well; set it to 0 to create
 * only the shadow file.
 *
 * If no error is returned, unp will be updated.
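 * Note that unp is switched to the new upper vnode as soon as the shadow
 * file has been created, even if copying the data itself fails.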
 */
int
unionfs_copyfile(struct unionfs_node *unp, int docopy, kauth_cred_t cred)
{
	int error;
	struct vnode *udvp;
	struct vnode *lvp;
	struct vnode *uvp;
	struct vattr uva;

	lvp = unp->un_lowervp;
	uvp = NULLVP;

	if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (unp->un_dvp == NULLVP)
		return (EINVAL);
	if (unp->un_uppervp != NULLVP)
		return (EEXIST);
	udvp = VTOUNIONFS(unp->un_dvp)->un_uppervp;
	if (udvp == NULLVP)
		return (EROFS);
	if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	error = VOP_ACCESS(lvp, VREAD, cred);
	if (error != 0)
		return (error);

	error = unionfs_vn_create_on_upper(&uvp, udvp, unp, &uva);
	if (error != 0)
		return (error);

	if (docopy != 0) {
		error = VOP_OPEN(lvp, FREAD, cred);
		if (error == 0) {
			error = unionfs_copyfile_core(lvp, uvp, cred);
			VOP_CLOSE(lvp, FREAD, cred);
		}
	}
	VOP_CLOSE(uvp, FWRITE, cred);
	uvp->v_writecount--;

	if (error == 0) {
		/* Reset the attributes. Ignore errors. */
		uva.va_type = VNON;
		VOP_SETATTR(uvp, &uva, cred);
	}

	unionfs_node_update(unp, uvp);

	return (error);
}

/*
 * Check whether vp may be removed with rmdir, i.e. whether the directory
 * is empty as seen through the union.
 *
 * vp is a unionfs vnode and must be locked.
 */
int
unionfs_check_rmdir(struct vnode *vp, kauth_cred_t cred)
{
	int error;
	int eofflag;
	int lookuperr;
	struct vnode *uvp;
	struct vnode *lvp;
	struct vnode *tvp;
	struct vattr va;
	struct componentname cn;
	/*
	 * The size of buf needs to be larger than DIRBLKSIZ.
	 */
	char buf[256 * 6];
	struct dirent *dp;
	struct dirent *edp;
	struct uio uio;
	struct iovec iov;

	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);

	eofflag = 0;
	uvp = UNIONFSVPTOUPPERVP(vp);
	lvp = UNIONFSVPTOLOWERVP(vp);

	/* check opaque */
	if ((error = VOP_GETATTR(uvp, &va, cred)) != 0)
		return (error);
	if (va.va_flags & OPAQUE)
		return (0);

	/* open vnode */
	if ((error = VOP_ACCESS(vp, VEXEC|VREAD, cred)) != 0)
		return (error);
	if ((error = VOP_OPEN(vp, FREAD, cred)) != 0)
		return (error);

	UIO_SETUP_SYSSPACE(&uio);
	uio.uio_rw = UIO_READ;
	uio.uio_offset = 0;

	while (!error && !eofflag) {
		iov.iov_base = buf;
		iov.iov_len = sizeof(buf);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = iov.iov_len;

		error = VOP_READDIR(lvp, &uio, cred, &eofflag, NULL, NULL);
		if (error)
			break;

		edp = (struct dirent *)&buf[sizeof(buf) - uio.uio_resid];
		for (dp = (struct dirent *)buf; !error && dp < edp;
		    dp = (struct dirent *)((char *)dp + dp->d_reclen)) {
			if (dp->d_type == DT_WHT ||
			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && !memcmp(dp->d_name, "..", 2)))
				continue;

			cn.cn_namelen = dp->d_namlen;
			cn.cn_pnbuf = NULL;
			cn.cn_nameptr = dp->d_name;
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = (LOCKPARENT | LOCKLEAF | SAVENAME |
			    RDONLY | ISLASTCN);
			cn.cn_cred = cred;
			cn.cn_consume = 0;

			/*
			 * Check the entry in the lower layer.
			 * Sometimes readdir returns a bogus entry.
			 */
			lookuperr = VOP_LOOKUP(lvp, &tvp, &cn);

			if (!lookuperr)
				vput(tvp);
			else
				continue;	/* skip entry */

			/*
			 * Check the upper layer.  If there is neither a real
			 * entry nor a whiteout for this name in the upper
			 * layer, the directory is not empty.
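			 * A whiteout in the upper layer hides the lower
			 * entry, so such an entry does not count here.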
			 */
			cn.cn_flags = (LOCKPARENT | LOCKLEAF | SAVENAME |
			    RDONLY | ISLASTCN);
			lookuperr = VOP_LOOKUP(uvp, &tvp, &cn);

			if (!lookuperr)
				vput(tvp);

			/* ignore existing or whiteout entries */
			if (!lookuperr ||
			    (lookuperr == ENOENT && (cn.cn_flags & ISWHITEOUT)))
				continue;

			error = ENOTEMPTY;
		}
	}

	/* close vnode */
	VOP_CLOSE(vp, FREAD, cred);

	return (error);
}

#ifdef DIAGNOSTIC

struct vnode *
unionfs_checkuppervp(struct vnode *vp, const char *fil, int lno)
{
	struct unionfs_node *unp;

	unp = VTOUNIONFS(vp);

#ifdef notyet
	if (vp->v_op != unionfs_vnodeop_p) {
		printf("unionfs_checkuppervp: on non-unionfs-node.\n");
#ifdef KDB
		kdb_enter(KDB_WHY_UNIONFS,
		    "unionfs_checkuppervp: on non-unionfs-node.\n");
#endif
		panic("unionfs_checkuppervp");
	}
#endif
	return (unp->un_uppervp);
}

struct vnode *
unionfs_checklowervp(struct vnode *vp, const char *fil, int lno)
{
	struct unionfs_node *unp;

	unp = VTOUNIONFS(vp);

#ifdef notyet
	if (vp->v_op != unionfs_vnodeop_p) {
		printf("unionfs_checklowervp: on non-unionfs-node.\n");
#ifdef KDB
		kdb_enter(KDB_WHY_UNIONFS,
		    "unionfs_checklowervp: on non-unionfs-node.\n");
#endif
		panic("unionfs_checklowervp");
	}
#endif
	return (unp->un_lowervp);
}
#endif