Lines Matching +full:layer +full:- +full:base +full:- +full:offset
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 1994 Jan-Simon Pendry
11 * Jan-Simon Pendry.
66 #define UNIONFSHASHMASK (NUNIONFSNODECACHE - 1)
122 MPASS(unp->un_dvp != NULL);
123 vrele(unp->un_dvp);
127 /* We expect this function to be single-threaded, thus no atomic */
138 return (&(unp->un_hashtbl[vfs_hash_index(lookup) & UNIONFSHASHMASK]));
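The node cache above is a hash table whose size (NUNIONFSNODECACHE) is a power of two, so UNIONFSHASHMASK can select a bucket with a single bitwise AND of the hash value. A minimal userland sketch of the same bucket-selection idiom; the table size, hash function, and names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define NCACHE		32			/* must be a power of two */
#define CACHE_MASK	(NCACHE - 1)		/* analogue of UNIONFSHASHMASK */

/* Illustrative stand-in for vfs_hash_index(): mix a pointer value. */
static uint32_t
hash_ptr(const void *p)
{
	return ((uint32_t)((uintptr_t)p >> 4) * 2654435761u);
}

/* Bucket selection by masking, as in the return statement above. */
static unsigned
bucket_for(const void *vp)
{
	return (hash_ptr(vp) & CACHE_MASK);
}

int
main(void)
{
	int dummy;

	printf("bucket %u of %d\n", bucket_for(&dummy), NCACHE);
	return (0);
}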
155 if (unp->un_uppervp == lookup ||
156 unp->un_lowervp == lookup) {
159 vp->v_iflag &= ~VI_OWEINACT;
161 ((vp->v_iflag & VI_DOINGINACT) != 0)) {
208 if (uncp->un_uppervp != NULLVP) {
209 ASSERT_VOP_ELOCKED(uncp->un_uppervp, __func__);
210 KASSERT(uncp->un_uppervp->v_type == VDIR,
212 vp = unionfs_get_cached_vnode_locked(uncp->un_uppervp, dvp);
213 } else if (uncp->un_lowervp != NULLVP) {
214 ASSERT_VOP_ELOCKED(uncp->un_lowervp, __func__);
215 KASSERT(uncp->un_lowervp->v_type == VDIR,
217 vp = unionfs_get_cached_vnode_locked(uncp->un_lowervp, dvp);
220 hd = unionfs_get_hashhead(dvp, (uncp->un_uppervp != NULLVP ?
221 uncp->un_uppervp : uncp->un_lowervp));
240 if (unp->un_hash.le_prev != NULL) {
242 unp->un_hash.le_next = NULL;
243 unp->un_hash.le_prev = NULL;
251 * the caller. Upper and lower vnodes, if non-NULL, are also expected to be
263 lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
270 vp->v_data = NULL;
271 vp->v_vnlock = &vp->v_lock;
272 vp->v_op = &dead_vnodeops;
277 if (unp->un_dvp != NULLVP)
278 vrele(unp->un_dvp);
279 if (unp->un_uppervp != NULLVP) {
280 vput(unp->un_uppervp);
281 if (unp->un_lowervp != NULLVP)
282 vrele(unp->un_lowervp);
283 } else if (unp->un_lowervp != NULLVP)
284 vput(unp->un_lowervp);
285 if (unp->un_hashtbl != NULL)
286 hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, UNIONFSHASHMASK);
287 free(unp->un_path, M_UNIONFSPATH);
314 lkflags = (cnp ? cnp->cn_lkflags : 0);
315 path = (cnp ? cnp->cn_nameptr : NULL);
321 vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);
324 if (cnp && !(cnp->cn_flags & ISLASTCN))
354 unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
360 unp->un_vnode = vp;
361 unp->un_uppervp = uppervp;
362 unp->un_lowervp = lowervp;
363 unp->un_dvp = dvp;
365 vp->v_vnlock = uppervp->v_vnlock;
367 vp->v_vnlock = lowervp->v_vnlock;
370 unp->un_path = malloc(cnp->cn_namelen + 1,
372 bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
373 unp->un_path[cnp->cn_namelen] = '\0';
374 unp->un_pathlen = cnp->cn_namelen;
376 vp->v_type = vt;
377 vp->v_data = unp;
387 if (ump->um_uppervp == uppervp || ump->um_lowervp == lowervp)
388 vp->v_vflag |= VV_ROOT;
389 KASSERT(dvp != NULL || (vp->v_vflag & VV_ROOT) != 0,
390 ("%s: NULL dvp for non-root vp %p", __func__, vp));
394 * NOTE: There is still a possibility for cross-filesystem locking here.
396 * created here only has a lower-layer FS component, then we will end
397 * up taking a lower-FS lock while holding an upper-FS lock.
402 * a few places within unionfs (which could lead to the same cross-FS
479 KASSERT(vp->v_vnlock->lk_recurse == 0 || (vp->v_vflag & VV_ROOT) != 0,
484 lvp = unp->un_lowervp;
485 uvp = unp->un_uppervp;
486 dvp = unp->un_dvp;
494 * base vnodes for its entire lifecycle, so unionfs_lock() should
496 * Moreover, during unmount of a non-"below" unionfs mount, the lower
499 if (uvp != NULLVP && lvp != NULLVP && (vp->v_vflag & VV_ROOT) == 0) {
504 if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
511 unp->un_lowervp = unp->un_uppervp = NULLVP;
512 vp->v_vnlock = &(vp->v_lock);
513 vp->v_data = NULL;
514 vp->v_object = NULL;
515 if (unp->un_hashtbl != NULL) {
519 * be reclaimed with a non-zero use count. Otherwise the
523 hd = unp->un_hashtbl + count;
526 unp_t1->un_hash.le_next = NULL;
527 unp_t1->un_hash.le_prev = NULL;
533 writerefs = atomic_load_int(&vp->v_writecount);
544 VOP_ADD_WRITECOUNT(uvp, -writerefs);
556 if (unp->un_path != NULL) {
557 free(unp->un_path, M_UNIONFSPATH);
558 unp->un_path = NULL;
559 unp->un_pathlen = 0;
562 if (unp->un_hashtbl != NULL) {
563 hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, UNIONFSHASHMASK);
566 LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
590 pid = td->td_proc->p_pid;
594 LIST_FOREACH(unsp, &(unp->un_unshead), uns_list) {
595 if (unsp->uns_pid == pid) {
615 pid = td->td_proc->p_pid;
624 unsp->uns_pid = pid;
625 LIST_INSERT_HEAD(&(unp->un_unshead), unsp, uns_list);
642 if (0 < unsp->uns_lower_opencnt || 0 < unsp->uns_upper_opencnt)
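The per-node status records consulted above live on a LIST keyed by process id and are created on demand. A userland sketch of the same find-or-insert pattern using the queue(3) LIST macros; the structure and field names are illustrative rather than the kernel's unionfs_node_status:

#include <sys/queue.h>
#include <stdlib.h>
#include <unistd.h>

/* Illustrative analogue of unionfs_node_status: per-pid open counts. */
struct node_status {
	LIST_ENTRY(node_status)	ns_list;
	pid_t			ns_pid;
	int			ns_upper_opencnt;
	int			ns_lower_opencnt;
};

LIST_HEAD(status_head, node_status);

/* Find the record for 'pid', inserting a zeroed one if none exists yet. */
static struct node_status *
status_get(struct status_head *head, pid_t pid)
{
	struct node_status *nsp;

	LIST_FOREACH(nsp, head, ns_list) {
		if (nsp->ns_pid == pid)
			return (nsp);
	}
	if ((nsp = calloc(1, sizeof(*nsp))) == NULL)
		return (NULL);
	nsp->ns_pid = pid;
	LIST_INSERT_HEAD(head, nsp, ns_list);
	return (nsp);
}

int
main(void)
{
	struct status_head head = LIST_HEAD_INITIALIZER(head);
	struct node_status *nsp;

	if ((nsp = status_get(&head, getpid())) != NULL)
		nsp->ns_upper_opencnt++;
	return (0);
}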
657 uva->va_type = lva->va_type;
658 uva->va_atime = lva->va_atime;
659 uva->va_mtime = lva->va_mtime;
660 uva->va_ctime = lva->va_ctime;
662 switch (ump->um_copymode) {
664 uva->va_mode = lva->va_mode;
665 uva->va_uid = lva->va_uid;
666 uva->va_gid = lva->va_gid;
669 if (ump->um_uid == lva->va_uid) {
670 uva->va_mode = lva->va_mode & 077077;
671 uva->va_mode |= (lva->va_type == VDIR ?
672 ump->um_udir : ump->um_ufile) & 0700;
673 uva->va_uid = lva->va_uid;
674 uva->va_gid = lva->va_gid;
676 uva->va_mode = (lva->va_type == VDIR ?
677 ump->um_udir : ump->um_ufile);
678 uva->va_uid = ump->um_uid;
679 uva->va_gid = ump->um_gid;
683 uva->va_mode = 0777 & ~td->td_proc->p_pd->pd_cmask;
684 uva->va_uid = ump->um_uid;
685 uva->va_gid = ump->um_gid;
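In the "masquerade" branch above, the upper file's mode is rebuilt from the lower one: the 077077 mask keeps the group, other, and set-id/sticky bits while clearing the owner bits, which are then supplied from the mount's configured directory/file mode. A small worked example with illustrative values:

#include <stdio.h>

/*
 * Illustrative values only: lower_mode stands in for lva->va_mode and
 * udir_mode for the mount's configured directory mode (ump->um_udir).
 */
int
main(void)
{
	unsigned lower_mode = 02775;	/* setgid, rwxrwxr-x on the lower dir */
	unsigned udir_mode = 0500;	/* owner bits chosen at mount time */
	unsigned upper_mode;

	/* Keep group/other/set-id bits, replace the owner bits. */
	upper_mode = lower_mode & 077077;
	upper_mode |= udir_mode & 0700;

	printf("lower 0%o -> upper 0%o\n", lower_mode, upper_mode); /* 02575 */
	return (0);
}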
725 cn->cn_namelen = pathlen;
726 cn->cn_pnbuf = path;
727 cn->cn_nameiop = nameiop;
728 cn->cn_flags = (LOCKPARENT | LOCKLEAF | ISLASTCN);
729 cn->cn_lkflags = LK_EXCLUSIVE;
730 cn->cn_cred = cnp->cn_cred;
731 cn->cn_nameptr = cn->cn_pnbuf;
735 cn->cn_flags |= (cnp->cn_flags & DOWHITEOUT);
739 cn->cn_flags |= NOCACHE;
750 KASSERT(cn->cn_pnbuf == path, ("%s: cn_pnbuf changed", __func__));
772 lvp = unp->un_lowervp;
775 dvp = unp->un_dvp;
777 VNASSERT(vp->v_writecount == 0, vp,
778 ("%s: non-zero writecount", __func__));
783 lockrec = lvp->v_vnlock->lk_recurse;
787 unp->un_uppervp = uvp;
788 vp->v_vnlock = uvp->v_vnlock;
794 * Re-cache the unionfs vnode against the upper vnode
796 if (dvp != NULLVP && vp->v_type == VDIR) {
798 if (unp->un_hash.le_prev != NULL) {
803 VI_UNLOCK(unp->un_dvp);
810 * This is useful, for example, during copy-up operations in which
812 * possibility of a concurrent copy-up on the same vnode triggering
825 while (error == 0 && (unp->un_flag & flag) != 0) {
832 * If we waited on a concurrent copy-up and that
833 * copy-up was successful, return a non-fatal
837 * be re-queried to avoid creating a duplicate unionfs
844 unp->un_uppervp != NULLVP)
851 unp->un_flag |= flag;
866 VNASSERT((unp->un_flag & flag) != 0, vp,
868 unp->un_flag &= ~flag;
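The in-progress flag logic above lets one thread mark a copy-up (or similar operation) as underway while others wait on it; a waiter that finds a concurrent copy-up already produced an upper vnode gets a non-fatal error telling it to retry rather than repeat the work. A userland analogue using a mutex and condition variable; the kernel uses the vnode interlock and msleep(9) instead, and the names and the EAGAIN return value here are illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

/* Illustrative analogue of the unionfs node's flag word and copy-up state. */
struct node {
	pthread_mutex_t	n_lock;
	pthread_cond_t	n_cv;
	unsigned	n_flags;
	bool		n_copied_up;	/* stands in for "upper vnode exists" */
};

#define NODE_COPY_IN_PROGRESS	0x01

static int
node_set_in_progress_flag(struct node *np, unsigned flag)
{
	int error = 0;

	pthread_mutex_lock(&np->n_lock);
	while (error == 0 && (np->n_flags & flag) != 0) {
		pthread_cond_wait(&np->n_cv, &np->n_lock);
		/*
		 * If we waited on a concurrent copy-up and it succeeded,
		 * tell the caller to retry its operation instead of
		 * starting a duplicate copy-up.
		 */
		if (flag == NODE_COPY_IN_PROGRESS && np->n_copied_up)
			error = EAGAIN;
	}
	if (error == 0)
		np->n_flags |= flag;
	pthread_mutex_unlock(&np->n_lock);
	return (error);
}

static void
node_clear_in_progress_flag(struct node *np, unsigned flag)
{
	pthread_mutex_lock(&np->n_lock);
	np->n_flags &= ~flag;
	pthread_cond_broadcast(&np->n_cv);
	pthread_mutex_unlock(&np->n_lock);
}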
903 ump = MOUNTTOUNIONFSMOUNT(vp->v_mount);
905 if (unp->un_uppervp != NULLVP)
908 udvp = dunp->un_uppervp;
916 lvp = unp->un_lowervp;
918 credbk = cnp->cn_cred;
922 cred = crdup(cnp->cn_cred);
927 cnp->cn_cred = cred;
932 if ((error = VOP_GETATTR(lvp, &lva, cnp->cn_cred)))
938 cnp->cn_nameptr, cnp->cn_namelen, CREATE))) {
974 * Temporarily NUL-terminate the current pathname component.
984 * NUL-termination will not have an effect on other threads.
1043 cnp->cn_cred = credbk;
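The shadow-directory code above temporarily writes a NUL over the separator that follows the current pathname component, so the component can be handed to interfaces expecting a NUL-terminated string, and then restores the byte; because the buffer is private to the calling thread, no other thread can observe the edit. A minimal userland sketch of the same trick (the path and helper below are made up for illustration):

#include <stdio.h>
#include <string.h>

/*
 * Temporarily NUL-terminate the component at buf[off..off+len), use it,
 * then restore the saved byte.
 */
static void
use_component(char *path, size_t off, size_t len)
{
	char saved;

	saved = path[off + len];
	path[off + len] = '\0';
	printf("component: %s\n", path + off);	/* stand-in for the real work */
	path[off + len] = saved;
}

int
main(void)
{
	char path[] = "dir1/dir2/file";

	use_component(path, 5, 4);	/* prints "dir2" */
	return (0);
}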
1061 * The unionfs vnode shares its lock with the base-layer vnode(s); if the
1062 * base FS must transiently drop its vnode lock, the unionfs vnode may
1068 * it stops sharing its lock with the base vnode, so even if the
1069 * forwarded VOP reacquires the base vnode lock the unionfs vnode
1073 * 2) Loss of reference on the base vnode. The caller is expected to
1075 * unionfs vnode holds a reference on the base-layer vnode(s). But
1077 * doomed, violating the base layer's expectation that its caller
1080 * basevp1 and basevp2 represent two base-layer vnodes which are
1095 * Take an additional reference on the base-layer vnodes to
1135 * used to re-lock unionvp1 and unionvp2, respectively, if either
1150 * a flag indicating that it needs to be re-locked.
1151 * Otherwise, simply drop the base-vnode reference that
1162 * If any of the unionfs vnodes need to be re-locked, that
1163 * means the unionfs vnode's lock is now de-coupled from the
1164 * corresponding base vnode. We therefore need to drop the
1165 * base vnode lock (since nothing else will after this point),
1182 * scenario would require at least re-locking the unionfs
1220 udvp = VTOUNIONFS(dvp)->un_uppervp;
1283 ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
1285 lvp = unp->un_lowervp;
1286 cred = td->td_ucred;
1294 if (unp->un_path == NULL)
1297 nd.ni_cnd.cn_namelen = unp->un_pathlen;
1298 nd.ni_cnd.cn_pnbuf = unp->un_path;
1331 __func__, uvp, uvp->v_writecount);
1356 off_t offset;
1371 offset = uio.uio_offset;
1382 if ((count = MAXBSIZE - uio.uio_resid) == 0)
1390 iov.iov_len = count - bufoffset;
1391 uio.uio_offset = offset + bufoffset;
1398 bufoffset += (count - bufoffset) - uio.uio_resid;
1401 uio.uio_offset = offset + bufoffset;
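The loop above copies the lower file to the upper one a buffer at a time: count is how many bytes the read actually produced (MAXBSIZE minus the leftover uio_resid), and bufoffset tracks how much of that buffer has been written so far, so a short write simply advances bufoffset and retries at offset + bufoffset. A userland sketch of the same bookkeeping using pread(2)/pwrite(2); the buffer size and function name are illustrative:

#include <sys/types.h>
#include <unistd.h>

#define COPYBUFSIZE	65536		/* stand-in for MAXBSIZE */

/*
 * Copy fromfd to tofd, tolerating short reads and short writes.  'count'
 * is what the read produced; 'bufoffset' advances by whatever each write
 * managed to push out, mirroring the uio arithmetic above.
 */
static int
copy_fd(int fromfd, int tofd)
{
	static char buf[COPYBUFSIZE];
	ssize_t count, n;
	off_t offset, bufoffset;

	offset = 0;
	for (;;) {
		count = pread(fromfd, buf, sizeof(buf), offset);
		if (count < 0)
			return (-1);
		if (count == 0)			/* EOF: copy complete */
			return (0);
		bufoffset = 0;
		while (bufoffset < count) {
			n = pwrite(tofd, buf + bufoffset, count - bufoffset,
			    offset + bufoffset);
			if (n < 0)
				return (-1);
			bufoffset += n;		/* advance past a short write */
		}
		offset += count;
	}
}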
1435 lvp = unp->un_lowervp;
1438 if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
1440 if (unp->un_dvp == NULLVP)
1442 if (unp->un_uppervp != NULLVP)
1446 VI_LOCK(unp->un_dvp);
1447 dunp = VTOUNIONFS(unp->un_dvp);
1449 udvp = dunp->un_uppervp;
1450 VI_UNLOCK(unp->un_dvp);
1454 if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
1499 VOP_ADD_WRITECOUNT_CHECKED(uvp, -1);
1501 __func__, uvp, uvp->v_writecount);
1521 * We assume the VOP_RMDIR() against the upper layer vnode will take
1525 * then cross-check any files we find against the upper FS to see if
1527 * non-present).
1555 * to hold both the upper and lower layer locks as well as the upper
1557 * deadlock. However, if the cross-check logic below needs to call
1561 * below, such as VOP_OPEN() and VOP_READDIR(), may also re-lock the
1568 * read-only) may change while the relevant lock is dropped. But
1569 * since re-locking may happen here and open up such a window anyway,
1573 * of out-of-band state changes) that we can avoid these costly checks
1616 if (dp->d_type == DT_WHT)
1623 switch (dp->d_namlen) {
1625 if (dp->d_name[1] != '.') {
1631 if (dp->d_name[0] != '.') {
1640 cn.cn_namelen = dp->d_namlen;
1642 cn.cn_nameptr = dp->d_name;
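In the loop above, whiteout entries (DT_WHT) and the "." and ".." entries never count when deciding whether the lower directory still holds live names. A userland sketch of just that classification; the cross-check of any remaining lower names against the upper layer, described earlier, is not modeled here:

#include <dirent.h>
#include <stdbool.h>

/*
 * Return true if a lower-layer directory entry should be treated as a live
 * name: skip whiteouts and the "." / ".." entries, count everything else.
 */
static bool
entry_counts_toward_nonempty(const struct dirent *dp)
{
	if (dp->d_type == DT_WHT)
		return (false);
	switch (dp->d_namlen) {
	case 2:
		if (dp->d_name[1] != '.')
			break;
		/* FALLTHROUGH */
	case 1:
		if (dp->d_name[0] != '.')
			break;
		return (false);		/* "." or ".." */
	default:
		break;
	}
	return (true);
}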