Lines Matching defs:vp

108 static void	delmntque(struct vnode *vp);
112 static int vtryrecycle(struct vnode *vp, bool isvnlru);
115 static void vn_seqc_write_end_free(struct vnode *vp);
118 static void vdropl_recycle(struct vnode *vp);
119 static void vdrop_recycle(struct vnode *vp);
124 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
323 static void vdbatch_dequeue(struct vnode *vp);
433 struct vnode *vp;
455 vp = nd.ni_vp;
457 if (VN_IS_DOOMED(vp)) {
468 vgone(vp);
470 vput(vp);
481 struct vnode *vp;
495 vp = fp->f_vnode;
497 error = vn_lock(vp, LK_EXCLUSIVE);
501 vgone(vp);
502 VOP_UNLOCK(vp);
516 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
593 struct vnode *vp;
595 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
596 vp->v_type = VMARKER;
597 vp->v_mount = mp;
599 return (vp);
603 vn_free_marker(struct vnode *vp)
606 MPASS(vp->v_type == VMARKER);
607 free(vp, M_VNODE_MARKER);
664 struct vnode *vp;
666 vp = mem;
667 bzero(vp, size);
671 vp->v_vnlock = &vp->v_lock;
672 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
676 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
681 bufobj_init(&vp->v_bufobj, vp);
685 cache_vnode_init(vp);
689 rangelock_init(&vp->v_rl);
691 vp->v_dbatchcpu = NOCPU;
693 vp->v_state = VSTATE_DEAD;
698 vp->v_holdcnt = VHOLD_NO_SMR;
699 vp->v_type = VNON;
701 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
712 struct vnode *vp;
715 vp = mem;
716 vdbatch_dequeue(vp);
718 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
720 rangelock_destroy(&vp->v_rl);
721 lockdestroy(vp->v_vnlock);
722 mtx_destroy(&vp->v_interlock);
723 bo = &vp->v_bufobj;
1242 struct vnode *vp, *mvp;
1255 vp = mvp;
1257 vp = TAILQ_NEXT(vp, v_vnodelist);
1258 if (__predict_false(vp == NULL))
1261 if (__predict_false(vp->v_type == VMARKER))
1270 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
1271 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
1274 if (vp->v_type == VBAD || vp->v_type == VNON)
1277 object = atomic_load_ptr(&vp->v_object);
1289 if (!VI_TRYLOCK(vp))
1291 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
1292 VI_UNLOCK(vp);
1295 if (vp->v_mount == NULL) {
1296 VI_UNLOCK(vp);
1299 vholdl(vp);
1300 VI_UNLOCK(vp);
1302 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1305 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1306 vdrop_recycle(vp);
1309 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
1310 vdrop_recycle(vp);
1315 VI_LOCK(vp);
1316 if (vp->v_usecount > 0 ||
1317 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
1318 (vp->v_object != NULL && vp->v_object->handle == vp &&
1319 vp->v_object->resident_page_count > trigger)) {
1320 VOP_UNLOCK(vp);
1321 vdropl_recycle(vp);
1326 vgonel(vp);
1327 VOP_UNLOCK(vp);
1328 vdropl_recycle(vp);
1336 MPASS(vp->v_type != VMARKER);
1340 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1368 struct vnode *vp;
1382 vp = mvp;
1384 vp = TAILQ_NEXT(vp, v_vnodelist);
1385 if (__predict_false(vp == NULL)) {
1396 vp = mvp;
1409 if (__predict_false(vp->v_type == VMARKER))
1411 if (vp->v_holdcnt > 0)
1419 if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
1423 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
1426 if (!vhold_recycle_free(vp))
1429 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1452 vtryrecycle(vp, isvnlru);
1458 vp = mvp;
1905 vtryrecycle(struct vnode *vp, bool isvnlru)
1909 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1910 VNPASS(vp->v_holdcnt > 0, vp);
1915 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1917 "%s: impossible to recycle, vp %p lock is already held",
1918 __func__, vp);
1919 vdrop_recycle(vp);
1925 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
1926 VOP_UNLOCK(vp);
1929 __func__, vp);
1930 vdrop_recycle(vp);
1939 VI_LOCK(vp);
1940 if (vp->v_usecount) {
1941 VOP_UNLOCK(vp);
1942 vdropl_recycle(vp);
1946 __func__, vp);
1949 if (!VN_IS_DOOMED(vp)) {
1954 vgonel(vp);
1956 VOP_UNLOCK(vp);
1957 vdropl_recycle(vp);
2053 vn_free(struct vnode *vp)
2057 uma_zfree_smr(vnode_zone, vp);
2067 struct vnode *vp;
2079 vp = td->td_vp_reserved;
2082 vp = vn_alloc(mp);
2086 vn_set_state(vp, VSTATE_UNINITIALIZED);
2102 lo = &vp->v_vnlock->lock_object;
2115 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
2119 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
2120 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
2121 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
2122 vp->v_type = VNON;
2123 vp->v_op = vops;
2124 vp->v_irflag = 0;
2125 v_init_counters(vp);
2126 vn_seqc_init(vp);
2127 vp->v_bufobj.bo_ops = &buf_ops_bio;
2133 mac_vnode_init(vp);
2135 mac_vnode_associate_singlelabel(mp, vp);
2138 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
2147 vp->v_hash = (uintptr_t)vp >> vnsz2log;
2149 *vpp = vp;
2176 freevnode(struct vnode *vp)
2189 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2193 vn_seqc_write_end_free(vp);
2195 bo = &vp->v_bufobj;
2196 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2197 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
2198 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
2199 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
2200 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2201 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
2202 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
2204 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
2205 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
2207 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp,
2209 VI_UNLOCK(vp);
2210 cache_assert_no_entries(vp);
2213 mac_vnode_destroy(vp);
2215 if (vp->v_pollinfo != NULL) {
2221 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT);
2222 destroy_vpollinfo(vp->v_pollinfo);
2223 VOP_UNLOCK(vp);
2224 vp->v_pollinfo = NULL;
2226 vp->v_mountedhere = NULL;
2227 vp->v_unpcb = NULL;
2228 vp->v_rdev = NULL;
2229 vp->v_fifoinfo = NULL;
2230 vp->v_iflag = 0;
2231 vp->v_vflag = 0;
2233 vn_free(vp);
2240 delmntque(struct vnode *vp)
2244 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
2246 mp = vp->v_mount;
2248 VI_LOCK(vp);
2249 vp->v_mount = NULL;
2250 VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
2252 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2259 ASSERT_VI_LOCKED(vp, __func__);
2263 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr)
2266 KASSERT(vp->v_mount == NULL,
2268 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
2270 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
2287 VI_LOCK(vp);
2291 (vp->v_vflag & VV_FORCEINSMQ) == 0) {
2292 VI_UNLOCK(vp);
2295 vp->v_data = NULL;
2296 vp->v_op = &dead_vnodeops;
2297 vgone(vp);
2298 vput(vp);
2302 vp->v_mount = mp;
2304 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2305 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
2308 VI_UNLOCK(vp);
2319 insmntque(struct vnode *vp, struct mount *mp)
2321 return (insmntque1_int(vp, mp, true));
2325 insmntque1(struct vnode *vp, struct mount *mp)
2327 return (insmntque1_int(vp, mp, false));
2422 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
2425 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2426 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
2427 if (vp->v_object != NULL && vp->v_object->handle != vp)
2429 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
2554 vtruncbuf(struct vnode *vp, off_t length, int blksize)
2560 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__,
2561 vp, blksize, (uintmax_t)length);
2568 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
2570 bo = &vp->v_bufobj;
2574 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN)
2594 VNASSERT((bp->b_flags & B_DELWRI), vp,
2606 vnode_pager_setsize(vp, length);
2616 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
2622 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range");
2627 bo = &vp->v_bufobj;
2631 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN)
2635 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1));
2639 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
2647 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked");
2677 nbp->b_vp != vp ||
2826 bgetvp(struct vnode *vp, struct buf *bp)
2831 bo = &vp->v_bufobj;
2835 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
2836 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
2843 bp->b_vp = vp;
2852 vhold(vp);
2870 struct vnode *vp;
2872 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2878 vp = bp->b_vp; /* XXX */
2892 vdrop(vp);
2949 struct vnode *vp;
2955 vp = bo2vnode(*bo);
2956 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
2964 vholdl(vp);
2966 VI_UNLOCK(vp);
2967 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2968 vdrop(vp);
2974 ("suspended mp syncing vp %p", vp));
2975 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2976 (void) VOP_FSYNC(vp, MNT_LAZY, td);
2977 VOP_UNLOCK(vp);
2990 vdrop(vp);
3201 struct vnode *vp;
3208 vp = bp->b_vp;
3214 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
3236 switch (vp->v_type) {
3280 v_init_counters(struct vnode *vp)
3283 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
3284 vp, ("%s called for an initialized vnode", __FUNCTION__));
3285 ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
3287 refcount_init(&vp->v_holdcnt, 1);
3288 refcount_init(&vp->v_usecount, 1);
3302 vget_prep_smr(struct vnode *vp)
3308 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
3311 if (vhold_smr(vp))
3320 vget_prep(struct vnode *vp)
3324 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
3327 vhold(vp);
3334 vget_abort(struct vnode *vp, enum vgetstate vs)
3339 vrele(vp);
3342 vdrop(vp);
3350 vget(struct vnode *vp, int flags)
3354 vs = vget_prep(vp);
3355 return (vget_finish(vp, flags, vs));
3359 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
3364 ASSERT_VI_LOCKED(vp, __func__);
3366 ASSERT_VI_UNLOCKED(vp, __func__);
3367 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3368 VNPASS(vp->v_holdcnt > 0, vp);
3369 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3371 error = vn_lock(vp, flags);
3373 vget_abort(vp, vs);
3375 vp);
3379 vget_finish_ref(vp, vs);
3384 vget_finish_ref(struct vnode *vp, enum vgetstate vs)
3388 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3389 VNPASS(vp->v_holdcnt > 0, vp);
3390 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3400 old = atomic_fetchadd_int(&vp->v_usecount, 1);
3401 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old));
3404 old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
3405 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
3407 refcount_release(&vp->v_holdcnt);
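
The vget_prep()/vget_finish() pair above supports a two-step acquisition: take a reference while some other lock still protects the vnode pointer, drop that lock, then lock the vnode. A minimal sketch, assuming a hypothetical per-filesystem hash protected by hash_lock and a hypothetical myfs_hash_lookup() helper:

	enum vgetstate vs;
	struct vnode *vp;
	int error;

	mtx_lock(&hash_lock);			/* hypothetical lookup structure */
	vp = myfs_hash_lookup(mp, ino);		/* hypothetical helper */
	if (vp == NULL) {
		mtx_unlock(&hash_lock);
		return (ENOENT);
	}
	vs = vget_prep(vp);			/* usecount or hold reference, no sleeping */
	mtx_unlock(&hash_lock);
	error = vget_finish(vp, LK_EXCLUSIVE, vs);	/* aborts the reference on failure */
	if (error != 0)
		return (error);
	*vpp = vp;				/* locked and referenced */
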
3413 vref(struct vnode *vp)
3417 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3418 vs = vget_prep(vp);
3419 vget_finish_ref(vp, vs);
3423 vrefact(struct vnode *vp)
3427 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3428 old = refcount_acquire(&vp->v_usecount);
3429 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old));
3433 vlazy(struct vnode *vp)
3437 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
3439 if ((vp->v_mflag & VMP_LAZYLIST) != 0)
3444 if (VN_IS_DOOMED(vp))
3446 mp = vp->v_mount;
3448 if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
3449 vp->v_mflag |= VMP_LAZYLIST;
3450 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3457 vunlazy(struct vnode *vp)
3461 ASSERT_VI_LOCKED(vp, __func__);
3462 VNPASS(!VN_IS_DOOMED(vp), vp);
3464 mp = vp->v_mount;
3466 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3473 if (vp->v_holdcnt == 0) {
3474 vp->v_mflag &= ~VMP_LAZYLIST;
3475 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3486 vunlazy_gone(struct vnode *vp)
3490 ASSERT_VOP_ELOCKED(vp, __func__);
3491 ASSERT_VI_LOCKED(vp, __func__);
3492 VNPASS(!VN_IS_DOOMED(vp), vp);
3494 if (vp->v_mflag & VMP_LAZYLIST) {
3495 mp = vp->v_mount;
3497 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3498 vp->v_mflag &= ~VMP_LAZYLIST;
3499 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3506 vdefer_inactive(struct vnode *vp)
3509 ASSERT_VI_LOCKED(vp, __func__);
3510 VNPASS(vp->v_holdcnt > 0, vp);
3511 if (VN_IS_DOOMED(vp)) {
3512 vdropl(vp);
3515 if (vp->v_iflag & VI_DEFINACT) {
3516 VNPASS(vp->v_holdcnt > 1, vp);
3517 vdropl(vp);
3520 if (vp->v_usecount > 0) {
3521 vp->v_iflag &= ~VI_OWEINACT;
3522 vdropl(vp);
3525 vlazy(vp);
3526 vp->v_iflag |= VI_DEFINACT;
3527 VI_UNLOCK(vp);
3532 vdefer_inactive_unlocked(struct vnode *vp)
3535 VI_LOCK(vp);
3536 if ((vp->v_iflag & VI_OWEINACT) == 0) {
3537 vdropl(vp);
3540 vdefer_inactive(vp);
3562 vput_final(struct vnode *vp, enum vput_op func)
3567 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3568 VNPASS(vp->v_holdcnt > 0, vp);
3570 VI_LOCK(vp);
3576 if (vp->v_usecount > 0)
3583 if (VN_IS_DOOMED(vp))
3586 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0))
3589 if (vp->v_iflag & VI_DOINGINACT)
3598 vp->v_iflag |= VI_OWEINACT;
3603 switch (VOP_ISLOCKED(vp)) {
3609 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
3610 VI_LOCK(vp);
3624 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3625 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
3627 VI_LOCK(vp);
3631 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3632 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
3633 VI_LOCK(vp);
3639 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp,
3641 vp->v_vflag |= VV_UNREF;
3644 error = vinactive(vp);
3646 VOP_UNLOCK(vp);
3649 VOP_LOCK(vp, LK_EXCLUSIVE);
3652 vp->v_vflag &= ~VV_UNREF;
3653 vdropl(vp);
3655 vdefer_inactive(vp);
3660 VOP_UNLOCK(vp);
3661 vdropl(vp);
3678 vrele(struct vnode *vp)
3681 ASSERT_VI_UNLOCKED(vp, __func__);
3682 if (!refcount_release(&vp->v_usecount))
3684 vput_final(vp, VRELE);
3692 vput(struct vnode *vp)
3695 ASSERT_VOP_LOCKED(vp, __func__);
3696 ASSERT_VI_UNLOCKED(vp, __func__);
3697 if (!refcount_release(&vp->v_usecount)) {
3698 VOP_UNLOCK(vp);
3701 vput_final(vp, VPUT);
3709 vunref(struct vnode *vp)
3712 ASSERT_VOP_LOCKED(vp, __func__);
3713 ASSERT_VI_UNLOCKED(vp, __func__);
3714 if (!refcount_release(&vp->v_usecount))
3716 vput_final(vp, VUNREF);
3720 vhold(struct vnode *vp)
3724 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3725 old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3726 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3733 vholdnz(struct vnode *vp)
3736 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3738 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3739 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3742 atomic_add_int(&vp->v_holdcnt, 1);
3757 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3766 vhold_smr(struct vnode *vp)
3772 count = atomic_load_int(&vp->v_holdcnt);
3775 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3779 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3780 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3805 vhold_recycle_free(struct vnode *vp)
3811 count = atomic_load_int(&vp->v_holdcnt);
3814 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3818 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3822 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3832 struct vnode *vp;
3855 vp = vd->tab[i];
3857 MPASS(vp->v_dbatchcpu != NOCPU);
3858 vp->v_dbatchcpu = NOCPU;
3873 vp = vd->tab[i];
3875 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3876 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3877 MPASS(vp->v_dbatchcpu != NOCPU);
3878 vp->v_dbatchcpu = NOCPU;
3886 vdbatch_enqueue(struct vnode *vp)
3890 ASSERT_VI_LOCKED(vp, __func__);
3891 VNPASS(!VN_IS_DOOMED(vp), vp);
3893 if (vp->v_dbatchcpu != NOCPU) {
3894 VI_UNLOCK(vp);
3907 vp->v_dbatchcpu = curcpu;
3908 vd->tab[vd->index] = vp;
3910 VI_UNLOCK(vp);
3923 vdbatch_dequeue(struct vnode *vp)
3929 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp);
3931 cpu = vp->v_dbatchcpu;
3938 if (vd->tab[i] != vp)
3940 vp->v_dbatchcpu = NOCPU;
3950 MPASS(vp->v_dbatchcpu == NOCPU);
3963 vdropl_final(struct vnode *vp)
3966 ASSERT_VI_LOCKED(vp, __func__);
3967 VNPASS(VN_IS_DOOMED(vp), vp);
3974 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) {
3976 VI_UNLOCK(vp);
3986 freevnode(vp);
3990 vdrop(struct vnode *vp)
3993 ASSERT_VI_UNLOCKED(vp, __func__);
3994 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3995 if (refcount_release_if_not_last(&vp->v_holdcnt))
3997 VI_LOCK(vp);
3998 vdropl(vp);
4002 vdropl_impl(struct vnode *vp, bool enqueue)
4005 ASSERT_VI_LOCKED(vp, __func__);
4006 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4007 if (!refcount_release(&vp->v_holdcnt)) {
4008 VI_UNLOCK(vp);
4011 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp);
4012 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
4013 if (VN_IS_DOOMED(vp)) {
4014 vdropl_final(vp);
4019 if (vp->v_mflag & VMP_LAZYLIST) {
4020 vunlazy(vp);
4024 VI_UNLOCK(vp);
4033 vdbatch_enqueue(vp);
4037 vdropl(struct vnode *vp)
4040 vdropl_impl(vp, true);
4055 vdropl_recycle(struct vnode *vp)
4058 vdropl_impl(vp, false);
4062 vdrop_recycle(struct vnode *vp)
4065 VI_LOCK(vp);
4066 vdropl_recycle(vp);
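
vhold()/vholdl() and the vdrop() family above keep a vnode from being recycled or freed without granting use of it; the common bracketing pattern, as in the vlrureclaim() and sync_fsync() hits earlier, looks like this sketch (illustrative only):

	VI_LOCK(vp);
	vholdl(vp);			/* hold count pins the vnode in memory */
	VI_UNLOCK(vp);

	error = vn_lock(vp, LK_EXCLUSIVE);	/* may sleep; the vnode cannot be freed meanwhile */
	if (error == 0) {
		/* ... inspect or flush the vnode ... */
		VOP_UNLOCK(vp);
	}
	vdrop(vp);			/* last hold may requeue the vnode or free a doomed one */
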
4074 vinactivef(struct vnode *vp)
4078 ASSERT_VOP_ELOCKED(vp, "vinactive");
4079 ASSERT_VI_LOCKED(vp, "vinactive");
4080 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp);
4081 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4082 vp->v_iflag |= VI_DOINGINACT;
4083 vp->v_iflag &= ~VI_OWEINACT;
4084 VI_UNLOCK(vp);
4096 if ((vp->v_vflag & VV_NOSYNC) == 0)
4097 vnode_pager_clean_async(vp);
4099 error = VOP_INACTIVE(vp);
4100 VI_LOCK(vp);
4101 VNPASS(vp->v_iflag & VI_DOINGINACT, vp);
4102 vp->v_iflag &= ~VI_DOINGINACT;
4107 vinactive(struct vnode *vp)
4110 ASSERT_VOP_ELOCKED(vp, "vinactive");
4111 ASSERT_VI_LOCKED(vp, "vinactive");
4112 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4114 if ((vp->v_iflag & VI_OWEINACT) == 0)
4116 if (vp->v_iflag & VI_DOINGINACT)
4118 if (vp->v_usecount > 0) {
4119 vp->v_iflag &= ~VI_OWEINACT;
4122 return (vinactivef(vp));
4153 struct vnode *vp, *mvp, *rootvp = NULL;
4174 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
4175 vholdl(vp);
4176 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
4178 vdrop(vp);
4185 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
4186 VOP_UNLOCK(vp);
4187 vdrop(vp);
4196 vnode_pager_clean_async(vp);
4198 error = VOP_FSYNC(vp, MNT_WAIT, td);
4201 VOP_UNLOCK(vp);
4202 vdrop(vp);
4206 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
4207 VI_LOCK(vp);
4209 if ((vp->v_type == VNON ||
4211 (vp->v_writecount <= 0 || vp->v_type != VREG)) {
4212 VOP_UNLOCK(vp);
4213 vdropl(vp);
4217 VI_LOCK(vp);
4224 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
4225 vgonel(vp);
4230 vn_printf(vp, "vflush: busy vnode ");
4233 VOP_UNLOCK(vp);
4234 vdropl(vp);
4268 vrecycle(struct vnode *vp)
4272 VI_LOCK(vp);
4273 recycled = vrecyclel(vp);
4274 VI_UNLOCK(vp);
4279 * vrecycle, with the vp interlock held.
4282 vrecyclel(struct vnode *vp)
4286 ASSERT_VOP_ELOCKED(vp, __func__);
4287 ASSERT_VI_LOCKED(vp, __func__);
4288 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4290 if (vp->v_usecount == 0) {
4292 vgonel(vp);
4302 vgone(struct vnode *vp)
4304 VI_LOCK(vp);
4305 vgonel(vp);
4306 VI_UNLOCK(vp);
4313 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event)
4318 mp = atomic_load_ptr(&vp->v_mount);
4332 VFS_RECLAIM_LOWERVP(ump->mp, vp);
4335 VFS_UNLINK_LOWERVP(ump->mp, vp);
4350 * vgone, with the vp interlock held.
4353 vgonel(struct vnode *vp)
4360 ASSERT_VOP_ELOCKED(vp, "vgonel");
4361 ASSERT_VI_LOCKED(vp, "vgonel");
4362 VNASSERT(vp->v_holdcnt, vp,
4363 ("vgonel: vp %p has no reference.", vp));
4364 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4370 if (VN_IS_DOOMED(vp)) {
4371 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \
4372 vn_get_state(vp) == VSTATE_DEAD, vp);
4378 vn_seqc_write_begin_locked(vp);
4379 vunlazy_gone(vp);
4380 vn_irflag_set_locked(vp, VIRF_DOOMED);
4381 vn_set_state(vp, VSTATE_DESTROYING);
4391 active = vp->v_usecount > 0;
4392 oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4393 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0;
4398 if (vp->v_iflag & VI_DEFINACT) {
4399 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count"));
4400 vp->v_iflag &= ~VI_DEFINACT;
4401 vdropl(vp);
4403 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count"));
4404 VI_UNLOCK(vp);
4406 cache_purge_vgone(vp);
4407 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
4414 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
4418 VI_LOCK(vp);
4419 vinactivef(vp);
4420 oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4421 VI_UNLOCK(vp);
4425 if (vp->v_type == VSOCK)
4426 vfs_unp_reclaim(vp);
4433 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
4434 (void) vn_start_secondary_write(vp, &mp, V_WAIT);
4435 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
4436 while (vinvalbuf(vp, 0, 0, 0) != 0)
4440 BO_LOCK(&vp->v_bufobj);
4441 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
4442 vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
4443 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
4444 vp->v_bufobj.bo_clean.bv_cnt == 0,
4445 ("vp %p bufobj not invalidated", vp));
4452 object = vp->v_bufobj.bo_object;
4454 vp->v_bufobj.bo_flag |= BO_DEAD;
4455 BO_UNLOCK(&vp->v_bufobj);
4464 object->handle == vp)
4465 vnode_destroy_vobject(vp);
4470 if (VOP_RECLAIM(vp))
4474 VNASSERT(vp->v_object == NULL, vp,
4475 ("vop_reclaim left v_object vp=%p", vp));
4479 if (vp->v_lockf != NULL) {
4480 (void)VOP_ADVLOCKPURGE(vp);
4481 vp->v_lockf = NULL;
4486 if (vp->v_mount == NULL) {
4487 VI_LOCK(vp);
4489 delmntque(vp);
4490 ASSERT_VI_LOCKED(vp, "vgonel 2");
4496 vp->v_vnlock = &vp->v_lock;
4497 vp->v_op = &dead_vnodeops;
4498 vp->v_type = VBAD;
4499 vn_set_state(vp, VSTATE_DEAD);
4533 vn_printf(struct vnode *vp, const char *fmt, ...)
4544 printf("%p: ", (void *)vp);
4545 printf("type %s state %s op %p\n", vtypename[vp->v_type],
4546 vstatename[vp->v_state], vp->v_op);
4547 holdcnt = atomic_load_int(&vp->v_holdcnt);
4549 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS,
4550 vp->v_seqc_users);
4551 switch (vp->v_type) {
4553 printf(" mountedhere %p\n", vp->v_mountedhere);
4556 printf(" rdev %p\n", vp->v_rdev);
4559 printf(" socket %p\n", vp->v_unpcb);
4562 printf(" fifoinfo %p\n", vp->v_fifoinfo);
4576 irflag = vn_irflag_read(vp);
4590 if (vp->v_vflag & VV_ROOT)
4592 if (vp->v_vflag & VV_ISTTY)
4594 if (vp->v_vflag & VV_NOSYNC)
4596 if (vp->v_vflag & VV_ETERNALDEV)
4598 if (vp->v_vflag & VV_CACHEDLABEL)
4600 if (vp->v_vflag & VV_VMSIZEVNLOCK)
4602 if (vp->v_vflag & VV_COPYONWRITE)
4604 if (vp->v_vflag & VV_SYSTEM)
4606 if (vp->v_vflag & VV_PROCDEP)
4608 if (vp->v_vflag & VV_DELETED)
4610 if (vp->v_vflag & VV_MD)
4612 if (vp->v_vflag & VV_FORCEINSMQ)
4614 if (vp->v_vflag & VV_READLINK)
4616 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
4623 if (vp->v_iflag & VI_MOUNT)
4625 if (vp->v_iflag & VI_DOINGINACT)
4627 if (vp->v_iflag & VI_OWEINACT)
4629 if (vp->v_iflag & VI_DEFINACT)
4631 if (vp->v_iflag & VI_FOPENING)
4633 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT |
4639 if (vp->v_mflag & VMP_LAZYLIST)
4641 flags = vp->v_mflag & ~(VMP_LAZYLIST);
4647 if (mtx_owned(VI_MTX(vp)))
4650 if (vp->v_object != NULL)
4653 vp->v_object, vp->v_object->ref_count,
4654 vp->v_object->resident_page_count,
4655 vp->v_bufobj.bo_clean.bv_cnt,
4656 vp->v_bufobj.bo_dirty.bv_cnt);
4658 lockmgr_printinfo(vp->v_vnlock);
4659 if (vp->v_data != NULL)
4660 VOP_PRINT(vp);
4671 struct vnode *vp;
4681 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4682 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp))
4683 vn_printf(vp, "vnode ");
4693 struct vnode *vp;
4697 vp = (struct vnode *)addr;
4698 vn_printf(vp, "vnode ");
4709 struct vnode *vp;
4879 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4880 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) {
4881 vn_printf(vp, "vnode ");
4887 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4888 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) {
4889 vn_printf(vp, "vnode ");
5098 vfs_deferred_inactive(struct vnode *vp, int lkflags)
5101 ASSERT_VI_LOCKED(vp, __func__);
5102 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
5103 if ((vp->v_iflag & VI_OWEINACT) == 0) {
5104 vdropl(vp);
5107 if (vn_lock(vp, lkflags) == 0) {
5108 VI_LOCK(vp);
5109 vinactive(vp);
5110 VOP_UNLOCK(vp);
5111 vdropl(vp);
5114 vdefer_inactive_unlocked(vp);
5118 vfs_periodic_inactive_filter(struct vnode *vp, void *arg)
5121 return (vp->v_iflag & VI_DEFINACT);
5127 struct vnode *vp, *mvp;
5134 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) {
5135 if ((vp->v_iflag & VI_DEFINACT) == 0) {
5136 VI_UNLOCK(vp);
5139 vp->v_iflag &= ~VI_DEFINACT;
5140 vfs_deferred_inactive(vp, lkflags);
5145 vfs_want_msync(struct vnode *vp)
5153 if (vp->v_vflag & VV_NOSYNC)
5155 obj = vp->v_object;
5160 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused)
5163 if (vp->v_vflag & VV_NOSYNC)
5165 if (vp->v_iflag & VI_DEFINACT)
5167 return (vfs_want_msync(vp));
5173 struct vnode *vp, *mvp;
5181 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) {
5183 if (vp->v_iflag & VI_DEFINACT) {
5184 vp->v_iflag &= ~VI_DEFINACT;
5187 if (!vfs_want_msync(vp)) {
5189 vfs_deferred_inactive(vp, lkflags);
5191 VI_UNLOCK(vp);
5194 if (vget(vp, lkflags) == 0) {
5195 if ((vp->v_vflag & VV_NOSYNC) == 0) {
5197 vnode_pager_clean_sync(vp);
5199 vnode_pager_clean_async(vp);
5201 vput(vp);
5203 vdrop(vp);
5206 vdefer_inactive_unlocked(vp);
5245 v_addpollinfo(struct vnode *vp)
5249 if (vp->v_pollinfo != NULL)
5253 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
5255 VI_LOCK(vp);
5256 if (vp->v_pollinfo != NULL) {
5257 VI_UNLOCK(vp);
5261 vp->v_pollinfo = vi;
5262 VI_UNLOCK(vp);
5274 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5277 v_addpollinfo(vp);
5278 mtx_lock(&vp->v_pollinfo->vpi_lock);
5279 if (vp->v_pollinfo->vpi_revents & events) {
5287 events &= vp->v_pollinfo->vpi_revents;
5288 vp->v_pollinfo->vpi_revents &= ~events;
5290 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5293 vp->v_pollinfo->vpi_events |= events;
5294 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5295 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5329 struct vnode *vp;
5335 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5338 vp->v_type = VNON;
5339 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5340 vp->v_vflag |= VV_FORCEINSMQ;
5341 error = insmntque1(vp, mp);
5344 vp->v_vflag &= ~VV_FORCEINSMQ;
5345 vn_set_state(vp, VSTATE_CONSTRUCTED);
5346 VOP_UNLOCK(vp);
5363 bo = &vp->v_bufobj;
5370 mp->mnt_syncer = vp;
5371 vp = NULL;
5375 if (vp != NULL) {
5376 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5377 vgone(vp);
5378 vput(vp);
5385 struct vnode *vp;
5388 vp = mp->mnt_syncer;
5389 if (vp != NULL)
5392 if (vp != NULL)
5393 vrele(vp);
5460 struct vnode *vp = ap->a_vp;
5463 bo = &vp->v_bufobj;
5466 if (vp->v_mount->mnt_syncer == vp)
5467 vp->v_mount->mnt_syncer = NULL;
5481 vn_need_pageq_flush(struct vnode *vp)
5485 obj = vp->v_object;
5486 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5494 vn_isdisk_error(struct vnode *vp, int *errp)
5498 if (vp->v_type != VCHR) {
5504 if (vp->v_rdev == NULL)
5506 else if (vp->v_rdev->si_devsw == NULL)
5508 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5517 vn_isdisk(struct vnode *vp)
5521 return (vn_isdisk_error(vp, &error));
5698 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
5717 return (VOP_ACCESS(vp, accmode, cred, td));
5747 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
5755 vn_printf(vp, "vnode ");
5757 printf("%s: %p %s\n", str, (void *)vp, msg);
5763 assert_vi_locked(struct vnode *vp, const char *str)
5766 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
5767 vfs_badlock("interlock is not locked but should be", str, vp);
5771 assert_vi_unlocked(struct vnode *vp, const char *str)
5774 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
5775 vfs_badlock("interlock is locked but should not be", str, vp);
5779 assert_vop_locked(struct vnode *vp, const char *str)
5781 if (KERNEL_PANICKED() || vp == NULL)
5785 if ((vp->v_irflag & VIRF_CROSSMP) == 0 &&
5786 witness_is_owned(&vp->v_vnlock->lock_object) == -1)
5788 int locked = VOP_ISLOCKED(vp);
5791 vfs_badlock("is not locked but should be", str, vp);
5795 assert_vop_unlocked(struct vnode *vp, const char *str)
5797 if (KERNEL_PANICKED() || vp == NULL)
5801 if ((vp->v_irflag & VIRF_CROSSMP) == 0 &&
5802 witness_is_owned(&vp->v_vnlock->lock_object) == 1)
5804 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
5806 vfs_badlock("is locked but should not be", str, vp);
5810 assert_vop_elocked(struct vnode *vp, const char *str)
5812 if (KERNEL_PANICKED() || vp == NULL)
5815 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
5816 vfs_badlock("is not exclusive locked but should be", str, vp);
5887 struct vnode *vp;
5890 vp = a->a_vp;
5894 VNPASS(VN_IS_DOOMED(vp), vp);
5912 vop_fsync_debugprepost(struct vnode *vp, const char *name)
5914 if (vp->v_type == VCHR)
5918 * is actually determined by vp's write mount as indicated
5920 * may not be the same as vp->v_mount. However, if the
5931 else if (MNT_SHARED_WRITES(vp->v_mount))
5932 ASSERT_VOP_LOCKED(vp, name);
5934 ASSERT_VOP_ELOCKED(vp, name);
6022 struct vnode *vp = a->a_vp;
6024 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp);
6025 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK");
6095 struct vnode *vp;
6098 vp = a->a_vp;
6099 vn_seqc_write_begin(vp);
6106 struct vnode *vp;
6109 vp = a->a_vp;
6110 vn_seqc_write_end(vp);
6119 struct vnode *vp, *tdvp;
6122 vp = a->a_vp;
6124 vn_seqc_write_begin(vp);
6132 struct vnode *vp, *tdvp;
6135 vp = a->a_vp;
6137 vn_seqc_write_end(vp);
6140 VFS_KNOTE_LOCKED(vp, NOTE_LINK);
6209 struct vnode *vp;
6212 vp = a->a_vp;
6213 ASSERT_VOP_IN_SEQC(vp);
6215 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE);
6222 struct vnode *dvp, *vp;
6226 vp = a->a_vp;
6228 vn_seqc_write_begin(vp);
6235 struct vnode *dvp, *vp;
6239 vp = a->a_vp;
6241 vn_seqc_write_end(vp);
6244 VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
6290 struct vnode *dvp, *vp;
6294 vp = a->a_vp;
6296 vn_seqc_write_begin(vp);
6303 struct vnode *dvp, *vp;
6307 vp = a->a_vp;
6309 vn_seqc_write_end(vp);
6311 vp->v_vflag |= VV_UNLINKED;
6313 VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
6321 struct vnode *vp;
6324 vp = a->a_vp;
6325 vn_seqc_write_begin(vp);
6332 struct vnode *vp;
6335 vp = a->a_vp;
6336 vn_seqc_write_end(vp);
6338 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
6345 struct vnode *vp;
6348 vp = a->a_vp;
6349 vn_seqc_write_begin(vp);
6356 struct vnode *vp;
6359 vp = a->a_vp;
6360 vn_seqc_write_end(vp);
6367 struct vnode *vp;
6370 vp = a->a_vp;
6371 vn_seqc_write_begin(vp);
6378 struct vnode *vp;
6381 vp = a->a_vp;
6382 vn_seqc_write_end(vp);
6384 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
6579 struct vnode *vp = arg;
6581 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
6587 struct vnode *vp = arg;
6589 VOP_UNLOCK(vp);
6596 struct vnode *vp = arg;
6599 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
6601 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
6608 struct vnode *vp = ap->a_vp;
6612 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
6629 kn->kn_hook = (caddr_t)vp;
6631 v_addpollinfo(vp);
6632 if (vp->v_pollinfo == NULL)
6634 knl = &vp->v_pollinfo->vpi_selinfo.si_note;
6635 vhold(vp);
6647 struct vnode *vp = (struct vnode *)kn->kn_hook;
6649 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
6650 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
6651 vdrop(vp);
6658 struct vnode *vp = (struct vnode *)kn->kn_hook;
6666 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
6667 VI_LOCK(vp);
6669 VI_UNLOCK(vp);
6673 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0)
6676 VI_LOCK(vp);
6679 VI_UNLOCK(vp);
6687 struct vnode *vp = (struct vnode *)kn->kn_hook;
6689 VI_LOCK(vp);
6695 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
6699 VI_UNLOCK(vp);
6706 struct vnode *vp = (struct vnode *)kn->kn_hook;
6709 VI_LOCK(vp);
6712 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
6714 VI_UNLOCK(vp);
6718 VI_UNLOCK(vp);
6806 struct vnode *vp;
6812 vp = mp->mnt_rootvnode;
6813 if (vp != NULL) {
6814 if (!VN_IS_DOOMED(vp)) {
6815 vrefact(vp);
6817 error = vn_lock(vp, flags);
6819 *vpp = vp;
6822 vrele(vp);
6831 if (vp != NULL) {
6833 vrele(vp);
6867 struct vnode *vp;
6872 vp = atomic_load_ptr(&mp->mnt_rootvnode);
6873 if (vp == NULL || VN_IS_DOOMED(vp)) {
6877 vrefact(vp);
6879 error = vn_lock(vp, flags);
6881 vrele(vp);
6884 *vpp = vp;
6891 struct vnode *vp;
6897 vp = mp->mnt_rootvnode;
6898 if (vp != NULL)
6899 vn_seqc_write_begin(vp);
6901 return (vp);
6905 vfs_cache_root_set(struct mount *mp, struct vnode *vp)
6909 vrefact(vp);
6910 mp->mnt_rootvnode = vp;
6923 struct vnode *vp;
6928 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
6929 vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
6931 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
6933 VI_LOCK(vp);
6934 if (VN_IS_DOOMED(vp)) {
6935 VI_UNLOCK(vp);
6940 if (vp == NULL) {
6947 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
6949 return (vp);
6955 struct vnode *vp;
6961 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
6963 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
6965 VI_LOCK(vp);
6966 if (VN_IS_DOOMED(vp)) {
6967 VI_UNLOCK(vp);
6972 if (vp == NULL) {
6979 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
6981 return (vp);
7021 * Relock the mp mount vnode list lock with the vp vnode interlock in the
7031 struct vnode *vp)
7037 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
7039 ASSERT_VI_UNLOCKED(vp, __func__);
7043 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);
7051 vhold(vp);
7053 VI_LOCK(vp);
7054 if (VN_IS_DOOMED(vp)) {
7055 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
7058 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
7062 if (!refcount_release_if_not_last(&vp->v_holdcnt))
7067 vdropl(vp);
7077 struct vnode *vp;
7082 vp = TAILQ_NEXT(*mvp, v_lazylist);
7083 while (vp != NULL) {
7084 if (vp->v_type == VMARKER) {
7085 vp = TAILQ_NEXT(vp, v_lazylist);
7093 VNPASS(!VN_IS_DOOMED(vp), vp);
7094 if (!cb(vp, cbarg)) {
7096 vp = TAILQ_NEXT(vp, v_lazylist);
7101 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
7111 if (!VI_TRYLOCK(vp) &&
7112 !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
7114 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
7115 KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
7116 ("alien vnode on the lazy list %p %p", vp, mp));
7117 VNPASS(vp->v_mount == mp, vp);
7118 VNPASS(!VN_IS_DOOMED(vp), vp);
7124 if (vp == NULL) {
7129 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
7131 ASSERT_VI_LOCKED(vp, "lazy iter");
7132 return (vp);
7149 struct vnode *vp;
7160 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
7161 if (vp == NULL) {
7166 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
7184 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
7192 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
7200 vn_seqc_write_begin_locked(struct vnode *vp)
7203 ASSERT_VI_LOCKED(vp, __func__);
7204 VNPASS(vp->v_holdcnt > 0, vp);
7205 VNPASS(vp->v_seqc_users >= 0, vp);
7206 vp->v_seqc_users++;
7207 if (vp->v_seqc_users == 1)
7208 seqc_sleepable_write_begin(&vp->v_seqc);
7212 vn_seqc_write_begin(struct vnode *vp)
7215 VI_LOCK(vp);
7216 vn_seqc_write_begin_locked(vp);
7217 VI_UNLOCK(vp);
7221 vn_seqc_write_end_locked(struct vnode *vp)
7224 ASSERT_VI_LOCKED(vp, __func__);
7225 VNPASS(vp->v_seqc_users > 0, vp);
7226 vp->v_seqc_users--;
7227 if (vp->v_seqc_users == 0)
7228 seqc_sleepable_write_end(&vp->v_seqc);
7232 vn_seqc_write_end(struct vnode *vp)
7235 VI_LOCK(vp);
7236 vn_seqc_write_end_locked(vp);
7237 VI_UNLOCK(vp);
7247 vn_seqc_init(struct vnode *vp)
7250 vp->v_seqc = 0;
7251 vp->v_seqc_users = 0;
7255 vn_seqc_write_end_free(struct vnode *vp)
7258 VNPASS(seqc_in_modify(vp->v_seqc), vp);
7259 VNPASS(vp->v_seqc_users == 1, vp);
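
vn_seqc_write_begin()/vn_seqc_write_end() above bracket vnode mutations so that lockless (SMR) lookup treats the vnode as unstable while it is changing; the vop_*_pre/post hooks earlier in this listing apply exactly this pattern. A minimal sketch:

	vn_seqc_write_begin(vp);	/* v_seqc enters the modify state */
	/* ... perform the rename, link, unlink or attribute change ... */
	vn_seqc_write_end(vp);		/* v_seqc settles; lockless readers may trust it again */
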
7263 vn_irflag_set_locked(struct vnode *vp, short toset)
7267 ASSERT_VI_LOCKED(vp, __func__);
7268 flags = vn_irflag_read(vp);
7269 VNASSERT((flags & toset) == 0, vp,
7272 atomic_store_short(&vp->v_irflag, flags | toset);
7276 vn_irflag_set(struct vnode *vp, short toset)
7279 VI_LOCK(vp);
7280 vn_irflag_set_locked(vp, toset);
7281 VI_UNLOCK(vp);
7285 vn_irflag_set_cond_locked(struct vnode *vp, short toset)
7289 ASSERT_VI_LOCKED(vp, __func__);
7290 flags = vn_irflag_read(vp);
7291 atomic_store_short(&vp->v_irflag, flags | toset);
7295 vn_irflag_set_cond(struct vnode *vp, short toset)
7298 VI_LOCK(vp);
7299 vn_irflag_set_cond_locked(vp, toset);
7300 VI_UNLOCK(vp);
7304 vn_irflag_unset_locked(struct vnode *vp, short tounset)
7308 ASSERT_VI_LOCKED(vp, __func__);
7309 flags = vn_irflag_read(vp);
7310 VNASSERT((flags & tounset) == tounset, vp,
7313 atomic_store_short(&vp->v_irflag, flags & ~tounset);
7317 vn_irflag_unset(struct vnode *vp, short tounset)
7320 VI_LOCK(vp);
7321 vn_irflag_unset_locked(vp, tounset);
7322 VI_UNLOCK(vp);
7326 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
7331 ASSERT_VOP_LOCKED(vp, __func__);
7332 error = VOP_GETATTR(vp, &vattr, cred);
7343 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
7347 VOP_LOCK(vp, LK_SHARED);
7348 error = vn_getsize_locked(vp, size, cred);
7349 VOP_UNLOCK(vp);
7355 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
7358 switch (vp->v_state) {
7369 ASSERT_VOP_ELOCKED(vp, __func__);
7378 ASSERT_VOP_ELOCKED(vp, __func__);
7396 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
7397 panic("invalid state transition %d -> %d\n", vp->v_state, state);