Lines Matching defs:vnode

83 #include <sys/vnode.h>
108 static void delmntque(struct vnode *vp);
112 static int vtryrecycle(struct vnode *vp, bool isvnlru);
113 static void v_init_counters(struct vnode *);
114 static void vn_seqc_init(struct vnode *);
115 static void vn_seqc_write_end_free(struct vnode *vp);
116 static void vgonel(struct vnode *);
117 static bool vhold_recycle_free(struct vnode *);
118 static void vdropl_recycle(struct vnode *vp);
119 static void vdrop_recycle(struct vnode *vp);
124 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
128 static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
129 "vnode configuration and statistics");
131 "vnode configuration");
133 "vnode statistics");
135 "vnode recycling");
139 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
155 Conversion tables from vnode types to inode formats
170 static TAILQ_HEAD(freelst, vnode) vnode_list;
171 static struct vnode *vnode_list_free_marker;
172 static struct vnode *vnode_list_reclaim_marker;
175 * "Free" vnode target. Free vnodes are rarely completely free, but are
203 "Number of vnodes recycled to meet vnode cache targets (legacy)");
206 "Number of vnodes recycled to meet vnode cache targets");
211 "Number of free vnodes recycled to meet vnode cache targets (legacy)");
214 "Number of free vnodes recycled to meet vnode cache targets");
219 "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");
252 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");
319 struct vnode *tab[VDBATCH_SIZE];
323 static void vdbatch_dequeue(struct vnode *vp);
433 struct vnode *vp;
459 * This vnode is being recycled. Return != 0 to let the caller
462 * a new vnode if necessary)
481 struct vnode *vp;
510 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
514 "Try to reclaim a vnode by its file descriptor");
519 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
520 sizeof(struct vnode) < 1UL << (vnsz2log + 1),
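For illustration of the assert at 519-520 (with a made-up size, not the real layout): if sizeof(struct vnode) were 448 bytes, vnsz2log would be 8, since 1UL << 8 = 256 <= 448 < 512 = 1UL << 9:

    /* Illustration only: 448 and EX_VNSZ2LOG are hypothetical values. */
    #define EX_VNSZ2LOG 8
    _Static_assert(448 >= (1UL << EX_VNSZ2LOG) &&
        448 < (1UL << (EX_VNSZ2LOG + 1)),
        "a 448-byte vnode lands in the [256, 512) size bucket");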
578 * Initialize the vnode management data structures.
588 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
590 static struct vnode *
593 struct vnode *vp;
595 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
603 vn_free_marker(struct vnode *vp)
623 _Static_assert(offsetof(struct vnode, v_vnodelist) <
624 offsetof(struct vnode, v_dbatchcpu),
627 off1 = offsetof(struct vnode, v_vnodelist);
628 off2 = offsetof(struct vnode, v_dbatchcpu);
629 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
630 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);
634 * after the vnode has been freed. Try to get some KASAN coverage by
659 * Initialize a vnode as it first enters the zone.
664 struct vnode *vp;
672 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
676 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
707 * Free a vnode when it is cleared from the zone.
712 struct vnode *vp;
731 * vnode memory consumption. The size is specified directly to
772 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
802 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
845 * vnode belonging to mp.
849 * / vnode lock A / vnode lock (/var) D
850 /var vnode lock B /log vnode lock (/var/log) E
868 * 7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
876 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
1175 * Set vnode attributes to VNOVAL
1216 * whether the vnode should be recycled
1221 * of a vnode blowout so we want to do this). Therefore, this operation
1224 * A number of conditions may prevent a vnode from being reclaimed.
1225 * the buffer cache may have references on the vnode, a directory
1226 * vnode may still have references due to the namei cache representing
1227 * underlying files, or the vnode may be in active use. It is not
1242 struct vnode *vp, *mvp;
1283 * Handle races against vnode allocation. Filesystems lock the
1284 * vnode some time after it gets returned from getnewvnode,
1357 "limit on vnode free requests per call to the vnlru_free routine (legacy)");
1360 "limit on vnode free requests per call to the vnlru_free routine");
1366 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru)
1368 struct vnode *vp;
1387 * The free vnode marker can be past eligible vnodes:
1414 Don't recycle if our vnode is of a different type
1417 * vnode is reclaimed.
1444 * The solution would be to pre-check if the vnode is likely to
1498 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
1509 struct vnode *
1512 struct vnode *mvp;
1522 vnlru_free_marker(struct vnode *mvp)
1550 "Number of times vnlru awakened due to vnode shortage");
1690 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
1719 * vnode limit might have changed and now we may be at a significant
1813 * Sleep if the vnode cache is in a good state. This is
1816 * reducing free vnode count). Otherwise, try to reclaim
1898 * Routines having to do with the management of the vnode table.
1902 * Try to recycle a freed vnode.
1905 vtryrecycle(struct vnode *vp, bool isvnlru)
1912 This vnode may be found and locked via some other list, if so we
1935 * anyone picked up this vnode from another list. If not, we will
1963 * Allocate a new vnode.
1974 * The routine can try to free a vnode or stall for up to 1 second waiting for
1981 "Number of times vnode allocation blocked waiting on vnlru");
1983 static struct vnode * __noinline
2003 * Grow the vnode cache if it will not be above its target max after
2004 * growing. Otherwise, if there is at least one free vnode, try to
2013 * Wait for space for a new vnode.
2037 static struct vnode *
2053 vn_free(struct vnode *vp)
2061 * Allocate a new vnode.
2065 struct vnode **vpp)
2067 struct vnode *vp;
2089 * Locks are given the generic name "vnode" when created.
2097 * The change only needs to be made when the vnode moves
2117 * Finalize various vnode identity bits.
2144 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
2176 freevnode(struct vnode *vp)
2181 * The vnode has been marked for destruction, so free it.
2183 * The vnode will be returned to the zone where it will
2184 * normally remain until it is needed for another vnode. We
2187 * so as not to contaminate the freshly allocated vnode.
2189 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2196 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2200 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2218 * here while having another vnode locked when trying to
2237 * Delete from old mount point vnode list, if on one.
2240 delmntque(struct vnode *vp)
2251 ("bad mount point vnode list size"));
2263 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr)
2267 ("insmntque: vnode already on per mount vnode list"));
2278 * We acquire the vnode interlock early to ensure that the
2279 * vnode cannot be recycled by another process releasing a
2280 * holdcnt on it before we get it on both the vnode list
2281 * and the active vnode list. The mount mutex protects only
2282 * manipulation of the vnode list and the vnode freelist
2283 * mutex protects only manipulation of the active vnode list.
2284 * Hence the need to hold the vnode interlock throughout.
2306 ("neg mount point vnode list size"));
2315 * insmntque() reclaims the vnode on insertion failure, insmntque1()
2316 * leaves handling of the vnode to the caller.
2319 insmntque(struct vnode *vp, struct mount *mp)
2325 insmntque1(struct vnode *vp, struct mount *mp)
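A minimal sketch of the allocation path the two routines above (2319, 2325) serve; "myfs" and myfs_vnodeops are placeholder names, and error paths are trimmed:

    /* Sketch: attach a freshly allocated vnode to its mount point. */
    error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
    if (error != 0)
            return (error);
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    error = insmntque(vp, mp);
    if (error != 0)
            return (error); /* vp was reclaimed and released (2315) */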
2378 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
2418 * Flush out and invalidate all buffers associated with a vnode.
2422 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
2554 vtruncbuf(struct vnode *vp, off_t length, int blksize)
2579 * Write out vnode metadata, e.g. indirect blocks.
2586 * Since we hold the vnode lock this should only
2616 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
2639 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
2823 * Associate a buffer with a vnode.
2826 bgetvp(struct vnode *vp, struct buf *bp)
2840 * Add the buf to the vnode's clean list unless we lost a race and find
2864 * Disassociate a buffer from a vnode.
2870 struct vnode *vp;
2876 * Delete from old vnode list, if on one.
2949 struct vnode *vp;
2959 * We use vhold in case the vnode does not
2960 * successfully sync. vhold prevents the vnode from
2962 * we can acquire the vnode interlock.
3196 * Move the buffer between the clean and dirty lists of its vnode.
3201 struct vnode *vp;
3280 v_init_counters(struct vnode *vp)
3284 vp, ("%s called for an initialized vnode", __FUNCTION__));
3292 * Get a usecount on a vnode.
3294 * vget and vget_finish may fail to lock the vnode if they lose a race against
3297 * Consumers which don't guarantee liveness of the vnode can use SMR to
3298 * try to get a reference. Note this operation can fail since the vnode
3302 vget_prep_smr(struct vnode *vp)
3320 vget_prep(struct vnode *vp)
3334 vget_abort(struct vnode *vp, enum vgetstate vs)
3350 vget(struct vnode *vp, int flags)
3359 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
3374 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
3384 vget_finish_ref(struct vnode *vp, enum vgetstate vs)
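A hedged sketch of the SMR-based pattern described at 3294-3298; the lookup in an SMR-protected structure is elided:

    /* Sketch: acquire a usecount on a vnode found without locks. */
    enum vgetstate vs;
    int error;

    vfs_smr_enter();
    /* ... find vp in an SMR-protected structure ... */
    vs = vget_prep_smr(vp);
    vfs_smr_exit();
    if (vs == VGET_NONE)
            return (EAGAIN);        /* vp was being freed; retry lookup */
    error = vget_finish(vp, LK_SHARED, vs);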
3396 * We hold the vnode. If the usecount is 0 it will be utilized to keep
3397 the vnode around. Otherwise someone else lent their hold count and
3413 vref(struct vnode *vp)
3423 vrefact(struct vnode *vp)
3433 vlazy(struct vnode *vp)
3437 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
3442 * We may get here for inactive routines after the vnode got doomed.
3457 vunlazy(struct vnode *vp)
3468 * Don't remove the vnode from the lazy list if another thread
3470 * vnode to the lazy list and is now responsible for its
3483 * the vnode.
3486 vunlazy_gone(struct vnode *vp)
3506 vdefer_inactive(struct vnode *vp)
3532 vdefer_inactive_unlocked(struct vnode *vp)
3549 * provides liveness of the vnode, meaning we have to vdrop.
3552 * exclusive lock on the vnode, while it is legal to call here with only a
3553 * shared lock (or no locks). If locking the vnode in an expected manner fails,
3556 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend
3562 vput_final(struct vnode *vp, enum vput_op func)
3580 * If the vnode is doomed vgone already performed inactive processing
3594 * vnode lock, opening a window where the vnode can get doomed all the
3665 * Decrement ->v_usecount for a vnode.
3678 vrele(struct vnode *vp)
3692 vput(struct vnode *vp)
3709 vunref(struct vnode *vp)
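The three entry points above (3678, 3692, 3709) differ only in the lock state they expect; a non-authoritative summary:

    vrele(vp);      /* caller holds no vnode lock */
    vput(vp);       /* caller holds the vnode lock; roughly
                       VOP_UNLOCK(vp) followed by vrele(vp) */
    vunref(vp);     /* caller holds the vnode lock and keeps it */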
3720 vhold(struct vnode *vp)
3733 vholdnz(struct vnode *vp)
3747 * Grab a hold count unless the vnode is freed.
3750 * freeing the vnode.
3753 * is not set. After the flag is set the vnode becomes immutable to anyone but
3766 vhold_smr(struct vnode *vp)
3789 * Hold a free vnode for recycling.
3793 * Attempts to recycle only need the global vnode list lock and have no use for
3802 * Note: the vnode may gain more references after we transition the count 0->1.
3805 vhold_recycle_free(struct vnode *vp)
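For contrast with the recycling-specific variant above (3805), the common pairing is a plain hold that keeps the vnode's memory (not its identity) alive across a blocking operation; a minimal sketch:

    vhold(vp);                      /* vnode cannot be freed ...       */
    /* ... sleep or otherwise block ...                                */
    if (VN_IS_DOOMED(vp))           /* ... but it may have been doomed */
            error = ENOENT;
    vdrop(vp);                      /* may requeue vp on the free list */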
3832 struct vnode *vp;
3886 vdbatch_enqueue(struct vnode *vp)
3923 vdbatch_dequeue(struct vnode *vp)
3948 * Either we dequeued the vnode above or the target CPU beat us to it.
3954 * Drop the hold count of the vnode.
3958 * Because the vnode vm object keeps a hold reference on the vnode if
3959 * there is at least one resident non-cached page, the vnode cannot
3963 vdropl_final(struct vnode *vp)
3990 vdrop(struct vnode *vp)
4002 vdropl_impl(struct vnode *vp, bool enqueue)
4030 * released our hold and by now the vnode might have been
4037 vdropl(struct vnode *vp)
4044 * vdrop a vnode when recycling
4047 regular vdrop by not requeueing the vnode on LRU.
4055 vdropl_recycle(struct vnode *vp)
4062 vdrop_recycle(struct vnode *vp)
4070 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
4074 vinactivef(struct vnode *vp)
4088 * modified pages are converted into the vnode's dirty
4090 * vnode is on the inactive list.
4107 vinactive(struct vnode *vp)
4126 * Remove any vnodes in the vnode table belonging to mount point mp.
4138 * `rootrefs' specifies the base reference count for the root vnode
4139 * of this filesystem. The root vnode is considered busy if its
4141 * will call vrele() on the root vnode exactly rootrefs times.
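A sketch of the unmount-side call implied by the rootrefs contract at 4138-4141, assuming the filesystem keeps exactly one long-lived reference to its root vnode:

    /* vflush() itself vrele()s the root vnode rootrefs (here 1) times. */
    error = vflush(mp, 1, (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0, td);
    if (error != 0)
            return (error);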
4153 struct vnode *vp, *mvp, *rootvp = NULL;
4163 * Get the filesystem root vnode. We can vput() it
4220 * vnode data structures and we are done.
4222 * If FORCECLOSE is set, forcibly close the vnode.
4230 vn_printf(vp, "vflush: busy vnode ");
4238 * If just the root vnode is busy, and if its refcount
4265 * Recycle an unused vnode.
4268 vrecycle(struct vnode *vp)
4282 vrecyclel(struct vnode *vp)
4298 * Eliminate all activity associated with a vnode
4302 vgone(struct vnode *vp)
4310 * Notify upper mounts about reclaimed or unlinked vnode.
4313 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event)
4353 vgonel(struct vnode *vp)
4384 * Check to see if the vnode is in use. If so, we have to
4403 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count"));
4410 * If purging an active vnode, it must be closed and
4429 * Clean out any buffers associated with the vnode.
4460 * should not touch the object borrowed from the lower vnode
4468 * Reclaim the vnode.
4484 * Delete from old mount point vnode list.
4494 * the vnode.
4503 * Print out a description of a vnode.
4518 "vnode type name not added to vtypename");
4527 "vnode state name not added to vstatename");
4533 vn_printf(struct vnode *vp, const char *fmt, ...)
4671 struct vnode *vp;
4683 vn_printf(vp, "vnode ");
4689 * Show details about the given vnode.
4691 DB_SHOW_COMMAND(vnode, db_show_vnode)
4693 struct vnode *vp;
4697 vp = (struct vnode *)addr;
4698 vn_printf(vp, "vnode ");
4709 struct vnode *vp;
4881 vn_printf(vp, "vnode ");
4889 vn_printf(vp, "vnode ");
5098 vfs_deferred_inactive(struct vnode *vp, int lkflags)
5118 vfs_periodic_inactive_filter(struct vnode *vp, void *arg)
5127 struct vnode *vp, *mvp;
5145 vfs_want_msync(struct vnode *vp)
5160 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused)
5173 struct vnode *vp, *mvp;
5242 * Initialize per-vnode helper structure to hold poll-related state.
5245 v_addpollinfo(struct vnode *vp)
5252 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
5267 * a vnode. Because poll uses the historic select-style interface
5274 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5300 * Routine to create and manage a filesystem syncer vnode.
5324 * Create a new filesystem syncer vnode for the specified mount point.
5329 struct vnode *vp;
5334 /* Allocate a new vnode */
5348 * Place the vnode onto the syncer worklist. We attempt to
5385 struct vnode *vp;
5402 struct vnode *syncvp = ap->a_vp;
5442 The syncer vnode is no longer referenced.
5453 * The syncer vnode is no longer needed and is being decommissioned.
5460 struct vnode *vp = ap->a_vp;
5481 vn_need_pageq_flush(struct vnode *vp)
5491 * Check if vnode represents a disk device
5494 vn_isdisk_error(struct vnode *vp, int *errp)
5517 vn_isdisk(struct vnode *vp)
5582 * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5698 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
5736 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */
5738 0, "Print vnode details on lock violations");
5747 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
5755 vn_printf(vp, "vnode ");
5763 assert_vi_locked(struct vnode *vp, const char *str)
5771 assert_vi_unlocked(struct vnode *vp, const char *str)
5779 assert_vop_locked(struct vnode *vp, const char *str)
5795 assert_vop_unlocked(struct vnode *vp, const char *str)
5810 assert_vop_elocked(struct vnode *vp, const char *str)
5887 struct vnode *vp;
5912 vop_fsync_debugprepost(struct vnode *vp, const char *name)
6022 struct vnode *vp = a->a_vp;
6049 struct vnode *dvp;
6060 struct vnode *dvp;
6073 struct vnode *dvp;
6084 struct vnode *dvp;
6095 struct vnode *vp;
6106 struct vnode *vp;
6119 struct vnode *vp, *tdvp;
6132 struct vnode *vp, *tdvp;
6149 struct vnode *dvp;
6160 struct vnode *dvp;
6185 struct vnode *dvp;
6196 struct vnode *dvp;
6209 struct vnode *vp;
6222 struct vnode *dvp, *vp;
6235 struct vnode *dvp, *vp;
6290 struct vnode *dvp, *vp;
6303 struct vnode *dvp, *vp;
6321 struct vnode *vp;
6332 struct vnode *vp;
6345 struct vnode *vp;
6356 struct vnode *vp;
6367 struct vnode *vp;
6378 struct vnode *vp;
6391 struct vnode *dvp;
6402 struct vnode *dvp;
6579 struct vnode *vp = arg;
6587 struct vnode *vp = arg;
6596 struct vnode *vp = arg;
6608 struct vnode *vp = ap->a_vp;
6642 * Detach knote from vnode
6647 struct vnode *vp = (struct vnode *)kn->kn_hook;
6658 struct vnode *vp = (struct vnode *)kn->kn_hook;
6687 struct vnode *vp = (struct vnode *)kn->kn_hook;
6706 struct vnode *vp = (struct vnode *)kn->kn_hook;
6800 * Clear out a doomed vnode (if any) and replace it with a new one as long
6801 * as the fs is not being unmounted. Return the root vnode to the caller.
6804 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
6806 struct vnode *vp;
6851 panic("%s: mismatch between vnode returned "
6864 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
6867 struct vnode *vp;
6888 struct vnode *
6891 struct vnode *vp;
6894 * ops > 0 guarantees there is nobody who can see this vnode
6905 vfs_cache_root_set(struct mount *mp, struct vnode *vp)
6920 struct vnode *
6921 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
6923 struct vnode *vp;
6927 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
6952 struct vnode *
6953 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
6955 struct vnode *vp;
6985 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
6995 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
7008 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
7011 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
7021 * Relock the mp mount vnode list lock with the vp vnode interlock in the
7024 * On entry, the mount vnode list lock is held and the vnode interlock is not.
7026 * On failure, the mount vnode list lock is held but the vnode interlock is
7030 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
7031 struct vnode *vp)
7038 ("%s: inappropriate vnode", __func__));
7073 static struct vnode *
7074 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
7077 struct vnode *vp;
7080 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
7089 * See if we want to process the vnode. Note we may encounter a
7116 ("alien vnode on the lazy list %p %p", vp, mp));
7135 struct vnode *
7136 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
7145 struct vnode *
7146 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
7149 struct vnode *vp;
7171 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
7184 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
7197 * to prevent the vnode from getting freed.
7200 vn_seqc_write_begin_locked(struct vnode *vp)
7212 vn_seqc_write_begin(struct vnode *vp)
7221 vn_seqc_write_end_locked(struct vnode *vp)
7232 vn_seqc_write_end(struct vnode *vp)
7243 * The counter remains unchanged on free so that a doomed vnode will
7247 vn_seqc_init(struct vnode *vp)
7255 vn_seqc_write_end_free(struct vnode *vp)
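A hedged sketch of the reader side that the vn_seqc_write_begin()/vn_seqc_write_end() hooks above pair with, using the seqc primitives from sys/seqc.h:

    /* Sketch: lockless read of vnode fields validated against v_seqc. */
    seqc_t seqc;

    seqc = vn_seqc_read_any(vp);
    if (seqc_in_modify(seqc))
            return (EAGAIN);        /* a writer is in progress */
    /* ... read the fields of interest ... */
    if (!vn_seqc_consistent(vp, seqc))
            return (EAGAIN);        /* a writer raced us; fall back */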
7263 vn_irflag_set_locked(struct vnode *vp, short toset)
7276 vn_irflag_set(struct vnode *vp, short toset)
7285 vn_irflag_set_cond_locked(struct vnode *vp, short toset)
7295 vn_irflag_set_cond(struct vnode *vp, short toset)
7304 vn_irflag_unset_locked(struct vnode *vp, short tounset)
7317 vn_irflag_unset(struct vnode *vp, short tounset)
7326 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
7343 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
7355 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)