Lines Matching defs:vp (NetBSD, sys/kern/vfs_vnode.c)

256 vrefcnt(struct vnode *vp)
259 return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
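
vrefcnt() above packs flag bits and the reference count into the single word v_usecount: it does one relaxed atomic load and masks the flags off before returning the count. A minimal userland C11 sketch of that packing follows; the bit layout is an illustrative assumption, not necessarily the kernel's actual VUSECOUNT_* values.

#include <stdatomic.h>
#include <stdio.h>

#define USECOUNT_MASK	0x3fffffffU	/* low bits: the count itself (assumed split) */
#define USECOUNT_GATE	0x80000000U	/* high bit: a flag riding in the same word */

static _Atomic unsigned usecount = 1;

/* Analogue of vrefcnt(): one relaxed load, then strip the flag bits. */
static unsigned
refcnt(void)
{
	return atomic_load_explicit(&usecount, memory_order_relaxed)
	    & USECOUNT_MASK;
}

int
main(void)
{
	atomic_fetch_or(&usecount, USECOUNT_GATE);	/* set a flag bit */
	printf("refcnt = %u\n", refcnt());		/* still prints 1 */
	return 0;
}

Because the flag bits sit above the mask, the atomic_or_uint()/atomic_and_uint() calls later in the listing can toggle them without disturbing concurrent vrefcnt() readers.
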
268 #define VSTATE_GET(vp) \
269 vstate_assert_get((vp), __func__, __LINE__)
270 #define VSTATE_CHANGE(vp, from, to) \
271 vstate_assert_change((vp), (from), (to), __func__, __LINE__)
272 #define VSTATE_WAIT_STABLE(vp) \
273 vstate_assert_wait_stable((vp), __func__, __LINE__)
276 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
279 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
280 int refcnt = vrefcnt(vp);
290 mutex_enter((vp)->v_interlock);
293 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
299 mutex_exit((vp)->v_interlock);
302 vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
308 vstate_assert_get(vnode_t *vp, const char *func, int line)
310 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
312 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
314 vnpanic(vp, "state is %s at %s:%d",
321 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
323 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
325 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
327 vnpanic(vp, "state is %s at %s:%d",
331 cv_wait(&vp->v_cv, vp->v_interlock);
334 vnpanic(vp, "state is %s at %s:%d",
339 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
342 bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
343 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
345 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
350 vnpanic(vp, "from is %s at %s:%d",
353 vnpanic(vp, "to is %s at %s:%d",
356 vnpanic(vp, "from is %s, expected %s at %s:%d\n",
359 vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
365 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
367 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
374 cv_broadcast(&vp->v_cv);
379 #define VSTATE_GET(vp) \
380 (VNODE_TO_VIMPL((vp))->vi_state)
381 #define VSTATE_CHANGE(vp, from, to) \
382 vstate_change((vp), (from), (to))
383 #define VSTATE_WAIT_STABLE(vp) \
384 vstate_wait_stable((vp))
386 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
393 vstate_wait_stable(vnode_t *vp)
395 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
398 cv_wait(&vp->v_cv, vp->v_interlock);
402 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
404 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
409 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
411 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
418 cv_broadcast(&vp->v_cv);
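
vstate_wait_stable() and vstate_change() above form a classic monitor: waiters sleep on v_cv under v_interlock until the vnode leaves a transitional state, and every state change broadcasts the condvar (the DIAGNOSTIC vstate_assert_* variants earlier add the KASSERTs and gate-bit checks). A userland pthread sketch of the same shape; the state names and the choice of which states count as stable are assumptions for illustration.

#include <pthread.h>
#include <stdbool.h>

enum state { S_LOADING, S_LOADED, S_BLOCKED, S_RECLAIMING, S_RECLAIMED };

static pthread_mutex_t interlock = PTHREAD_MUTEX_INITIALIZER;	/* plays v_interlock */
static pthread_cond_t  statecv   = PTHREAD_COND_INITIALIZER;	/* plays v_cv */
static enum state      curstate  = S_LOADING;

static bool
stable(enum state s)
{
	/* Transitional states get waited out; these two are settled. */
	return s == S_LOADED || s == S_RECLAIMED;
}

/* Like vstate_wait_stable(): called with the interlock held. */
static void
wait_stable(void)
{
	while (!stable(curstate))
		pthread_cond_wait(&statecv, &interlock);
}

/* Like vstate_change(): update the state and wake every waiter. */
static void
change(enum state to)
{
	curstate = to;
	pthread_cond_broadcast(&statecv);
}

int
main(void)
{
	pthread_mutex_lock(&interlock);
	change(S_LOADED);	/* e.g. setup finished: LOADING -> LOADED */
	wait_stable();		/* returns at once, the state is stable */
	pthread_mutex_unlock(&interlock);
	return 0;
}
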
451 vnode_t *vp;
455 vp = VIMPL_TO_VNODE(vip);
456 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
457 vp->v_mount = mp;
458 vp->v_type = VBAD;
459 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
461 vp->v_klist = &vip->vi_klist;
464 return vp;
471 vnfree_marker(vnode_t *vp)
475 vip = VNODE_TO_VIMPL(vp);
477 mutex_obj_free(vp->v_interlock);
478 uvm_obj_destroy(&vp->v_uobj, true);
487 vnis_marker(vnode_t *vp)
490 return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
497 lru_which(vnode_t *vp)
500 KASSERT(mutex_owned(vp->v_interlock));
502 if (vp->v_holdcnt > 0)
514 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
523 vip = VNODE_TO_VIMPL(vp);
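
lru_which() above picks a list purely from v_holdcnt and lru_requeue() moves the vnode there; vholdl() and holdrelel() further down call the same pair when the hold count crosses zero with no users left. A tiny sketch of that selection, with assumed list names and fields:

#include <stdio.h>

enum lru { LRU_FREE, LRU_HOLD };	/* assumed list names */

struct obj {
	int usecount;
	int holdcnt;
	enum lru onlist;
};

/* Like lru_which(): held but unreferenced objects stay off the free list. */
static enum lru
lru_pick(const struct obj *o)
{
	return o->holdcnt > 0 ? LRU_HOLD : LRU_FREE;
}

/* Like vholdl(): the first hold on an unused object may move it. */
static void
hold(struct obj *o)
{
	if (o->holdcnt++ == 0 && o->usecount == 0)
		o->onlist = lru_pick(o);
}

int
main(void)
{
	struct obj o = { .usecount = 0, .holdcnt = 0, .onlist = LRU_FREE };
	hold(&o);
	printf("on %s list\n", o.onlist == LRU_HOLD ? "hold" : "free");
	return 0;
}
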
649 vnode_t *vp;
662 vp = VIMPL_TO_VNODE(vip);
665 if (vrefcnt(vp) > 0)
668 if (!mutex_tryenter(vp->v_interlock))
671 if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
672 mutex_exit(vp->v_interlock);
677 mp = vp->v_mount;
679 mutex_exit(vp->v_interlock);
684 if (vcache_vget(vp) == 0) {
685 if (!vrecycle(vp)) {
686 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
687 mutex_enter(vp->v_interlock);
688 vrelel(vp, 0, LK_EXCLUSIVE);
760 vtryrele(vnode_t *vp)
765 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
770 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
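
vtryrele() above drops a reference with a lock-free compare-and-swap loop but refuses to drop the last one, since that case needs v_interlock and vrelel(). A compilable C11 analogue, reusing the assumed flag/count split from earlier:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define COUNT_MASK 0x3fffffffU		/* assumed count bits, flags above them */

static _Atomic unsigned usecount = 2;

/* Analogue of vtryrele(): decrement unless this would be the last
 * reference; the last drop is left to the locked slow path. */
static bool
tryrele(void)
{
	unsigned use = atomic_load_explicit(&usecount, memory_order_relaxed);

	for (;;) {
		if ((use & COUNT_MASK) == 1)
			return false;	/* caller goes through the slow path */
		/* On failure the CAS reloads `use`, so the check above re-runs. */
		if (atomic_compare_exchange_weak(&usecount, &use, use - 1))
			return true;
	}
}

int
main(void)
{
	printf("%d\n", tryrele());	/* 1: count 2 -> 1 */
	printf("%d\n", tryrele());	/* 0: last reference, refused */
	return 0;
}

vput() and vrele() in the listing both try this fast path first and fall back to taking v_interlock and calling vrelel() only when it fails.
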
781 vput(vnode_t *vp)
792 if (vrefcnt(vp) > 1) {
793 VOP_UNLOCK(vp);
794 if (vtryrele(vp)) {
799 lktype = VOP_ISLOCKED(vp);
802 mutex_enter(vp->v_interlock);
803 vrelel(vp, 0, lktype);
812 vnode_t *vp;
817 vp = VIMPL_TO_VNODE(vip);
832 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
833 mutex_enter(vp->v_interlock);
834 vrelel(vp, 0, LK_EXCLUSIVE);
844 vrelel(vnode_t *vp, int flags, int lktype)
854 KASSERT(mutex_owned(vp->v_interlock));
856 if (__predict_false(vp->v_op == dead_vnodeop_p &&
857 VSTATE_GET(vp) != VS_RECLAIMED)) {
858 vnpanic(vp, "dead but not clean");
867 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
871 rw_exit(vp->v_uobj.vmobjlock);
874 mutex_exit(vp->v_interlock);
876 VOP_UNLOCK(vp);
877 mutex_enter(vp->v_interlock);
879 if (vtryrele(vp)) {
880 mutex_exit(vp->v_interlock);
883 next = atomic_load_relaxed(&vp->v_usecount);
889 next = atomic_cas_uint(&vp->v_usecount, use, next);
896 if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
897 vnpanic(vp, "%s: bad ref count", __func__);
901 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
902 vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
903 vprint("vrelel: missing VOP_CLOSE()", vp);
911 if (VSTATE_GET(vp) == VS_RECLAIMED) {
914 rw_exit(vp->v_uobj.vmobjlock);
917 mutex_exit(vp->v_interlock);
919 VOP_UNLOCK(vp);
920 mutex_enter(vp->v_interlock);
935 error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
943 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
950 KASSERT(mutex_owned(vp->v_interlock));
957 mutex_exit(vp->v_interlock);
958 VOP_UNLOCK(vp);
959 mutex_enter(vp->v_interlock);
961 lru_requeue(vp, &lru_list[LRU_VRELE]);
962 mutex_exit(vp->v_interlock);
968 use = atomic_load_relaxed(&vp->v_usecount);
974 if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
975 (vp->v_vflag & VV_MAPPED) != 0) {
979 if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
980 mutex_exit(vp->v_interlock);
981 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
982 mutex_enter(vp->v_interlock);
986 if ((vp->v_iflag & VI_EXECMAP) != 0) {
987 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
989 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
990 vp->v_vflag &= ~VV_MAPPED;
994 rw_exit(vp->v_uobj.vmobjlock);
1006 mutex_exit(vp->v_interlock);
1008 VOP_INACTIVE(vp, &recycle);
1011 VOP_UNLOCK(vp);
1013 mutex_enter(vp->v_interlock);
1021 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1022 use = atomic_load_relaxed(&vp->v_usecount);
1024 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1034 VSTATE_ASSERT(vp, VS_BLOCKED);
1038 vcache_reclaim(vp);
1040 KASSERT(vrefcnt(vp) > 0);
1044 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1050 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
1054 mutex_exit(vp->v_interlock);
1062 if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
1067 vcache_free(VNODE_TO_VIMPL(vp));
1074 lru_requeue(vp, lru_which(vp));
1075 mutex_exit(vp->v_interlock);
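
The long vrelel() block above is where a dropped reference turns into either a cached vnode or a freed one: if the use count does not reach zero the function simply returns; otherwise it may deactivate and reclaim the vnode, then frees it when it is reclaimed and unheld, or requeues it on an LRU list. A deliberately simplified, stubbed-out sketch of just that decision tree, leaving out all of the locking, fast paths and state machinery of the real function:

#include <stdbool.h>
#include <stdio.h>

struct node {
	int usecount;
	int holdcnt;
	bool reclaimed;
};

/* Stubs standing in for VOP_INACTIVE()'s recycle hint, vcache_reclaim(),
 * vcache_free() and lru_requeue(). */
static bool want_recycle(struct node *n) { (void)n; return false; }
static void reclaim(struct node *n) { n->reclaimed = true; }
static void freenode(struct node *n) { printf("freed %p\n", (void *)n); }
static void requeue(struct node *n) { printf("cached %p on an LRU list\n", (void *)n); }

static void
release(struct node *n)
{
	if (--n->usecount > 0)
		return;				/* not the last reference */
	if (!n->reclaimed && want_recycle(n))
		reclaim(n);			/* deactivate and reclaim now */
	if (n->reclaimed && n->holdcnt == 0)
		freenode(n);			/* nothing keeps it alive */
	else
		requeue(n);			/* keep it cached for reuse */
}

int
main(void)
{
	struct node n = { .usecount = 1, .holdcnt = 0, .reclaimed = false };
	release(&n);	/* last reference goes away; node stays cached */
	return 0;
}
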
1080 vrele(vnode_t *vp)
1083 if (vtryrele(vp)) {
1086 mutex_enter(vp->v_interlock);
1087 vrelel(vp, 0, LK_NONE);
1094 vrele_async(vnode_t *vp)
1097 if (vtryrele(vp)) {
1100 mutex_enter(vp->v_interlock);
1101 vrelel(vp, VRELEL_ASYNC, LK_NONE);
1111 vref(vnode_t *vp)
1114 KASSERT(vrefcnt(vp) > 0);
1116 atomic_inc_uint(&vp->v_usecount);
1124 vholdl(vnode_t *vp)
1127 KASSERT(mutex_owned(vp->v_interlock));
1129 if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
1130 lru_requeue(vp, lru_which(vp));
1137 vhold(vnode_t *vp)
1140 mutex_enter(vp->v_interlock);
1141 vholdl(vp);
1142 mutex_exit(vp->v_interlock);
1150 holdrelel(vnode_t *vp)
1153 KASSERT(mutex_owned(vp->v_interlock));
1155 if (vp->v_holdcnt <= 0) {
1156 vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
1159 vp->v_holdcnt--;
1160 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1161 lru_requeue(vp, lru_which(vp));
1168 holdrele(vnode_t *vp)
1171 mutex_enter(vp->v_interlock);
1172 holdrelel(vp);
1173 mutex_exit(vp->v_interlock);
1180 vrecycle(vnode_t *vp)
1184 mutex_enter(vp->v_interlock);
1187 VSTATE_WAIT_STABLE(vp);
1188 if (VSTATE_GET(vp) != VS_LOADED) {
1189 VSTATE_ASSERT(vp, VS_RECLAIMED);
1190 vrelel(vp, 0, LK_NONE);
1195 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1198 if (vrefcnt(vp) != 1) {
1199 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1200 mutex_exit(vp->v_interlock);
1204 mutex_exit(vp->v_interlock);
1212 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1214 mutex_enter(vp->v_interlock);
1216 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1217 mutex_exit(vp->v_interlock);
1221 KASSERT(vrefcnt(vp) == 1);
1222 vcache_reclaim(vp);
1223 vrelel(vp, 0, LK_NONE);
1263 vrevoke(vnode_t *vp)
1270 KASSERT(vrefcnt(vp) > 0);
1272 mp = vrevoke_suspend_next(NULL, vp->v_mount);
1274 mutex_enter(vp->v_interlock);
1275 VSTATE_WAIT_STABLE(vp);
1276 if (VSTATE_GET(vp) == VS_RECLAIMED) {
1277 mutex_exit(vp->v_interlock);
1278 } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1279 atomic_inc_uint(&vp->v_usecount);
1280 mutex_exit(vp->v_interlock);
1281 vgone(vp);
1283 dev = vp->v_rdev;
1284 type = vp->v_type;
1285 mutex_exit(vp->v_interlock);
1301 vgone(vnode_t *vp)
1305 KASSERT(vp->v_mount == dead_rootmount ||
1306 fstrans_is_owner(vp->v_mount));
1308 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1310 mutex_enter(vp->v_interlock);
1311 VSTATE_WAIT_STABLE(vp);
1312 if (VSTATE_GET(vp) == VS_LOADED) {
1313 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1314 vcache_reclaim(vp);
1317 VSTATE_ASSERT(vp, VS_RECLAIMED);
1318 vrelel(vp, 0, lktype);
1436 vnode_t *vp;
1439 vp = VIMPL_TO_VNODE(vip);
1443 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1445 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1447 vp->v_klist = &vip->vi_klist;
1448 cv_init(&vp->v_cv, "vnode");
1449 cache_vnode_init(vp);
1451 vp->v_usecount = 1;
1452 vp->v_type = VNON;
1453 vp->v_size = vp->v_writesize = VSIZENOTSET;
1457 lru_requeue(vp, &lru_list[LRU_FREE]);
1470 vnode_t *vp;
1474 vp = VIMPL_TO_VNODE(vip);
1476 vfs_insmntque(vp, dead_rootmount);
1477 mutex_enter(vp->v_interlock);
1478 vp->v_op = dead_vnodeop_p;
1479 VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1481 vrelel(vp, 0, LK_NONE);
1491 vnode_t *vp;
1493 vp = VIMPL_TO_VNODE(vip);
1494 KASSERT(mutex_owned(vp->v_interlock));
1496 KASSERT(vrefcnt(vp) == 0);
1497 KASSERT(vp->v_holdcnt == 0);
1498 KASSERT(vp->v_writecount == 0);
1499 lru_requeue(vp, NULL);
1500 mutex_exit(vp->v_interlock);
1502 vfs_insmntque(vp, NULL);
1503 if (vp->v_type == VBLK || vp->v_type == VCHR)
1504 spec_node_destroy(vp);
1506 mutex_obj_free(vp->v_interlock);
1508 uvm_obj_destroy(&vp->v_uobj, true);
1509 KASSERT(vp->v_klist == &vip->vi_klist);
1511 cv_destroy(&vp->v_cv);
1512 cache_vnode_fini(vp);
1523 vcache_tryvget(vnode_t *vp)
1527 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1531 next = atomic_cas_uint(&vp->v_usecount,
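
vcache_tryvget() above is the mirror image of vtryrele(): it takes a new reference lock-free, but only while a gate flag in v_usecount indicates the vnode is in a stable, usable state; otherwise it fails and the caller falls back to the interlocked vcache_vget() path below. A C11 analogue; the gate bit value and the exact failure rule are assumptions for illustration.

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define GATE 0x80000000U	/* assumed "safe to grab lock-free" flag */

static _Atomic unsigned usecount = 1 | GATE;

/* Analogue of vcache_tryvget(): bump the count with a CAS, but only
 * while the gate flag is set. */
static int
tryvget(void)
{
	unsigned use = atomic_load_explicit(&usecount, memory_order_relaxed);

	for (;;) {
		if ((use & GATE) == 0)
			return EBUSY;	/* in transition: take the locked path */
		if (atomic_compare_exchange_weak(&usecount, &use, use + 1))
			return 0;
	}
}

int
main(void)
{
	printf("gate open:   %d\n", tryvget());			/* 0 */
	atomic_fetch_and(&usecount, ~GATE);			/* close the gate */
	printf("gate closed: %d\n", tryvget() == EBUSY);	/* 1 */
	return 0;
}
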
1548 vcache_vget(vnode_t *vp)
1552 KASSERT(mutex_owned(vp->v_interlock));
1555 vp->v_holdcnt++;
1556 VSTATE_WAIT_STABLE(vp);
1557 vp->v_holdcnt--;
1560 if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1561 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1562 vcache_free(VNODE_TO_VIMPL(vp));
1564 mutex_exit(vp->v_interlock);
1567 VSTATE_ASSERT(vp, VS_LOADED);
1568 error = vcache_tryvget(vp);
1570 mutex_exit(vp->v_interlock);
1585 struct vnode *vp;
1616 vp = VIMPL_TO_VNODE(vip);
1617 mutex_enter(vp->v_interlock);
1619 error = vcache_vget(vp);
1623 *vpp = vp;
1635 vp = VIMPL_TO_VNODE(new_vip);
1653 error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1665 KASSERT(vp->v_op != NULL);
1666 vfs_insmntque(vp, mp);
1668 vp->v_vflag |= VV_MPSAFE;
1675 mutex_enter(vp->v_interlock);
1676 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1677 mutex_exit(vp->v_interlock);
1679 *vpp = vp;
1692 struct vnode *vp, *ovp;
1703 vp = VIMPL_TO_VNODE(vip);
1706 error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1715 KASSERT(vp->v_op != NULL);
1738 vfs_insmntque(vp, mp);
1740 vp->v_vflag |= VV_MPSAFE;
1746 mutex_enter(vp->v_interlock);
1747 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1749 mutex_exit(vp->v_interlock);
1750 *vpp = vp;
1759 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1793 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1804 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1828 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1860 vcache_reclaim(vnode_t *vp)
1863 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1864 struct mount *mp = vp->v_mount;
1871 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1872 KASSERT(mutex_owned(vp->v_interlock));
1873 KASSERT(vrefcnt(vp) != 0);
1880 VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
1884 * because VOP_RECLAIM() could cause vp->v_klist to
1888 * Once it's been posted, reset vp->v_klist to point to
1892 KNOTE(&vp->v_klist->vk_klist, NOTE_REVOKE);
1893 vp->v_klist = &vip->vi_klist;
1894 mutex_exit(vp->v_interlock);
1896 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1897 mutex_enter(vp->v_interlock);
1898 if ((vp->v_iflag & VI_EXECMAP) != 0) {
1899 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1901 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1902 vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
1903 mutex_exit(vp->v_interlock);
1904 rw_exit(vp->v_uobj.vmobjlock);
1911 cache_purge(vp);
1931 error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1933 if (wapbl_vphaswapbl(vp))
1934 WAPBL_DISCARD(wapbl_vptomp(vp));
1935 error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1938 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1939 if (vp->v_type == VBLK || vp->v_type == VCHR) {
1940 spec_node_revoke(vp);
1949 VOP_INACTIVE(vp, &recycle);
1950 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1951 if (VOP_RECLAIM(vp)) {
1952 vnpanic(vp, "%s: cannot reclaim", __func__);
1955 KASSERT(vp->v_data == NULL);
1956 KASSERT((vp->v_iflag & VI_PAGES) == 0);
1958 if (vp->v_type == VREG && vp->v_ractx != NULL) {
1959 uvm_ra_freectx(vp->v_ractx);
1960 vp->v_ractx = NULL;
1976 mutex_enter(vp->v_interlock);
1977 vp->v_op = dead_vnodeop_p;
1978 VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1979 vp->v_tag = VT_NON;
1980 mutex_exit(vp->v_interlock);
1987 vp->v_vflag &= ~VV_ROOT;
1989 vfs_insmntque(vp, dead_rootmount);
1992 pax_segvguard_cleanup(vp);
1995 mutex_enter(vp->v_interlock);
1997 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
2007 vcache_make_anon(vnode_t *vp)
2009 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
2013 KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
2014 KASSERT(vp->v_mount == dead_rootmount ||
2015 fstrans_is_owner(vp->v_mount));
2016 VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
2035 if (vn_lock(vp, LK_EXCLUSIVE)) {
2036 vnpanic(vp, "%s: cannot lock", __func__);
2038 VOP_INACTIVE(vp, &recycle);
2039 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
2040 if (VOP_RECLAIM(vp)) {
2041 vnpanic(vp, "%s: cannot reclaim", __func__);
2045 cache_purge(vp);
2048 mutex_enter(vp->v_interlock);
2049 vp->v_op = spec_vnodeop_p;
2050 vp->v_vflag |= VV_MPSAFE;
2051 mutex_exit(vp->v_interlock);
2059 vfs_insmntque(vp, dead_rootmount);
2061 vrele(vp);
2070 vnode_t *vp;
2072 if ((vp = bp->b_vp) == NULL)
2075 KASSERT(bp->b_objlock == vp->v_interlock);
2078 if (--vp->v_numoutput < 0)
2079 vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
2080 if (vp->v_numoutput == 0)
2081 cv_broadcast(&vp->v_cv);
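
vwakeup() above decrements v_numoutput with the object lock held, panics if the counter goes negative, and broadcasts v_cv once it reaches zero so that anyone waiting for output to drain can continue. The same shape in userland pthread terms, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t objlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  donecv  = PTHREAD_COND_INITIALIZER;
static int numoutput;

/* Analogue of vwakeup(): one pending write has completed. */
static void
io_done(void)
{
	pthread_mutex_lock(&objlock);
	if (--numoutput < 0)
		abort();		/* counter underflow is a bug */
	if (numoutput == 0)
		pthread_cond_broadcast(&donecv);
	pthread_mutex_unlock(&objlock);
}

/* Sleep until every pending write has been retired. */
static void
wait_outputs(void)
{
	pthread_mutex_lock(&objlock);
	while (numoutput > 0)
		pthread_cond_wait(&donecv, &objlock);
	pthread_mutex_unlock(&objlock);
}

int
main(void)
{
	numoutput = 1;		/* one write in flight */
	io_done();		/* it completes */
	wait_outputs();		/* returns immediately */
	return 0;
}
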
2094 vdead_check(struct vnode *vp, int flags)
2097 KASSERT(mutex_owned(vp->v_interlock));
2100 VSTATE_WAIT_STABLE(vp);
2102 if (VSTATE_GET(vp) == VS_RECLAIMING) {
2105 } else if (VSTATE_GET(vp) == VS_RECLAIMED) {
2132 vnpanic(vnode_t *vp, const char *fmt, ...)
2137 vprint(NULL, vp);