/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001 Dag-Erling Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_pseudofs.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>

static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

static struct mtx pfs_vncache_mutex;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
static void pfs_purge_all(void);

static SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "pseudofs vnode cache");

static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;	/* XXX -> .h file */

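/*
 * Cache entries are hashed on the pid they belong to, so that process
 * exit only has to scan a single chain (see pfs_exit() below).
 */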
static SLIST_HEAD(pfs_vncache_head, pfs_vdata) *pfs_vncache_hashtbl;
static u_long pfs_vncache_hash;
#define PFS_VNCACHE_HASH(pid)	(&pfs_vncache_hashtbl[(pid) & pfs_vncache_hash])

/*
 * Initialize vnode cache
 */
void
pfs_vncache_load(void)
{

	mtx_init(&pfs_vncache_mutex, "pfs_vncache", NULL, MTX_DEF);
	pfs_vncache_hashtbl = hashinit(maxproc / 4, M_PFSVNCACHE, &pfs_vncache_hash);
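	/* Reclaim per-process vnodes as soon as their process exits. */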
	pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
}

/*
 * Tear down vnode cache
 */
void
pfs_vncache_unload(void)
{

	EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
	pfs_purge_all();
	KASSERT(pfs_vncache_entries == 0,
	    ("%d vncache entries remaining", pfs_vncache_entries));
	mtx_destroy(&pfs_vncache_mutex);
	hashdestroy(pfs_vncache_hashtbl, M_PFSVNCACHE, pfs_vncache_hash);
}

/*
 * Allocate a vnode
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
    struct pfs_node *pn, pid_t pid)
{
	struct pfs_vncache_head *hash;
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	enum vgetstate vs;
	int error;

	/*
	 * See if the vnode is in the cache.
	 */
	hash = PFS_VNCACHE_HASH(pid);
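	/*
	 * Checking the chain without the lock is safe here: if it looks
	 * empty we simply allocate, and a concurrent insertion is caught
	 * by the recheck done after the new vnode has been constructed.
	 */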
	if (SLIST_EMPTY(hash))
		goto alloc;
retry:
	mtx_lock(&pfs_vncache_mutex);
	SLIST_FOREACH(pvd, hash, pvd_hash) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			vs = vget_prep(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget_finish(vp, LK_EXCLUSIVE, vs) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice.  A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
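			/*
			 * vget_finish() failed because the vnode was
			 * doomed while we slept; rescan the chain.
			 */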
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
alloc:
	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate the flag through to the vnode so users know it can
	 * change if the process changes (e.g. execve()).
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
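	/*
	 * insmntque() disposes of the vnode itself if it fails, so only
	 * our private data needs to be freed here.
	 */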
	error = insmntque(*vpp, mp);
	if (error != 0) {
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
	vn_set_state(*vpp, VSTATE_CONSTRUCTED);
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Another thread may have raced with us and already created the
	 * entry we are about to insert into the cache.  Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	SLIST_FOREACH(pvd2, hash, pvd_hash) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			vs = vget_prep(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget_finish(vp, LK_EXCLUSIVE, vs) == 0) {
				++pfs_vncache_hits;
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
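			/*
			 * As above: the cached vnode was doomed while we
			 * slept; rescan the chain.
			 */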
			goto retry2;
		}
	}
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	SLIST_INSERT_HEAD(hash, pvd, pvd_hash);
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}

/*
 * Free a vnode
 */
int
pfs_vncache_free(struct vnode *vp)
{
	struct pfs_vdata *pvd, *pvd2;

	mtx_lock(&pfs_vncache_mutex);
	pvd = (struct pfs_vdata *)vp->v_data;
	KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
	SLIST_FOREACH(pvd2, PFS_VNCACHE_HASH(pvd->pvd_pid), pvd_hash) {
		if (pvd2 != pvd)
			continue;
		SLIST_REMOVE(PFS_VNCACHE_HASH(pvd->pvd_pid), pvd, pfs_vdata, pvd_hash);
		--pfs_vncache_entries;
		break;
	}
	mtx_unlock(&pfs_vncache_mutex);

	free(pvd, M_PFSVNCACHE);
	vp->v_data = NULL;
	return (0);
}

/*
 * Purge the cache of dead entries
 *
 * The code is not very efficient, but it could probably be improved without
 * a complete rewrite.  The previous implementation walked a single linked
 * list from scratch on every call.  This one only walks the relevant hash
 * chain when a pid is given, but still ends up scanning the entire cache at
 * least twice when a specific node is to be removed, which is slower.  It
 * could be improved further by resizing the hash table.
 *
 * Explanation of the previous state:
 *
 * This is extremely inefficient due to the fact that vgone() not only
 * indirectly modifies the vnode cache, but may also sleep.  We can
 * neither hold pfs_vncache_mutex across a vgone() call, nor make any
 * assumptions about the state of the cache after vgone() returns.  In
 * consequence, we must start over after every vgone() call, and keep
 * trying until we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.
 */

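/*
 * Reclaim a single vnode.  The caller is expected to hold the vnode
 * (vhold()) and to have dropped pfs_vncache_mutex; the hold is released
 * here once the vnode has been reclaimed.
 */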
static void
pfs_purge_one(struct vnode *vnp)
{

	VOP_LOCK(vnp, LK_EXCLUSIVE);
	vgone(vnp);
	VOP_UNLOCK(vnp);
	vdrop(vnp);
}

void
pfs_purge(struct pfs_node *pn)
{
	struct pfs_vdata *pvd;
	struct vnode *vnp;
	u_long i, removed;

	mtx_lock(&pfs_vncache_mutex);
restart:
	removed = 0;
	for (i = 0; i <= pfs_vncache_hash; i++) {
restart_chain:
		SLIST_FOREACH(pvd, &pfs_vncache_hashtbl[i], pvd_hash) {
			if (pn != NULL && pvd->pvd_pn != pn)
				continue;
			vnp = pvd->pvd_vnode;
			vhold(vnp);
			mtx_unlock(&pfs_vncache_mutex);
			pfs_purge_one(vnp);
			removed++;
			mtx_lock(&pfs_vncache_mutex);
			goto restart_chain;
		}
	}
	if (removed > 0)
		goto restart;
	mtx_unlock(&pfs_vncache_mutex);
}

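/*
 * Purge the entire cache.  Called from pfs_vncache_unload() when the
 * module is unloaded.
 */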
static void
pfs_purge_all(void)
{

	pfs_purge(NULL);
}

/*
 * Free all vnodes associated with a defunct process
 */
static void
pfs_exit(void *arg, struct proc *p)
{
	struct pfs_vncache_head *hash;
	struct pfs_vdata *pvd;
	struct vnode *vnp;
	int pid;

	pid = p->p_pid;
	hash = PFS_VNCACHE_HASH(pid);
	if (SLIST_EMPTY(hash))
		return;
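	/*
	 * pfs_purge_one() may sleep and the hash chain may change while
	 * pfs_vncache_mutex is dropped, so rescan from the head after
	 * each reclaimed vnode.
	 */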
restart:
	mtx_lock(&pfs_vncache_mutex);
	SLIST_FOREACH(pvd, hash, pvd_hash) {
		if (pvd->pvd_pid != pid)
			continue;
		vnp = pvd->pvd_vnode;
		vhold(vnp);
		mtx_unlock(&pfs_vncache_mutex);
		pfs_purge_one(vnp);
		goto restart;
	}
	mtx_unlock(&pfs_vncache_mutex);
}