xref: /netbsd-src/sys/nfs/nfs_node.c (revision 5aefcfdc06931dd97e76246d2fe0302f7b3fe094)
1 /*	$NetBSD: nfs_node.c,v 1.38 2000/11/27 08:39:48 chs Exp $	*/
2 
3 /*
4  * Copyright (c) 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Rick Macklem at The University of Guelph.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
39  */
40 
41 #include "opt_nfs.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/proc.h>
46 #include <sys/mount.h>
47 #include <sys/namei.h>
48 #include <sys/vnode.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/pool.h>
52 #include <sys/lock.h>
53 
54 #include <nfs/rpcv2.h>
55 #include <nfs/nfsproto.h>
56 #include <nfs/nfs.h>
57 #include <nfs/nfsnode.h>
58 #include <nfs/nfsmount.h>
59 #include <nfs/nqnfs.h>
60 #include <nfs/nfs_var.h>
61 
62 LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl;	/* file handle -> nfsnode hash chains */
63 u_long nfsnodehash;			/* size mask for nfsnodehashtbl */
64 struct lock nfs_hashlock;		/* serializes nfsnode creation in nfs_nget() */
65 
66 struct pool nfs_node_pool;		/* memory pool for nfs nodes */
67 struct pool nfs_vattr_pool;		/* memory pool for nfs vattrs */
68 
69 #define TRUE	1
70 #define	FALSE	0
71 
72 /*
73  * Initialize the nfsnode hash table and the memory pools
74  * used for nfsnodes and cached attributes.
75  */
76 void
77 nfs_nhinit()
78 {
79 
80 	nfsnodehashtbl = hashinit(desiredvnodes, HASH_LIST, M_NFSNODE,
81 	    M_WAITOK, &nfsnodehash);
82 	lockinit(&nfs_hashlock, PINOD, "nfs_hashlock", 0, 0);
83 
84 	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
85 	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_NFSNODE);
86 	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
87 	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_NFSNODE);
88 }
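
/*
 * Illustrative lifecycle (a sketch, not taken from this file): the NFS
 * client's VFS init hook is expected to call nfs_nhinit() once before
 * the first nfs_nget(), and nfs_nhdone() at teardown:
 *
 *	nfs_nhinit();				hash table, lock and pools ready
 *	...
 *	error = nfs_nget(mp, fhp, fhsize, &np);	per-file-handle lookups
 *	...
 *	nfs_nhdone();				release what nfs_nhinit() set up
 *
 * nfs_nget() dereferences nfsnodehashtbl and the pools directly, so
 * calling it before nfs_nhinit() would fault.
 */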
89 
90 /*
91  * Free resources previously allocated in nfs_nhinit().
92  */
93 void
94 nfs_nhdone()
95 {
96 	hashdone(nfsnodehashtbl, M_NFSNODE);
97 	pool_destroy(&nfs_node_pool);
98 	pool_destroy(&nfs_vattr_pool);
99 }
100 
101 /*
102  * Compute an entry in the NFS hash table structure
103  */
104 u_long
105 nfs_hash(fhp, fhsize)
106 	nfsfh_t *fhp;
107 	int fhsize;
108 {
109 	u_char *fhpp;
110 	u_long fhsum;
111 	int i;
112 
113 	fhpp = &fhp->fh_bytes[0];
114 	fhsum = 0;
115 	for (i = 0; i < fhsize; i++)
116 		fhsum += *fhpp++;
117 	return (fhsum);
118 }
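
/*
 * Worked example (illustrative): a 4-byte handle containing
 * { 0x01, 0x02, 0x03, 0x04 } sums to 0x0a; NFSNOHASH() then masks the
 * sum with nfsnodehash (the mask handed back by hashinit() in
 * nfs_nhinit()) to pick a hash chain.  The hash is deliberately cheap;
 * two handles landing on the same chain are told apart by the full
 * n_fhsize/memcmp() check in nfs_nget() below.
 */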
119 
120 /*
121  * Look up a vnode/nfsnode by file handle.
122  * Callers must check for mount points!!
123  * On success, a pointer to the
124  * nfsnode structure is returned via *npp.
125  */
126 int
127 nfs_nget(mntp, fhp, fhsize, npp)
128 	struct mount *mntp;
129 	nfsfh_t *fhp;
130 	int fhsize;
131 	struct nfsnode **npp;
132 {
133 	struct nfsnode *np;
134 	struct nfsnodehashhead *nhpp;
135 	struct vnode *vp;
136 	struct vnode *nvp;
137 	int error;
138 
139 	nhpp = NFSNOHASH(nfs_hash(fhp, fhsize));
140 loop:
141 	for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
142 		if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize ||
143 		    memcmp(fhp, np->n_fhp, fhsize))
144 			continue;
145 		vp = NFSTOV(np);
146 		if (vget(vp, LK_EXCLUSIVE))
147 			goto loop;
148 		*npp = np;
149 		return(0);
150 	}
151 	if (lockmgr(&nfs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0))
152 		goto loop;
153 	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &nvp);
154 	if (error) {
155 		*npp = 0;
156 		lockmgr(&nfs_hashlock, LK_RELEASE, 0);
157 		return (error);
158 	}
159 	nvp->v_vnlock = 0;	/* XXX At least until we do locking */
160 	vp = nvp;
161 	np = pool_get(&nfs_node_pool, PR_WAITOK);
162 	memset(np, 0, sizeof *np);
163 	lockinit(&np->n_commitlock, PINOD, "nfsclock", 0, 0);
164 	vp->v_data = np;
165 	np->n_vnode = vp;
166 
167 	/*
168 	 * Insert the nfsnode in the hash queue for its new file handle
169 	 */
170 	LIST_INSERT_HEAD(nhpp, np, n_hash);
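	/*
	 * Handles up to NFS_SMALLFH bytes live inline in the nfsnode
	 * (n_fh); larger handles get their own M_NFSBIGFH allocation,
	 * released again in nfs_reclaim().
	 */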
171 	if (fhsize > NFS_SMALLFH) {
172 		np->n_fhp = malloc(fhsize, M_NFSBIGFH, M_WAITOK);
173 	} else
174 		np->n_fhp = &np->n_fh;
175 	memcpy(np->n_fhp, fhp, fhsize);
176 	np->n_fhsize = fhsize;
177 	np->n_accstamp = -1;
178 	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);
179 
180 	/*
181 	 * XXXUBC doing this while holding the nfs_hashlock is bad,
182 	 * but there's no alternative at the moment.
183 	 */
184 	error = VOP_GETATTR(vp, np->n_vattr, curproc->p_ucred, curproc);
185 	if (error) {
		lockmgr(&nfs_hashlock, LK_RELEASE, 0);	/* don't return with the hash lock held */
186 		return error;
187 	}
188 	uvm_vnp_setsize(vp, np->n_vattr->va_size);
189 
190 	lockmgr(&nfs_hashlock, LK_RELEASE, 0);
191 	*npp = np;
192 	return (0);
193 }
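
/*
 * Sketch of a typical caller (illustrative only; the real consumers
 * are the lookup and readdir paths in nfs_vnops.c):
 *
 *	struct nfsnode *np;
 *	int error;
 *
 *	error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
 *	if (error)
 *		return (error);
 *	*vpp = NFSTOV(np);
 *
 * On success the caller owns a reference on the vnode, either from
 * vget() (handle already known) or from getnewvnode() (fresh node),
 * and is responsible for dropping it with vrele()/vput().
 */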
194 
195 int
196 nfs_inactive(v)
197 	void *v;
198 {
199 	struct vop_inactive_args /* {
200 		struct vnode *a_vp;
201 		struct proc *a_p;
202 	} */ *ap = v;
203 	struct nfsnode *np;
204 	struct sillyrename *sp;
205 	struct proc *p = ap->a_p;
206 	extern int prtactive;
207 
208 	np = VTONFS(ap->a_vp);
209 	if (prtactive && ap->a_vp->v_usecount != 0)
210 		vprint("nfs_inactive: pushing active", ap->a_vp);
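	/*
	 * A sillyrename record is left behind when a file was removed
	 * while still open: nfs_remove()/nfs_rename() rename it on the
	 * server to a ".nfs..."-style name (see nfs_sillyrename() in
	 * nfs_vnops.c) instead of deleting it.  The deferred removal is
	 * finished below, now that the vnode is going inactive.
	 */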
211 	if (ap->a_vp->v_type != VDIR) {
212 		sp = np->n_sillyrename;
213 		np->n_sillyrename = (struct sillyrename *)0;
214 	} else
215 		sp = (struct sillyrename *)0;
216 	if (sp) {
217 		/*
218 		 * If the usecount is greater than zero, then we are
219 		 * being inactivated by a forcible unmount and do not
220 		 * have to get our own reference. In the normal case,
221 		 * we need a reference to keep the vnode from being
222 		 * recycled by getnewvnode while we do the I/O
223 		 * associated with discarding the buffers.
224 		 */
225 		if (ap->a_vp->v_usecount > 0)
226 			(void) nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, p, 1);
227 		else if (vget(ap->a_vp, 0))
228 			panic("nfs_inactive: lost vnode");
229 		else {
230 			(void) nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, p, 1);
231 			vrele(ap->a_vp);
232 		}
233 
234 
235 		/*
236 		 * Remove the silly file that was rename'd earlier
237 		 */
238 		nfs_removeit(sp);
239 		crfree(sp->s_cred);
240 		vrele(sp->s_dvp);
241 		FREE(sp, M_NFSREQ);
242 	}
243 	np->n_flag &= (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NQNFSEVICTED |
244 		NQNFSNONCACHE | NQNFSWRITE);
245 	VOP_UNLOCK(ap->a_vp, 0);
246 	return (0);
247 }
248 
249 /*
250  * Reclaim an nfsnode so that it can be used for other purposes.
251  */
252 int
253 nfs_reclaim(v)
254 	void *v;
255 {
256 	struct vop_reclaim_args /* {
257 		struct vnode *a_vp;
258 	} */ *ap = v;
259 	struct vnode *vp = ap->a_vp;
260 	struct nfsnode *np = VTONFS(vp);
261 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
262 	extern int prtactive;
263 
264 	if (prtactive && vp->v_usecount != 0)
265 		vprint("nfs_reclaim: pushing active", vp);
266 
267 	LIST_REMOVE(np, n_hash);
268 
269 	/*
270 	 * For nqnfs, take it off the timer queue as required.
271 	 */
272 	if ((nmp->nm_flag & NFSMNT_NQNFS) && np->n_timer.cqe_next != 0) {
273 		CIRCLEQ_REMOVE(&nmp->nm_timerhead, np, n_timer);
274 	}
275 
276 	/*
277 	 * Free up any directory cookie structures and
278 	 * large file handle structures that might be associated with
279 	 * this nfs node.
280 	 */
281 	if (vp->v_type == VDIR && np->n_dircache) {
282 		nfs_invaldircache(vp, 1);
283 		FREE(np->n_dircache, M_NFSDIROFF);
284 	}
285 	if (np->n_fhsize > NFS_SMALLFH) {
286 		free(np->n_fhp, M_NFSBIGFH);
287 	}
288 
289 	pool_put(&nfs_vattr_pool, np->n_vattr);
290 	if (np->n_rcred) {
291 		crfree(np->n_rcred);
292 	}
293 	if (np->n_wcred) {
294 		crfree(np->n_wcred);
295 	}
296 	cache_purge(vp);
297 	pool_put(&nfs_node_pool, vp->v_data);
298 	vp->v_data = NULL;
299 	return (0);
300 }
301