/*	$NetBSD: nfs_node.c,v 1.107 2008/11/19 18:36:09 ad Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.107 2008/11/19 18:36:09 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>	/* kmem_alloc/kmem_free for large file handles */

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_var.h>

struct pool nfs_node_pool;
struct pool nfs_vattr_pool;

MALLOC_JUSTDEFINE(M_NFSNODE, "NFS node", "NFS vnode private part");

extern int prtactive;

void nfs_gop_size(struct vnode *, off_t, off_t *, int);
int nfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
int nfs_gop_write(struct vnode *, struct vm_page **, int, int);

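/*
 * genfs hooks for NFS vnodes: report the size of the backing object,
 * treat block allocation as a no-op, and write-protect pages before
 * they are handed to genfs for writing.
 */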
static const struct genfs_ops nfs_genfsops = {
	.gop_size = nfs_gop_size,
	.gop_alloc = nfs_gop_alloc,
	.gop_write = nfs_gop_write,
};

/*
 * Initialize the node and attribute pools used by the NFS client.
 */
void
nfs_node_init(void)
{
	malloc_type_attach(M_NFSNODE);
	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    &pool_allocator_nointr, IPL_NONE);
}

/*
 * Free the resources previously allocated in nfs_node_init().
 */
void
nfs_node_done(void)
{
	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	malloc_type_detach(M_NFSNODE);
}

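/*
 * Convert a pointer to the n_rbnode member embedded in struct nfsnode
 * back into a pointer to the containing nfsnode.
 */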
#define	RBTONFSNODE(node) \
	(void *)((uintptr_t)(node) - offsetof(struct nfsnode, n_rbnode))

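/*
 * Search key for the per-mount rb tree: an NFS file handle and its length.
 */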
struct fh_match {
	nfsfh_t *fhm_fhp;
	size_t fhm_fhsize;
	size_t fhm_fhoffset;
};

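/*
 * Ordering for the rb tree: nodes compare first by file handle length,
 * then by memcmp() of the handles themselves.
 */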
static int
nfs_compare_nodes(const struct rb_node *parent, const struct rb_node *node)
{
	const struct nfsnode * const pnp = RBTONFSNODE(parent);
	const struct nfsnode * const np = RBTONFSNODE(node);

	if (pnp->n_fhsize != np->n_fhsize)
		return np->n_fhsize - pnp->n_fhsize;

	return memcmp(np->n_fhp, pnp->n_fhp, np->n_fhsize);
}

static int
nfs_compare_node_fh(const struct rb_node *b, const void *key)
{
	const struct nfsnode * const pnp = RBTONFSNODE(b);
	const struct fh_match * const fhm = key;

	if (pnp->n_fhsize != fhm->fhm_fhsize)
		return fhm->fhm_fhsize - pnp->n_fhsize;

	return memcmp(fhm->fhm_fhp, pnp->n_fhp, pnp->n_fhsize);
}

static const struct rb_tree_ops nfs_node_rbtree_ops = {
	.rbto_compare_nodes = nfs_compare_nodes,
	.rbto_compare_key = nfs_compare_node_fh,
};

void
nfs_rbtinit(struct nfsmount *nmp)
{
	rb_tree_init(&nmp->nm_rbtree, &nfs_node_rbtree_ops);
}

/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to an nfsnode structure is returned.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;
	struct rb_node *node;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

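	/*
	 * Look for an existing node for this file handle.  If vget() fails
	 * because the vnode is going away, or if we lose the insertion race
	 * further down, come back here and try again.
	 */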
loop:
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	node = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (node != NULL) {
		np = RBTONFSNODE(node);
		vp = NFSTOV(np);
		mutex_enter(&vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;
		*npp = np;
		return (0);
	}
	rw_exit(&nmp->nm_rbtlock);

	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &vp);
	if (error) {
		*npp = NULL;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode into the per-mount rb tree, keyed by its
	 * new file handle.
	 */

	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

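	/*
	 * Another thread may have created a node for the same file handle
	 * while we were allocating without the lock held.  If so, throw
	 * away our copy and retry the lookup; otherwise publish the new node.
	 */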
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (rb_tree_find_node(&nmp->nm_rbtree, &fhm) != NULL) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
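	/*
	 * Return the vnode locked, and insert the node into the tree before
	 * dropping the tree lock so concurrent lookups find it fully set up.
	 */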
	vlockmgr(&vp->v_lock, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	rb_tree_insert_node(&nmp->nm_rbtree, &np->n_rbnode);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}

int
nfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp != NULL)
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
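	/*
	 * A node whose file has been removed will not be used again, so ask
	 * the caller to recycle the vnode right away.  Clear all flags except
	 * those that still describe cached or in-progress state.
	 */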
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp, 0);

	if (sp != NULL) {
		int error;

		/*
		 * Remove the silly file that was rename'd earlier
		 *
		 * Just in case our thread also has the parent node locked,
		 * we use LK_CANRECURSE.
		 */

		error = vn_lock(sp->s_dvp, LK_EXCLUSIVE | LK_CANRECURSE);
		if (error || sp->s_dvp->v_data == NULL) {
			/* XXX should recover */
			printf("%s: vp=%p error=%d\n",
			    __func__, sp->s_dvp, error);
		} else {
			nfs_removeit(sp);
		}
		kauth_cred_free(sp->s_cred);
		vput(sp->s_dvp);
		kmem_free(sp, sizeof(*sp));
	}

	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (prtactive && vp->v_usecount > 1)
		vprint("nfs_reclaim: pushing active", vp);

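	/*
	 * Remove the node from the mount's file handle tree so that
	 * nfs_nget1() can no longer find it.
	 */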
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	rb_tree_remove_node(&nmp->nm_rbtree, &np->n_rbnode);
	rw_exit(&nmp->nm_rbtlock);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache != NULL) {
		nfs_invaldircache(vp, NFS_INVALDIRCACHE_FORCE);
		hashdone(np->n_dircache, HASH_LIST, nfsdirhashmask);
	}
	KASSERT(np->n_dirgens == NULL);

	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);

	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);

	cache_purge(vp);
	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}

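/*
 * Compute the end of the backing object: the larger of the size being
 * set and the current vnode size.
 */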
void
nfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{

	*eobp = MAX(size, vp->v_size);
}

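/*
 * Block allocation is handled by the server, so there is nothing to do
 * on the client side.
 */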
int
nfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
    kauth_cred_t cred)
{

	return 0;
}

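/*
 * Downgrade all mappings of the pages to read-only before handing them
 * to genfs_gop_write(), so that a modification made while the write is
 * in flight faults and marks the page dirty again.
 */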
int
nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int i;

	for (i = 0; i < npages; i++) {
		pmap_page_protect(pgs[i], VM_PROT_READ);
	}
	return genfs_gop_write(vp, pgs, npages, flags);
}
377