xref: /netbsd-src/sys/nfs/nfs_node.c (revision 19ef5b5b0bcb90f63509df6e78769de1b57c2758)
1 /*	$NetBSD: nfs_node.c,v 1.117 2014/02/27 16:51:38 hannken Exp $	*/
2 
3 /*
4  * Copyright (c) 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Rick Macklem at The University of Guelph.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.117 2014/02/27 16:51:38 hannken Exp $");
39 
40 #ifdef _KERNEL_OPT
41 #include "opt_nfs.h"
42 #endif
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/mount.h>
48 #include <sys/namei.h>
49 #include <sys/vnode.h>
50 #include <sys/kernel.h>
51 #include <sys/pool.h>
52 #include <sys/lock.h>
53 #include <sys/hash.h>
54 #include <sys/kauth.h>
55 
56 #include <nfs/rpcv2.h>
57 #include <nfs/nfsproto.h>
58 #include <nfs/nfs.h>
59 #include <nfs/nfsnode.h>
60 #include <nfs/nfsmount.h>
61 #include <nfs/nfs_var.h>
62 
/* Pool backing struct nfsnode allocations (see nfs_node_init()). */
struct pool nfs_node_pool;
/* Pool backing the per-node cached struct vattr (np->n_vattr). */
struct pool nfs_vattr_pool;
/* Workqueue that removes silly-renamed files asynchronously. */
static struct workqueue *nfs_sillyworkq;

extern int prtactive;		/* vfs knob: warn when reclaiming active vnodes */

/* genfs callbacks and the silly-rename worker, defined below. */
static void nfs_gop_size(struct vnode *, off_t, off_t *, int);
static int nfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
static int nfs_gop_write(struct vnode *, struct vm_page **, int, int);
static void nfs_sillyworker(struct work *, void *);
73 
/*
 * genfs operations for NFS vnodes; attached to each vnode in
 * nfs_nget1() via genfs_node_init().
 */
static const struct genfs_ops nfs_genfsops = {
	.gop_size = nfs_gop_size,
	.gop_alloc = nfs_gop_alloc,
	.gop_write = nfs_gop_write,
};
79 
/*
 * One-time module initialization: create the nfsnode and vattr pools
 * and the workqueue used to dispose of silly-renamed files.
 * (Node lookup uses a per-mount rb-tree, not an inode hash table.)
 */
void
nfs_node_init(void)
{

	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    &pool_allocator_nointr, IPL_NONE);
	/* Failure is fatal: silly-rename cleanup cannot work without it. */
	if (workqueue_create(&nfs_sillyworkq, "nfssilly", nfs_sillyworker,
	    NULL, PRI_NONE, IPL_NONE, 0) != 0) {
	    	panic("nfs_node_init");
	}
}
96 
/*
 * Free resources previously allocated in nfs_node_init().
 */
void
nfs_node_done(void)
{

	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	workqueue_destroy(nfs_sillyworkq);
}
108 
/*
 * Search key for looking up an nfsnode by file handle in the
 * per-mount rb-tree (see nfs_compare_node_fh()).
 */
struct fh_match {
	nfsfh_t *fhm_fhp;	/* file handle to look up */
	size_t fhm_fhsize;	/* length of the handle, in bytes */
	size_t fhm_fhoffset;	/* not referenced in this file -- TODO confirm callers */
};
114 
115 static int
116 nfs_compare_nodes(void *ctx, const void *parent, const void *node)
117 {
118 	const struct nfsnode * const pnp = parent;
119 	const struct nfsnode * const np = node;
120 
121 	if (pnp->n_fhsize != np->n_fhsize)
122 		return np->n_fhsize - pnp->n_fhsize;
123 
124 	return memcmp(np->n_fhp, pnp->n_fhp, np->n_fhsize);
125 }
126 
127 static int
128 nfs_compare_node_fh(void *ctx, const void *b, const void *key)
129 {
130 	const struct nfsnode * const pnp = b;
131 	const struct fh_match * const fhm = key;
132 
133 	if (pnp->n_fhsize != fhm->fhm_fhsize)
134 		return fhm->fhm_fhsize - pnp->n_fhsize;
135 
136 	return memcmp(fhm->fhm_fhp, pnp->n_fhp, pnp->n_fhsize);
137 }
138 
/* rb-tree glue: nfsnodes keyed by (handle length, handle bytes). */
static const rb_tree_ops_t nfs_node_rbtree_ops = {
	.rbto_compare_nodes = nfs_compare_nodes,
	.rbto_compare_key = nfs_compare_node_fh,
	.rbto_node_offset = offsetof(struct nfsnode, n_rbnode),
	.rbto_context = NULL
};
145 
/*
 * Initialize the per-mount rb-tree used to look up nfsnodes by
 * file handle; called when an NFS mount is set up.
 */
void
nfs_rbtinit(struct nfsmount *nmp)
{

	rb_tree_init(&nmp->nm_rbtree, &nfs_node_rbtree_ops);
}
152 
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * Returns 0 with *npp set on success; on failure returns an errno
 * (EBUSY from vget() is passed straight through to the caller).
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	/*
	 * Fast path: search the rb-tree under the reader lock.  The vnode
	 * interlock is taken before nm_rbtlock is dropped so the node
	 * cannot disappear between the lookup and the vget() call.
	 */
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (np != NULL) {
		vp = NFSTOV(np);
		mutex_enter(vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;	/* vnode went away; retry the lookup */
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	/* Miss: allocate a fresh vnode and nfsnode (both may sleep). */
	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

	/* Large handles get their own allocation; small ones are embedded. */
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;	/* no cached access check yet */
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	/*
	 * Re-check under the writer lock: another thread may have inserted
	 * the same handle while we slept in the allocators.  If so, undo
	 * all of our allocations and retry the lookup from the top.
	 */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	error = VOP_LOCK(vp, LK_EXCLUSIVE);
	KASSERT(error == 0);
	NFS_INVALIDATE_ATTRCACHE(np);	/* force a fresh GETATTR */
	uvm_vnp_setsize(vp, 0);
	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
242 
/*
 * VOP_INACTIVE for NFS: tidy the node when its last reference goes
 * away.  Detaches any pending silly-rename and queues its removal,
 * trims cached state, and tells the caller whether to recycle the
 * vnode.  Called with the vnode locked; unlocks it before returning.
 */
int
nfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	/* Detach any pending silly-rename; directories never carry one. */
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = NULL;
	if (sp != NULL)
		/* Flush dirty buffers before the file is removed. */
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
	/* Recycle the vnode immediately if the file has been removed. */
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	/* Keep only the flags that remain meaningful on an inactive node. */
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp);

	/*
	 * Remove the silly-renamed file asynchronously: the worker must
	 * lock the parent directory (sp->s_dvp), which cannot safely be
	 * done from here.
	 */
	if (sp != NULL) {
		workqueue_enqueue(nfs_sillyworkq, &sp->s_work, NULL);
	}

	return (0);
}
278 
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 * Removes the node from the per-mount rb-tree, then releases every
 * resource attached to it: directory cache, large file handle,
 * cached attributes, credentials, commit lock, and finally the
 * nfsnode itself.
 */
int
nfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (prtactive && vp->v_usecount > 1)
		vprint("nfs_reclaim: pushing active", vp);

	/* Unlink from the rb-tree first so no new lookup can find us. */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	rb_tree_remove_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache != NULL) {
		nfs_invaldircache(vp, NFS_INVALDIRCACHE_FORCE);
		hashdone(np->n_dircache, HASH_LIST, nfsdirhashmask);
	}
	KASSERT(np->n_dirgens == NULL);

	/* Only handles larger than NFS_SMALLFH were allocated separately. */
	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);

	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);

	/* The commit lock only exists on regular files. */
	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}
328 
329 void
330 nfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
331 {
332 
333 	*eobp = MAX(size, vp->v_size);
334 }
335 
336 int
337 nfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
338     kauth_cred_t cred)
339 {
340 
341 	return 0;
342 }
343 
344 int
345 nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
346 {
347 	int i;
348 
349 	mutex_enter(vp->v_interlock);
350 	for (i = 0; i < npages; i++) {
351 		pmap_page_protect(pgs[i], VM_PROT_READ);
352 	}
353 	mutex_exit(vp->v_interlock);
354 
355 	return genfs_gop_write(vp, pgs, npages, flags);
356 }
357 
/*
 * Remove a silly file that was rename'd earlier.
 * Workqueue handler: 'work' is embedded in the struct sillyrename
 * queued by nfs_inactive().  Locks the parent directory, removes the
 * silly-renamed file, then frees the sillyrename record.
 */
static void
nfs_sillyworker(struct work *work, void *arg)
{
	struct sillyrename *sp;
	int error;

	sp = (struct sillyrename *)work;
	error = vn_lock(sp->s_dvp, LK_EXCLUSIVE);
	if (error || sp->s_dvp->v_data == NULL) {
		/* XXX should recover */
		printf("%s: vp=%p error=%d\n", __func__, sp->s_dvp, error);
		/*
		 * vput() drops both the lock and the reference; when the
		 * lock was never obtained, only the reference is dropped.
		 */
		if (error == 0) {
			vput(sp->s_dvp);
		} else {
			vrele(sp->s_dvp);
		}
	} else {
		nfs_removeit(sp);
		vput(sp->s_dvp);
	}
	/* The sillyrename record owns its credential; release both. */
	kauth_cred_free(sp->s_cred);
	kmem_free(sp, sizeof(*sp));
}
384