/*	$NetBSD: nfs_node.c,v 1.126 2020/05/01 08:43:00 hannken Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.126 2020/05/01 08:43:00 hannken Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/hash.h>
#include <sys/kauth.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/nfs_var.h>

struct pool nfs_node_pool;
struct pool nfs_vattr_pool;
static struct workqueue *nfs_sillyworkq;

static void nfs_gop_size(struct vnode *, off_t, off_t *, int);
static int nfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
static int nfs_gop_write(struct vnode *, struct vm_page **, int, int);
static void nfs_sillyworker(struct work *, void *);

static const struct genfs_ops nfs_genfsops = {
	.gop_size = nfs_gop_size,
	.gop_alloc = nfs_gop_alloc,
	.gop_write = nfs_gop_write,
	.gop_putrange = genfs_gop_putrange,
};
/*
 * Initialize the nfsnode and vattr pools and the silly-rename workqueue.
 */
void
nfs_node_init(void)
{

	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    &pool_allocator_nointr, IPL_NONE);
	if (workqueue_create(&nfs_sillyworkq, "nfssilly", nfs_sillyworker,
	    NULL, PRI_NONE, IPL_NONE, 0) != 0) {
		panic("nfs_node_init");
	}
}

/*
 * Free resources previously allocated in nfs_node_init().
 */
void
nfs_node_done(void)
{

	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	workqueue_destroy(nfs_sillyworkq);
}

/*
 * Initialize this vnode / nfs node pair.
 * Caller assures no other thread will try to load this node.
 */
int
nfs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	int fhsize = key_len;
	const nfsfh_t *fhp = key;
	struct nfsnode *np;

	/* Allocate and initialize the nfsnode. */
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
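	/*
	 * File handles that do not fit in the nfsnode's embedded n_fh
	 * buffer get their own kmem allocation; small handles share the
	 * embedded buffer.
	 */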
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	vp->v_tag = VT_NFS;
	vp->v_type = VNON;
	vp->v_op = nfsv2_vnodeop_p;
	vp->v_data = np;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);
	np->n_vnode = vp;

	/* Initialize genfs node. */
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	*new_key = np->n_fhp;
	return 0;
}

/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a nfsnode structure is returned.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	int error;
	struct vnode *vp;

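	/*
	 * vcache_get() returns an existing vnode for this file handle or
	 * creates one via nfs_loadvnode(); lock it before handing it back.
	 */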
	error = vcache_get(mntp, fhp, fhsize, &vp);
	if (error)
		return error;
	error = vn_lock(vp, LK_EXCLUSIVE | lkflags);
	if (error) {
		vrele(vp);
		return error;
	}
	*npp = VTONFS(vp);
	return 0;
}

int
nfs_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	/* If we have a delayed truncation, do it now. */
	nfs_delayedtruncate(vp);

	np = VTONFS(vp);
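	/*
	 * Detach any pending silly rename from the node; its buffers are
	 * flushed here and the actual remove is queued further below.
	 */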
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = NULL;
	if (sp != NULL)
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
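	/*
	 * Ask for the vnode to be recycled if the file has been removed,
	 * and keep only the flags that must survive reactivation.
	 */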
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

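	/*
	 * Hand the silly-rename removal to the workqueue; see
	 * nfs_sillyworker() below.
	 */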
	if (sp != NULL) {
		workqueue_enqueue(nfs_sillyworkq, &sp->s_work, NULL);
	}

	return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_reclaim(void *v)
{
	struct vop_reclaim_v2_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);

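	/* VOP_RECLAIM (v2) is entered with the vnode locked; release it. */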
	VOP_UNLOCK(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache != NULL) {
		nfs_invaldircache(vp, NFS_INVALDIRCACHE_FORCE);
		hashdone(np->n_dircache, HASH_LIST, nfsdirhashmask);
	}
	KASSERT(np->n_dirgens == NULL);

	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);

	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);

	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}

void
nfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{

	*eobp = MAX(size, vp->v_size);
}

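/*
 * nfs_gop_alloc: backing store is allocated by the server, so there is
 * nothing to do here.
 */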
int
nfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
    kauth_cred_t cred)
{

	return 0;
}

int
nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int i;

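	/*
	 * Downgrade all mappings of these pages to read-only so that a
	 * write to a page during the RPC faults and marks it dirty again.
	 */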
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	for (i = 0; i < npages; i++) {
		pmap_page_protect(pgs[i], VM_PROT_READ);
	}
	rw_exit(vp->v_uobj.vmobjlock);

	return genfs_gop_write(vp, pgs, npages, flags);
}

/*
 * Remove a silly file that was rename'd earlier
 */
static void
nfs_sillyworker(struct work *work, void *arg)
{
	struct sillyrename *sp;
	int error;

	sp = (struct sillyrename *)work;
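	/*
	 * Lock the parent directory and remove the silly file.  If the
	 * directory has already been reclaimed (v_data == NULL) or the
	 * lock fails, just drop the reference and the rename record.
	 */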
	error = vn_lock(sp->s_dvp, LK_EXCLUSIVE);
	if (error || sp->s_dvp->v_data == NULL) {
		/* XXX should recover */
		printf("%s: vp=%p error=%d\n", __func__, sp->s_dvp, error);
		if (error == 0) {
			vput(sp->s_dvp);
		} else {
			vrele(sp->s_dvp);
		}
	} else {
		nfs_removeit(sp);
		vput(sp->s_dvp);
	}
	kauth_cred_free(sp->s_cred);
	kmem_free(sp, sizeof(*sp));
}