xref: /netbsd-src/sys/nfs/nfs_vnops.c (revision fee42fd1d9cec2db989eb928c110ccc4d911165a)
1 /*	$NetBSD: nfs_vnops.c,v 1.325 2023/12/10 18:16:08 schmonz Exp $	*/
2 
3 /*
4  * Copyright (c) 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Rick Macklem at The University of Guelph.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)nfs_vnops.c	8.19 (Berkeley) 7/31/95
35  */
36 
37 /*
38  * vnode op calls for Sun NFS version 2 and 3
39  */
40 
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.325 2023/12/10 18:16:08 schmonz Exp $");
43 
44 #ifdef _KERNEL_OPT
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47 #endif
48 
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/kernel.h>
52 #include <sys/systm.h>
53 #include <sys/resourcevar.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/condvar.h>
57 #include <sys/disk.h>
58 #include <sys/malloc.h>
59 #include <sys/kmem.h>
60 #include <sys/mbuf.h>
61 #include <sys/mutex.h>
62 #include <sys/namei.h>
63 #include <sys/vnode.h>
64 #include <sys/dirent.h>
65 #include <sys/fcntl.h>
66 #include <sys/hash.h>
67 #include <sys/lockf.h>
68 #include <sys/stat.h>
69 #include <sys/unistd.h>
70 #include <sys/kauth.h>
71 #include <sys/cprng.h>
72 
73 #ifdef UVMHIST
74 #include <uvm/uvm.h>
75 #endif
76 #include <uvm/uvm_extern.h>
77 #include <uvm/uvm_stat.h>
78 
79 #include <miscfs/fifofs/fifo.h>
80 #include <miscfs/genfs/genfs.h>
81 #include <miscfs/genfs/genfs_node.h>
82 #include <miscfs/specfs/specdev.h>
83 
84 #include <nfs/rpcv2.h>
85 #include <nfs/nfsproto.h>
86 #include <nfs/nfs.h>
87 #include <nfs/nfsnode.h>
88 #include <nfs/nfsmount.h>
89 #include <nfs/xdr_subs.h>
90 #include <nfs/nfsm_subs.h>
91 #include <nfs/nfs_var.h>
92 
93 #include <net/if.h>
94 #include <netinet/in.h>
95 #include <netinet/in_var.h>
96 
97 /*
98  * Global vfs data structures for nfs
99  */
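/*
 * Each of the following tables maps vnode operations to their NFS
 * implementations; operations that need no NFS-specific handling are
 * delegated to the generic genfs/spec/fifo routines.
 */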
100 int (**nfsv2_vnodeop_p)(void *);
101 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
102 	{ &vop_default_desc, vn_default_error },
103 	{ &vop_parsepath_desc, genfs_parsepath },	/* parsepath */
104 	{ &vop_lookup_desc, nfs_lookup },		/* lookup */
105 	{ &vop_create_desc, nfs_create },		/* create */
106 	{ &vop_mknod_desc, nfs_mknod },			/* mknod */
107 	{ &vop_open_desc, nfs_open },			/* open */
108 	{ &vop_close_desc, nfs_close },			/* close */
109 	{ &vop_access_desc, nfs_access },		/* access */
110 	{ &vop_accessx_desc, genfs_accessx },		/* accessx */
111 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
112 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
113 	{ &vop_read_desc, nfs_read },			/* read */
114 	{ &vop_write_desc, nfs_write },			/* write */
115 	{ &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
116 	{ &vop_fdiscard_desc, genfs_eopnotsupp },	/* fdiscard */
117 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
118 	{ &vop_ioctl_desc, genfs_enoioctl },		/* ioctl */
119 	{ &vop_poll_desc, genfs_poll },			/* poll */
120 	{ &vop_kqfilter_desc, nfs_kqfilter },		/* kqfilter */
121 	{ &vop_revoke_desc, genfs_revoke },		/* revoke */
122 	{ &vop_mmap_desc, genfs_mmap },			/* mmap */
123 	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
124 	{ &vop_seek_desc, genfs_seek },			/* seek */
125 	{ &vop_remove_desc, nfs_remove },		/* remove */
126 	{ &vop_link_desc, nfs_link },			/* link */
127 	{ &vop_rename_desc, nfs_rename },		/* rename */
128 	{ &vop_mkdir_desc, nfs_mkdir },			/* mkdir */
129 	{ &vop_rmdir_desc, nfs_rmdir },			/* rmdir */
130 	{ &vop_symlink_desc, nfs_symlink },		/* symlink */
131 	{ &vop_readdir_desc, nfs_readdir },		/* readdir */
132 	{ &vop_readlink_desc, nfs_readlink },		/* readlink */
133 	{ &vop_abortop_desc, genfs_abortop },		/* abortop */
134 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
135 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
136 	{ &vop_lock_desc, genfs_lock },			/* lock */
137 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
138 	{ &vop_bmap_desc, nfs_bmap },			/* bmap */
139 	{ &vop_strategy_desc, nfs_strategy },		/* strategy */
140 	{ &vop_print_desc, nfs_print },			/* print */
141 	{ &vop_islocked_desc, genfs_islocked },		/* islocked */
142 	{ &vop_pathconf_desc, nfs_pathconf },		/* pathconf */
143 	{ &vop_advlock_desc, nfs_advlock },		/* advlock */
144 	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
145 	{ &vop_getpages_desc, nfs_getpages },		/* getpages */
146 	{ &vop_putpages_desc, genfs_putpages },		/* putpages */
147 	{ NULL, NULL }
148 };
149 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
150 	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
151 
152 /*
153  * Special device vnode ops
154  */
155 int (**spec_nfsv2nodeop_p)(void *);
156 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
157 	{ &vop_default_desc, vn_default_error },
158 	GENFS_SPECOP_ENTRIES,
159 	{ &vop_close_desc, nfsspec_close },		/* close */
160 	{ &vop_access_desc, nfsspec_access },		/* access */
161 	{ &vop_accessx_desc, genfs_accessx },		/* accessx */
162 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
163 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
164 	{ &vop_read_desc, nfsspec_read },		/* read */
165 	{ &vop_write_desc, nfsspec_write },		/* write */
166 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
167 	{ &vop_fsync_desc, spec_fsync },		/* fsync */
168 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
169 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
170 	{ &vop_lock_desc, genfs_lock },			/* lock */
171 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
172 	{ &vop_print_desc, nfs_print },			/* print */
173 	{ &vop_islocked_desc, genfs_islocked },		/* islocked */
174 	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
175 	{ NULL, NULL }
176 };
177 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
178 	{ &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
179 
180 int (**fifo_nfsv2nodeop_p)(void *);
181 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
182 	{ &vop_default_desc, vn_default_error },
183 	GENFS_FIFOOP_ENTRIES,
184 	{ &vop_close_desc, nfsfifo_close },		/* close */
185 	{ &vop_access_desc, nfsspec_access },		/* access */
186 	{ &vop_accessx_desc, genfs_accessx },		/* accessx */
187 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
188 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
189 	{ &vop_read_desc, nfsfifo_read },		/* read */
190 	{ &vop_write_desc, nfsfifo_write },		/* write */
191 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
192 	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
193 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
194 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
195 	{ &vop_lock_desc, genfs_lock },			/* lock */
196 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
197 	{ &vop_strategy_desc, vn_fifo_bypass },		/* strategy */
198 	{ &vop_print_desc, nfs_print },			/* print */
199 	{ &vop_islocked_desc, genfs_islocked },		/* islocked */
200 	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
201 	{ NULL, NULL }
202 };
203 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
204 	{ &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
205 
206 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
207     size_t, kauth_cred_t, struct lwp *);
208 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);
209 
210 /*
211  * Global variables
212  */
213 extern u_int32_t nfs_true, nfs_false;
214 extern u_int32_t nfs_xdrneg1;
215 extern const nfstype nfsv3_type[9];
216 
217 int nfs_numasync = 0;
218 #define	DIRHDSIZ	_DIRENT_NAMEOFF(dp)
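/*
 * Advance a single-iovec uio by "siz" bytes: shrink the residual count,
 * bump the iovec base pointer and shrink the iovec length.  uio_offset
 * is not touched; callers adjust it themselves.
 */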
219 #define UIO_ADVANCE(uio, siz) \
220     (void)((uio)->uio_resid -= (siz), \
221     (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
222     (uio)->uio_iov->iov_len -= (siz))
223 
224 static void nfs_cache_enter(struct vnode *, struct vnode *,
225     struct componentname *);
226 
227 static void
228 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
229     struct componentname *cnp)
230 {
231 	struct nfsnode *dnp = VTONFS(dvp);
232 
233 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
234 		return;
235 	}
236 	if (vp != NULL) {
237 		struct nfsnode *np = VTONFS(vp);
238 
239 		np->n_ctime = np->n_vattr->va_ctime.tv_sec;
240 	}
241 
242 	if (!timespecisset(&dnp->n_nctime))
243 		dnp->n_nctime = dnp->n_vattr->va_mtime;
244 
245 	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);
246 }
247 
248 /*
249  * nfs null call from vfs.
250  */
251 int
252 nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
253 {
254 	char *bpos, *dpos;
255 	int error = 0;
256 	struct mbuf *mreq, *mrep, *md, *mb __unused;
257 	struct nfsnode *np = VTONFS(vp);
258 
259 	nfsm_reqhead(np, NFSPROC_NULL, 0);
260 	nfsm_request(np, NFSPROC_NULL, l, cred);
261 	nfsm_reqdone;
262 	return (error);
263 }
264 
265 /*
266  * nfs access vnode op.
267  * For nfs version 2, just return ok. File accesses may fail later.
268  * For nfs version 3, use the access rpc to check accessibility. If file modes
269  * are changed on the server, accesses might still fail later.
270  */
271 int
272 nfs_access(void *v)
273 {
274 	struct vop_access_args /* {
275 		struct vnode *a_vp;
276 		accmode_t  a_accmode;
277 		kauth_cred_t a_cred;
278 	} */ *ap = v;
279 	struct vnode *vp = ap->a_vp;
280 #ifndef NFS_V2_ONLY
281 	u_int32_t *tl;
282 	char *cp;
283 	int32_t t1, t2;
284 	char *bpos, *dpos, *cp2;
285 	int error = 0, attrflag;
286 	struct mbuf *mreq, *mrep, *md, *mb;
287 	u_int32_t mode, rmode;
288 	const int v3 = NFS_ISV3(vp);
289 #endif
290 	int cachevalid;
291 	struct nfsnode *np = VTONFS(vp);
292 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
293 
294 	cachevalid = (np->n_accstamp != -1 &&
295 	    (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
296 	    np->n_accuid == kauth_cred_geteuid(ap->a_cred));
297 
298 	/*
299 	 * Check access cache first. If this request has been made for this
300 	 * uid shortly before, use the cached result.
301 	 */
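	/*
	 * A cached success is reused when the requested bits are a subset
	 * of the modes that were granted; a cached failure is reused only
	 * when the previously denied modes are a subset of the current
	 * request.
	 */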
302 	if (cachevalid) {
303 		if (!np->n_accerror) {
304 			if  ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
305 				return np->n_accerror;
306 		} else if ((np->n_accmode & ap->a_accmode) == np->n_accmode)
307 			return np->n_accerror;
308 	}
309 
310 #ifndef NFS_V2_ONLY
311 	/*
312 	 * For nfs v3, do an access rpc, otherwise you are stuck emulating
313 	 * ufs_access() locally using the vattr. This may not be correct,
314 	 * since the server may apply other access criteria such as
315 	 * client uid-->server uid mapping that we do not know about, but
316 	 * this is better than just returning anything that is lying about
317 	 * in the cache.
318 	 */
319 	if (v3) {
320 		nfsstats.rpccnt[NFSPROC_ACCESS]++;
321 		nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
322 		nfsm_fhtom(np, v3);
323 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
324 		if (ap->a_accmode & VREAD)
325 			mode = NFSV3ACCESS_READ;
326 		else
327 			mode = 0;
328 		if (vp->v_type != VDIR) {
329 			if (ap->a_accmode & VWRITE)
330 				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
331 			if (ap->a_accmode & VEXEC)
332 				mode |= NFSV3ACCESS_EXECUTE;
333 		} else {
334 			if (ap->a_accmode & VWRITE)
335 				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
336 					 NFSV3ACCESS_DELETE);
337 			if (ap->a_accmode & VEXEC)
338 				mode |= NFSV3ACCESS_LOOKUP;
339 		}
340 		*tl = txdr_unsigned(mode);
341 		nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
342 		nfsm_postop_attr(vp, attrflag, 0);
343 		if (!error) {
344 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
345 			rmode = fxdr_unsigned(u_int32_t, *tl);
346 			/*
347 			 * The NFS V3 spec does not clarify whether or not
348 			 * the returned access bits can be a superset of
349 			 * the ones requested, so...
350 			 */
351 			if ((rmode & mode) != mode)
352 				error = EACCES;
353 		}
354 		nfsm_reqdone;
355 	} else
356 #endif
357 		return (nfsspec_access(ap));
358 #ifndef NFS_V2_ONLY
359 	/*
360 	 * Disallow write attempts on filesystems mounted read-only;
361 	 * unless the file is a socket, fifo, or a block or character
362 	 * device resident on the filesystem.
363 	 */
364 	if (!error && (ap->a_accmode & VWRITE) &&
365 	    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
366 		switch (vp->v_type) {
367 		case VREG:
368 		case VDIR:
369 		case VLNK:
370 			error = EROFS;
371 		default:
372 			break;
373 		}
374 	}
375 
376 	if (!error || error == EACCES) {
377 		/*
378 		 * If we got the same result as for a previous,
379 		 * different request, OR it in. Don't update
380 		 * the timestamp in that case.
381 		 */
382 		if (cachevalid && np->n_accstamp != -1 &&
383 		    error == np->n_accerror) {
384 			if (!error)
385 				np->n_accmode |= ap->a_accmode;
386 			else if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
387 				np->n_accmode = ap->a_accmode;
388 		} else {
389 			np->n_accstamp = time_uptime;
390 			np->n_accuid = kauth_cred_geteuid(ap->a_cred);
391 			np->n_accmode = ap->a_accmode;
392 			np->n_accerror = error;
393 		}
394 	}
395 
396 	return (error);
397 #endif
398 }
399 
400 /*
401  * nfs open vnode op
402  * Check to see if the type is ok
403  * and that deletion is not in progress.
404  * For paged in text files, you will need to flush the page cache
405  * if consistency is lost.
406  */
407 /* ARGSUSED */
408 int
409 nfs_open(void *v)
410 {
411 	struct vop_open_args /* {
412 		struct vnode *a_vp;
413 		int  a_mode;
414 		kauth_cred_t a_cred;
415 	} */ *ap = v;
416 	struct vnode *vp = ap->a_vp;
417 	struct nfsnode *np = VTONFS(vp);
418 	int error;
419 
420 	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
421 		return (EACCES);
422 	}
423 
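	/*
	 * Stash the credentials used at open time; read and write RPCs
	 * issued later from the buffer cache no longer run in the opener's
	 * context, so they use n_rcred/n_wcred instead.
	 */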
424 	if (ap->a_mode & FREAD) {
425 		if (np->n_rcred != NULL)
426 			kauth_cred_free(np->n_rcred);
427 		np->n_rcred = ap->a_cred;
428 		kauth_cred_hold(np->n_rcred);
429 	}
430 	if (ap->a_mode & FWRITE) {
431 		if (np->n_wcred != NULL)
432 			kauth_cred_free(np->n_wcred);
433 		np->n_wcred = ap->a_cred;
434 		kauth_cred_hold(np->n_wcred);
435 	}
436 
437 	error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
438 	if (error)
439 		return error;
440 
441 	NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
442 
443 	return (0);
444 }
445 
446 /*
447  * nfs close vnode op
448  * What an NFS client should do upon close after writing is a debatable issue.
449  * Most NFS clients push delayed writes to the server upon close, basically for
450  * two reasons:
451  * 1 - So that any write errors may be reported back to the client process
452  *     doing the close system call. By far the two most likely errors are
453  *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
454  * 2 - To put a worst case upper bound on cache inconsistency between
455  *     multiple clients for the file.
456  * There is also a consistency problem for Version 2 of the protocol w.r.t.
457  * not being able to tell if other clients are writing a file concurrently,
458  * since there is no way of knowing if the changed modify time in the reply
459  * is only due to the write for this client.
460  * (NFS Version 3 provides weak cache consistency data in the reply that
461  *  should be sufficient to detect and handle this case.)
462  *
463  * The current code does the following:
464  * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
465  * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
466  *                     or commit them (this satisfies 1 and 2 except for the
467  *                     case where the server crashes after this close but
468  *                     before the commit RPC, which is felt to be "good
469  *                     enough"). Changing the last argument to nfs_flush()
470  *                     to 1 would force a commit operation, if it is felt
471  *                     that a commit is necessary now.
472  */
473 /* ARGSUSED */
474 int
475 nfs_close(void *v)
476 {
477 	struct vop_close_args /* {
478 		struct vnodeop_desc *a_desc;
479 		struct vnode *a_vp;
480 		int  a_fflag;
481 		kauth_cred_t a_cred;
482 	} */ *ap = v;
483 	struct vnode *vp = ap->a_vp;
484 	struct nfsnode *np = VTONFS(vp);
485 	int error = 0;
486 	UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
487 
488 	if (vp->v_type == VREG) {
489 	    if (np->n_flag & NMODIFIED) {
490 #ifndef NFS_V2_ONLY
491 		if (NFS_ISV3(vp)) {
492 		    error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
493 		    np->n_flag &= ~NMODIFIED;
494 		} else
495 #endif
496 		    error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
497 		NFS_INVALIDATE_ATTRCACHE(np);
498 	    }
499 	    if (np->n_flag & NWRITEERR) {
500 		np->n_flag &= ~NWRITEERR;
501 		error = np->n_error;
502 	    }
503 	}
504 	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
505 	return (error);
506 }
507 
508 /*
509  * nfs getattr call from vfs.
510  */
511 int
512 nfs_getattr(void *v)
513 {
514 	struct vop_getattr_args /* {
515 		struct vnode *a_vp;
516 		struct vattr *a_vap;
517 		kauth_cred_t a_cred;
518 	} */ *ap = v;
519 	struct vnode *vp = ap->a_vp;
520 	struct nfsnode *np = VTONFS(vp);
521 	char *cp;
522 	u_int32_t *tl;
523 	int32_t t1, t2;
524 	char *bpos, *dpos;
525 	int error = 0;
526 	struct mbuf *mreq, *mrep, *md, *mb;
527 	const int v3 = NFS_ISV3(vp);
528 
529 	/*
530 	 * Update local times for special files.
531 	 */
532 	if (np->n_flag & (NACC | NUPD))
533 		np->n_flag |= NCHG;
534 
535 	/*
536 	 * if we have delayed truncation, do it now.
537 	 */
538 	nfs_delayedtruncate(vp);
539 
540 	/*
541 	 * First look in the cache.
542 	 */
543 	if (nfs_getattrcache(vp, ap->a_vap) == 0)
544 		return (0);
545 	nfsstats.rpccnt[NFSPROC_GETATTR]++;
546 	nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
547 	nfsm_fhtom(np, v3);
548 	nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
549 	if (!error) {
550 		nfsm_loadattr(vp, ap->a_vap, 0);
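		/*
		 * Never advertise a directory blocksize smaller than the
		 * chunk size the client uses for directory I/O.
		 */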
551 		if (vp->v_type == VDIR &&
552 		    ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
553 			ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
554 	}
555 	nfsm_reqdone;
556 	return (error);
557 }
558 
559 /*
560  * nfs setattr call.
561  */
562 int
563 nfs_setattr(void *v)
564 {
565 	struct vop_setattr_args /* {
566 		struct vnodeop_desc *a_desc;
567 		struct vnode *a_vp;
568 		struct vattr *a_vap;
569 		kauth_cred_t a_cred;
570 	} */ *ap = v;
571 	struct vnode *vp = ap->a_vp;
572 	struct nfsnode *np = VTONFS(vp);
573 	struct vattr *vap = ap->a_vap;
574 	int error = 0;
575 	u_quad_t tsize = 0;
576 
577 	/*
578 	 * Setting of flags is not supported.
579 	 */
580 	if (vap->va_flags != VNOVAL)
581 		return (EOPNOTSUPP);
582 
583 	/*
584 	 * Disallow write attempts if the filesystem is mounted read-only.
585 	 */
586   	if ((vap->va_uid != (uid_t)VNOVAL ||
587 	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
588 	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
589 	    (vp->v_mount->mnt_flag & MNT_RDONLY))
590 		return (EROFS);
591 	if (vap->va_size != VNOVAL) {
592 		if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
593 			return EFBIG;
594 		}
595  		switch (vp->v_type) {
596  		case VDIR:
597  			return (EISDIR);
598  		case VCHR:
599  		case VBLK:
600  		case VSOCK:
601  		case VFIFO:
602 			if (vap->va_mtime.tv_sec == VNOVAL &&
603 			    vap->va_atime.tv_sec == VNOVAL &&
604 			    vap->va_mode == (mode_t)VNOVAL &&
605 			    vap->va_uid == (uid_t)VNOVAL &&
606 			    vap->va_gid == (gid_t)VNOVAL)
607 				return (0);
608  			vap->va_size = VNOVAL;
609  			break;
610  		default:
611 			/*
612 			 * Disallow write attempts if the filesystem is
613 			 * mounted read-only.
614 			 */
615 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
616 				return (EROFS);
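			/*
			 * Apply the size change locally (page cache and
			 * nfsnode) before talking to the server, and roll
			 * it back below if the flush or the SETATTR RPC
			 * fails.
			 */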
617 			genfs_node_wrlock(vp);
618  			uvm_vnp_setsize(vp, vap->va_size);
619  			tsize = np->n_size;
620 			np->n_size = vap->va_size;
621  			if (vap->va_size == 0)
622  				error = nfs_vinvalbuf(vp, 0,
623  				     ap->a_cred, curlwp, 1);
624 			else
625 				error = nfs_vinvalbuf(vp, V_SAVE,
626 				     ap->a_cred, curlwp, 1);
627 			if (error) {
628 				uvm_vnp_setsize(vp, tsize);
629 				genfs_node_unlock(vp);
630 				return (error);
631 			}
632  			np->n_vattr->va_size = vap->va_size;
633   		}
634   	} else {
635 		/*
636 		 * flush files before setattr because a later write of
637 		 * cached data might change timestamps or reset sugid bits
638 		 */
639 		if ((vap->va_mtime.tv_sec != VNOVAL ||
640 		     vap->va_atime.tv_sec != VNOVAL ||
641 		     vap->va_mode != VNOVAL) &&
642 		    vp->v_type == VREG &&
643   		    (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
644 		 			   curlwp, 1)) == EINTR)
645 			return (error);
646 	}
647 	error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
648 	if (vap->va_size != VNOVAL) {
649 		if (error) {
650 			np->n_size = np->n_vattr->va_size = tsize;
651 			uvm_vnp_setsize(vp, np->n_size);
652 		}
653 		genfs_node_unlock(vp);
654 	}
655 	return (error);
656 }
657 
658 /*
659  * Do an nfs setattr rpc.
660  */
661 int
662 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
663 {
664 	struct nfsv2_sattr *sp;
665 	char *cp;
666 	int32_t t1, t2;
667 	char *bpos, *dpos;
668 	u_int32_t *tl;
669 	int error = 0;
670 	struct mbuf *mreq, *mrep, *md, *mb;
671 	const int v3 = NFS_ISV3(vp);
672 	struct nfsnode *np = VTONFS(vp);
673 #ifndef NFS_V2_ONLY
674 	int wccflag = NFSV3_WCCRATTR;
675 	char *cp2;
676 #endif
677 
678 	nfsstats.rpccnt[NFSPROC_SETATTR]++;
679 	nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
680 	nfsm_fhtom(np, v3);
681 #ifndef NFS_V2_ONLY
682 	if (v3) {
683 		nfsm_v3attrbuild(vap, true);
684 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
685 		*tl = nfs_false;
686 	} else {
687 #endif
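		/*
		 * NFSv2 has no per-field "don't change" flag; fields that
		 * should be left alone are sent as -1 (nfs_xdrneg1).
		 */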
688 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
689 		if (vap->va_mode == (mode_t)VNOVAL)
690 			sp->sa_mode = nfs_xdrneg1;
691 		else
692 			sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
693 		if (vap->va_uid == (uid_t)VNOVAL)
694 			sp->sa_uid = nfs_xdrneg1;
695 		else
696 			sp->sa_uid = txdr_unsigned(vap->va_uid);
697 		if (vap->va_gid == (gid_t)VNOVAL)
698 			sp->sa_gid = nfs_xdrneg1;
699 		else
700 			sp->sa_gid = txdr_unsigned(vap->va_gid);
701 		sp->sa_size = txdr_unsigned(vap->va_size);
702 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
703 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
704 #ifndef NFS_V2_ONLY
705 	}
706 #endif
707 	nfsm_request(np, NFSPROC_SETATTR, l, cred);
708 #ifndef NFS_V2_ONLY
709 	if (v3) {
710 		nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
711 	} else
712 #endif
713 		nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
714 	nfsm_reqdone;
715 	return (error);
716 }
717 
718 /*
719  * nfs lookup call, one step at a time...
720  * First look in cache
721  * If not found, do the rpc.
722  */
723 int
724 nfs_lookup(void *v)
725 {
726 	struct vop_lookup_v2_args /* {
727 		struct vnodeop_desc *a_desc;
728 		struct vnode *a_dvp;
729 		struct vnode **a_vpp;
730 		struct componentname *a_cnp;
731 	} */ *ap = v;
732 	struct componentname *cnp = ap->a_cnp;
733 	struct vnode *dvp = ap->a_dvp;
734 	struct vnode **vpp = ap->a_vpp;
735 	int flags;
736 	struct vnode *newvp;
737 	u_int32_t *tl;
738 	char *cp;
739 	int32_t t1, t2;
740 	char *bpos, *dpos, *cp2;
741 	struct mbuf *mreq, *mrep, *md, *mb;
742 	long len;
743 	nfsfh_t *fhp;
744 	struct nfsnode *np;
745 	int cachefound;
746 	int error = 0, attrflag, fhsize;
747 	const int v3 = NFS_ISV3(dvp);
748 
749 	flags = cnp->cn_flags;
750 
751 	*vpp = NULLVP;
752 	newvp = NULLVP;
753 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
754 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
755 		return (EROFS);
756 	if (dvp->v_type != VDIR)
757 		return (ENOTDIR);
758 
759 	/*
760 	 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
761 	 */
762 	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
763 		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
764 		if (error)
765 			return error;
766 		if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
767 			return EISDIR;
768 		vref(dvp);
769 		*vpp = dvp;
770 		return 0;
771 	}
772 
773 	np = VTONFS(dvp);
774 
775 	/*
776 	 * Before performing an RPC, check the name cache to see if
777 	 * the directory/name pair we are looking for is known already.
778 	 * If the directory/name pair is found in the name cache,
779 	 * we have to ensure the directory has not changed from
780 	 * the time the cache entry has been created. If it has,
781 	 * the cache entry has to be ignored.
782 	 */
783 	cachefound = cache_lookup_raw(dvp, cnp->cn_nameptr, cnp->cn_namelen,
784 				      cnp->cn_flags, NULL, vpp);
785 	KASSERT(dvp != *vpp);
786 	KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
787 	if (cachefound) {
788 		struct vattr vattr;
789 
790 		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
791 		if (error != 0) {
792 			if (*vpp != NULLVP)
793 				vrele(*vpp);
794 			*vpp = NULLVP;
795 			return error;
796 		}
797 
798 		if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
799 		    || timespeccmp(&vattr.va_mtime,
800 		    &VTONFS(dvp)->n_nctime, !=)) {
801 			if (*vpp != NULLVP) {
802 				vrele(*vpp);
803 				*vpp = NULLVP;
804 			}
805 			cache_purge1(dvp, NULL, 0, PURGE_CHILDREN);
806 			timespecclear(&np->n_nctime);
807 			goto dorpc;
808 		}
809 
810 		if (*vpp == NULLVP) {
811 			/* namecache gave us a negative result */
812 			error = ENOENT;
813 			goto noentry;
814 		}
815 
816 		/*
817 		 * investigate the vnode returned by cache_lookup_raw.
818 		 * if it isn't appropriate, do an rpc.
819 		 */
820 		newvp = *vpp;
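		/*
		 * For "..", the vnode from the cache is an ancestor of dvp,
		 * so drop the lock on dvp before locking it to preserve the
		 * ancestor-before-descendant lock order.
		 */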
821 		if ((flags & ISDOTDOT) != 0) {
822 			VOP_UNLOCK(dvp);
823 		}
824 		error = vn_lock(newvp, LK_SHARED);
825 		if ((flags & ISDOTDOT) != 0) {
826 			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
827 		}
828 		if (error != 0) {
829 			/* newvp has been reclaimed. */
830 			vrele(newvp);
831 			*vpp = NULLVP;
832 			goto dorpc;
833 		}
834 		if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
835 		    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
836 			nfsstats.lookupcache_hits++;
837 			KASSERT(newvp->v_type != VNON);
838 			VOP_UNLOCK(newvp);
839 			return (0);
840 		}
841 		cache_purge1(newvp, NULL, 0, PURGE_PARENTS);
842 		vput(newvp);
843 		*vpp = NULLVP;
844 	}
845 dorpc:
846 #if 0
847 	/*
848 	 * because nfsv3 has the same CREATE semantics as ours,
849 	 * we don't have to perform LOOKUPs beforehand.
850 	 *
851 	 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
852 	 * XXX although we have no way to know if O_EXCL is requested or not.
853 	 */
854 
855 	if (v3 && cnp->cn_nameiop == CREATE &&
856 	    (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
857 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
858 		return (EJUSTRETURN);
859 	}
860 #endif /* 0 */
861 
862 	error = 0;
863 	newvp = NULLVP;
864 	nfsstats.lookupcache_misses++;
865 	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
866 	len = cnp->cn_namelen;
867 	nfsm_reqhead(np, NFSPROC_LOOKUP,
868 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
869 	nfsm_fhtom(np, v3);
870 	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
871 	nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
872 	if (error) {
873 		nfsm_postop_attr(dvp, attrflag, 0);
874 		m_freem(mrep);
875 		goto nfsmout;
876 	}
877 	nfsm_getfh(fhp, fhsize, v3);
878 
879 	/*
880 	 * Handle RENAME case...
881 	 */
882 	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
883 		if (NFS_CMPFH(np, fhp, fhsize)) {
884 			m_freem(mrep);
885 			return (EISDIR);
886 		}
887 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
888 		if (error) {
889 			m_freem(mrep);
890 			return error;
891 		}
892 		newvp = NFSTOV(np);
893 #ifndef NFS_V2_ONLY
894 		if (v3) {
895 			nfsm_postop_attr(newvp, attrflag, 0);
896 			nfsm_postop_attr(dvp, attrflag, 0);
897 		} else
898 #endif
899 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
900 		*vpp = newvp;
901 		m_freem(mrep);
902 		goto validate;
903 	}
904 
905 	/*
906 	 * The postop attr handling is duplicated for each if case,
907 	 * because it should be done while dvp is locked (unlocking
908 	 * dvp is different for each case).
909 	 */
910 
911 	if (NFS_CMPFH(np, fhp, fhsize)) {
912 		/*
913 		 * As we handle "." lookup locally, this is
914 		 * a broken server.
915 		 */
916 		m_freem(mrep);
917 		return EBADRPC;
918 	} else if (flags & ISDOTDOT) {
919 		/*
920 		 * ".." lookup
921 		 */
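		/*
		 * Unlock the parent across nfs_nget() for the same
		 * lock-ordering reason as in the name-cache ".." case above.
		 */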
922 		VOP_UNLOCK(dvp);
923 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
924 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
925 		if (error) {
926 			m_freem(mrep);
927 			return error;
928 		}
929 		newvp = NFSTOV(np);
930 
931 #ifndef NFS_V2_ONLY
932 		if (v3) {
933 			nfsm_postop_attr(newvp, attrflag, 0);
934 			nfsm_postop_attr(dvp, attrflag, 0);
935 		} else
936 #endif
937 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
938 	} else {
939 		/*
940 		 * Other lookups.
941 		 */
942 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
943 		if (error) {
944 			m_freem(mrep);
945 			return error;
946 		}
947 		newvp = NFSTOV(np);
948 #ifndef NFS_V2_ONLY
949 		if (v3) {
950 			nfsm_postop_attr(newvp, attrflag, 0);
951 			nfsm_postop_attr(dvp, attrflag, 0);
952 		} else
953 #endif
954 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
955 	}
956 	if (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) {
957 		nfs_cache_enter(dvp, newvp, cnp);
958 	}
959 	*vpp = newvp;
960 	nfsm_reqdone;
961 	if (error) {
962 		/*
963 		 * We get here only because of errors returned by
964 		 * the RPC. Otherwise we'll have returned above
965 		 * (the nfsm_* macros will jump to nfsm_reqdone
966 		 * on error).
967 		 */
968 		if (error == ENOENT && cnp->cn_nameiop != CREATE) {
969 			nfs_cache_enter(dvp, NULL, cnp);
970 		}
971 		if (newvp != NULLVP) {
972 			if (newvp == dvp) {
973 				vrele(newvp);
974 			} else {
975 				vput(newvp);
976 			}
977 		}
978 noentry:
979 		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
980 		    (flags & ISLASTCN) && error == ENOENT) {
981 			if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
982 				error = EROFS;
983 			} else {
984 				error = EJUSTRETURN;
985 			}
986 		}
987 		*vpp = NULL;
988 		return error;
989 	}
990 
991 validate:
992 	/*
993 	 * make sure we have valid type and size.
994 	 */
995 
996 	newvp = *vpp;
997 	if (newvp->v_type == VNON) {
998 		struct vattr vattr; /* dummy */
999 
1000 		KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1001 		error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
1002 		if (error) {
1003 			vput(newvp);
1004 			*vpp = NULL;
1005 		}
1006 	}
1007 	if (error)
1008 		return error;
1009 	if (newvp != dvp)
1010 		VOP_UNLOCK(newvp);
1011 	return 0;
1012 }
1013 
1014 /*
1015  * nfs read call.
1016  * Just call nfs_bioread() to do the work.
1017  */
1018 int
1019 nfs_read(void *v)
1020 {
1021 	struct vop_read_args /* {
1022 		struct vnode *a_vp;
1023 		struct uio *a_uio;
1024 		int  a_ioflag;
1025 		kauth_cred_t a_cred;
1026 	} */ *ap = v;
1027 	struct vnode *vp = ap->a_vp;
1028 
1029 	if (vp->v_type != VREG)
1030 		return EISDIR;
1031 	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1032 }
1033 
1034 /*
1035  * nfs readlink call
1036  */
1037 int
1038 nfs_readlink(void *v)
1039 {
1040 	struct vop_readlink_args /* {
1041 		struct vnode *a_vp;
1042 		struct uio *a_uio;
1043 		kauth_cred_t a_cred;
1044 	} */ *ap = v;
1045 	struct vnode *vp = ap->a_vp;
1046 	struct nfsnode *np = VTONFS(vp);
1047 
1048 	if (vp->v_type != VLNK)
1049 		return (EPERM);
1050 
1051 	if (np->n_rcred != NULL) {
1052 		kauth_cred_free(np->n_rcred);
1053 	}
1054 	np->n_rcred = ap->a_cred;
1055 	kauth_cred_hold(np->n_rcred);
1056 
1057 	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1058 }
1059 
1060 /*
1061  * Do a readlink rpc.
1062  * Called by nfs_doio() from below the buffer cache.
1063  */
1064 int
1065 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
1066 {
1067 	u_int32_t *tl;
1068 	char *cp;
1069 	int32_t t1, t2;
1070 	char *bpos, *dpos, *cp2;
1071 	int error = 0;
1072 	uint32_t len;
1073 	struct mbuf *mreq, *mrep, *md, *mb;
1074 	const int v3 = NFS_ISV3(vp);
1075 	struct nfsnode *np = VTONFS(vp);
1076 #ifndef NFS_V2_ONLY
1077 	int attrflag;
1078 #endif
1079 
1080 	nfsstats.rpccnt[NFSPROC_READLINK]++;
1081 	nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1082 	nfsm_fhtom(np, v3);
1083 	nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1084 #ifndef NFS_V2_ONLY
1085 	if (v3)
1086 		nfsm_postop_attr(vp, attrflag, 0);
1087 #endif
1088 	if (!error) {
1089 #ifndef NFS_V2_ONLY
1090 		if (v3) {
1091 			nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1092 			len = fxdr_unsigned(uint32_t, *tl);
1093 			if (len > NFS_MAXPATHLEN) {
1094 				/*
1095 				 * this pathname is too long for us.
1096 				 */
1097 				m_freem(mrep);
1098 				/* Solaris returns EINVAL. should we follow? */
1099 				error = ENAMETOOLONG;
1100 				goto nfsmout;
1101 			}
1102 		} else
1103 #endif
1104 		{
1105 			nfsm_strsiz(len, NFS_MAXPATHLEN);
1106 		}
1107 		nfsm_mtouio(uiop, len);
1108 	}
1109 	nfsm_reqdone;
1110 	return (error);
1111 }
1112 
1113 /*
1114  * nfs read rpc call
1115  * Ditto above
1116  */
1117 int
1118 nfs_readrpc(struct vnode *vp, struct uio *uiop)
1119 {
1120 	u_int32_t *tl;
1121 	char *cp;
1122 	int32_t t1, t2;
1123 	char *bpos, *dpos, *cp2;
1124 	struct mbuf *mreq, *mrep, *md, *mb;
1125 	struct nfsmount *nmp;
1126 	int error = 0, len, retlen, tsiz, eof __unused, byte_count;
1127 	const int v3 = NFS_ISV3(vp);
1128 	struct nfsnode *np = VTONFS(vp);
1129 #ifndef NFS_V2_ONLY
1130 	int attrflag;
1131 #endif
1132 
1133 #ifndef nolint
1134 	eof = 0;
1135 #endif
1136 	nmp = VFSTONFS(vp->v_mount);
1137 	tsiz = uiop->uio_resid;
1138 	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1139 		return (EFBIG);
1140 	iostat_busy(nmp->nm_stats);
1141 	byte_count = 0; /* count bytes actually transferred */
1142 	while (tsiz > 0) {
1143 		nfsstats.rpccnt[NFSPROC_READ]++;
1144 		len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1145 		nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1146 		nfsm_fhtom(np, v3);
1147 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1148 #ifndef NFS_V2_ONLY
1149 		if (v3) {
1150 			txdr_hyper(uiop->uio_offset, tl);
1151 			*(tl + 2) = txdr_unsigned(len);
1152 		} else
1153 #endif
1154 		{
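			/*
			 * NFSv2 READ arguments: 32-bit offset, count and an
			 * unused totalcount word (sent as zero).
			 */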
1155 			*tl++ = txdr_unsigned(uiop->uio_offset);
1156 			*tl++ = txdr_unsigned(len);
1157 			*tl = 0;
1158 		}
1159 		nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1160 #ifndef NFS_V2_ONLY
1161 		if (v3) {
1162 			nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1163 			if (error) {
1164 				m_freem(mrep);
1165 				goto nfsmout;
1166 			}
1167 			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1168 			eof = fxdr_unsigned(int, *(tl + 1));
1169 		} else
1170 #endif
1171 			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1172 		nfsm_strsiz(retlen, nmp->nm_rsize);
1173 		nfsm_mtouio(uiop, retlen);
1174 		m_freem(mrep);
1175 		tsiz -= retlen;
1176 		byte_count += retlen;
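		/*
		 * Stop at end of file: v3 reports EOF explicitly, while for
		 * v2 it can only be inferred from a short read.
		 */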
1177 #ifndef NFS_V2_ONLY
1178 		if (v3) {
1179 			if (eof || retlen == 0)
1180 				tsiz = 0;
1181 		} else
1182 #endif
1183 		if (retlen < len)
1184 			tsiz = 0;
1185 	}
1186 nfsmout:
1187 	iostat_unbusy(nmp->nm_stats, byte_count, 1);
1188 	return (error);
1189 }
1190 
1191 struct nfs_writerpc_context {
1192 	kmutex_t nwc_lock;
1193 	kcondvar_t nwc_cv;
1194 	int nwc_mbufcount;
1195 };
1196 
1197 /*
1198  * Free an mbuf that was used to refer to protected pages during a write
1199  * RPC call.  Called at splvm.
1200  */
1201 static void
1202 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
1203 {
1204 	struct nfs_writerpc_context *ctx = arg;
1205 
1206 	KASSERT(m != NULL);
1207 	KASSERT(ctx != NULL);
1208 	pool_cache_put(mb_cache, m);
1209 	mutex_enter(&ctx->nwc_lock);
1210 	if (--ctx->nwc_mbufcount == 0) {
1211 		cv_signal(&ctx->nwc_cv);
1212 	}
1213 	mutex_exit(&ctx->nwc_lock);
1214 }
1215 
1216 /*
1217  * nfs write call
1218  */
1219 int
1220 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
1221 {
1222 	u_int32_t *tl;
1223 	char *cp;
1224 	int32_t t1, t2;
1225 	char *bpos, *dpos;
1226 	struct mbuf *mreq, *mrep, *md, *mb;
1227 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1228 	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1229 	const int v3 = NFS_ISV3(vp);
1230 	int committed = NFSV3WRITE_FILESYNC;
1231 	struct nfsnode *np = VTONFS(vp);
1232 	struct nfs_writerpc_context ctx;
1233 	int byte_count;
1234 	size_t origresid;
1235 #ifndef NFS_V2_ONLY
1236 	char *cp2;
1237 	int rlen, commit;
1238 #endif
1239 
1240 	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1241 		panic("writerpc readonly vp %p", vp);
1242 	}
1243 
1244 #ifdef DIAGNOSTIC
1245 	if (uiop->uio_iovcnt != 1)
1246 		panic("nfs: writerpc iovcnt > 1");
1247 #endif
1248 	tsiz = uiop->uio_resid;
1249 	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1250 		return EFBIG;
1251 
1252 	mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
1253 	cv_init(&ctx.nwc_cv, "nfsmblk");
1254 	ctx.nwc_mbufcount = 1;
1255 
1256 retry:
1257 	origresid = uiop->uio_resid;
1258 	KASSERT(origresid == uiop->uio_iov->iov_len);
1259 	iostat_busy(nmp->nm_stats);
1260 	byte_count = 0; /* count of bytes actually written */
1261 	while (tsiz > 0) {
1262 		uint32_t datalen; /* data bytes need to be allocated in mbuf */
1263 		size_t backup;
1264 		bool stalewriteverf = false;
1265 
1266 		nfsstats.rpccnt[NFSPROC_WRITE]++;
1267 		len = uimin(tsiz, nmp->nm_wsize);
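		/*
		 * When the pages are protected against modification during
		 * the I/O, they are attached below as external mbufs rather
		 * than copied, so no data space is reserved in the request.
		 */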
1268 		datalen = pageprotected ? 0 : nfsm_rndup(len);
1269 		nfsm_reqhead(np, NFSPROC_WRITE,
1270 			NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1271 		nfsm_fhtom(np, v3);
1272 #ifndef NFS_V2_ONLY
1273 		if (v3) {
1274 			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1275 			txdr_hyper(uiop->uio_offset, tl);
1276 			tl += 2;
1277 			*tl++ = txdr_unsigned(len);
1278 			*tl++ = txdr_unsigned(*iomode);
1279 			*tl = txdr_unsigned(len);
1280 		} else
1281 #endif
1282 		{
1283 			u_int32_t x;
1284 
1285 			nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1286 			/* Set both "begin" and "current" to non-garbage. */
1287 			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1288 			*tl++ = x;      /* "begin offset" */
1289 			*tl++ = x;      /* "current offset" */
1290 			x = txdr_unsigned(len);
1291 			*tl++ = x;      /* total to this offset */
1292 			*tl = x;        /* size of this write */
1293 
1294 		}
1295 		if (pageprotected) {
1296 			/*
1297 			 * since we know pages can't be modified during i/o,
1298 			 * there is no need to copy them.
1299 			 */
1300 			struct mbuf *m;
1301 			struct iovec *iovp = uiop->uio_iov;
1302 
1303 			m = m_get(M_WAIT, MT_DATA);
1304 			MCLAIM(m, &nfs_mowner);
1305 			MEXTADD(m, iovp->iov_base, len, M_MBUF,
1306 			    nfs_writerpc_extfree, &ctx);
1307 			m->m_flags |= M_EXT_ROMAP;
1308 			m->m_len = len;
1309 			mb->m_next = m;
1310 			/*
1311 			 * no need to maintain mb and bpos here
1312 			 * because nothing uses them later.
1313 			 */
1314 #if 0
1315 			mb = m;
1316 			bpos = mtod(void *, mb) + mb->m_len;
1317 #endif
1318 			UIO_ADVANCE(uiop, len);
1319 			uiop->uio_offset += len;
1320 			mutex_enter(&ctx.nwc_lock);
1321 			ctx.nwc_mbufcount++;
1322 			mutex_exit(&ctx.nwc_lock);
1323 			nfs_zeropad(mb, 0, nfsm_padlen(len));
1324 		} else {
1325 			nfsm_uiotom(uiop, len);
1326 		}
1327 		nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1328 #ifndef NFS_V2_ONLY
1329 		if (v3) {
1330 			wccflag = NFSV3_WCCCHK;
1331 			nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1332 			if (!error) {
1333 				nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1334 					+ NFSX_V3WRITEVERF);
1335 				rlen = fxdr_unsigned(int, *tl++);
1336 				if (rlen == 0) {
1337 					error = NFSERR_IO;
1338 					m_freem(mrep);
1339 					break;
1340 				} else if (rlen < len) {
1341 					backup = len - rlen;
1342 					UIO_ADVANCE(uiop, -backup);
1343 					uiop->uio_offset -= backup;
1344 					len = rlen;
1345 				}
1346 				commit = fxdr_unsigned(int, *tl++);
1347 
1348 				/*
1349 				 * Return the lowest commitment level
1350 				 * obtained by any of the RPCs.
1351 				 */
1352 				if (committed == NFSV3WRITE_FILESYNC)
1353 					committed = commit;
1354 				else if (committed == NFSV3WRITE_DATASYNC &&
1355 					commit == NFSV3WRITE_UNSTABLE)
1356 					committed = commit;
1357 				mutex_enter(&nmp->nm_lock);
1358 				if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1359 					memcpy(nmp->nm_writeverf, tl,
1360 					    NFSX_V3WRITEVERF);
1361 					nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1362 				} else if ((nmp->nm_iflag &
1363 				    NFSMNT_STALEWRITEVERF) ||
1364 				    memcmp(tl, nmp->nm_writeverf,
1365 				    NFSX_V3WRITEVERF)) {
1366 					memcpy(nmp->nm_writeverf, tl,
1367 					    NFSX_V3WRITEVERF);
1368 					/*
1369 					 * note NFSMNT_STALEWRITEVERF
1370 					 * if we're the first thread to
1371 					 * notice it.
1372 					 */
1373 					if ((nmp->nm_iflag &
1374 					    NFSMNT_STALEWRITEVERF) == 0) {
1375 						stalewriteverf = true;
1376 						nmp->nm_iflag |=
1377 						    NFSMNT_STALEWRITEVERF;
1378 					}
1379 				}
1380 				mutex_exit(&nmp->nm_lock);
1381 			}
1382 		} else
1383 #endif
1384 			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1385 		if (wccflag)
1386 			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1387 		m_freem(mrep);
1388 		if (error)
1389 			break;
1390 		tsiz -= len;
1391 		byte_count += len;
1392 		if (stalewriteverf) {
1393 			*stalewriteverfp = true;
1394 			stalewriteverf = false;
1395 			if (committed == NFSV3WRITE_UNSTABLE &&
1396 			    len != origresid) {
1397 				/*
1398 				 * if our write was split into more than one
1399 				 * request and sent UNSTABLE, data from
1400 				 * previous iterations may already have been
1401 				 * lost, so resend it to the server.
1402 				 */
1403 				backup = origresid - tsiz;
1404 				UIO_ADVANCE(uiop, -backup);
1405 				uiop->uio_offset -= backup;
1406 				tsiz = origresid;
1407 				goto retry;
1408 			}
1409 		}
1410 	}
1411 nfsmout:
1412 	iostat_unbusy(nmp->nm_stats, byte_count, 0);
1413 	if (pageprotected) {
1414 		/*
1415 		 * wait until mbufs go away.
1416 		 * retransmitted mbufs can survive longer than rpc requests
1417 		 * themselves.
1418 		 */
1419 		mutex_enter(&ctx.nwc_lock);
1420 		ctx.nwc_mbufcount--;
1421 		while (ctx.nwc_mbufcount > 0) {
1422 			cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
1423 		}
1424 		mutex_exit(&ctx.nwc_lock);
1425 	}
1426 	mutex_destroy(&ctx.nwc_lock);
1427 	cv_destroy(&ctx.nwc_cv);
1428 	*iomode = committed;
1429 	if (error)
1430 		uiop->uio_resid = tsiz;
1431 	return (error);
1432 }
1433 
1434 /*
1435  * nfs mknod rpc
1436  * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1437  * mode set to specify the file type and the size field for rdev.
1438  */
1439 int
1440 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
1441 {
1442 	struct nfsv2_sattr *sp;
1443 	u_int32_t *tl;
1444 	char *cp;
1445 	int32_t t1, t2;
1446 	struct vnode *newvp = (struct vnode *)0;
1447 	struct nfsnode *dnp, *np;
1448 	char *cp2;
1449 	char *bpos, *dpos;
1450 	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1451 	struct mbuf *mreq, *mrep, *md, *mb;
1452 	u_int32_t rdev;
1453 	const int v3 = NFS_ISV3(dvp);
1454 
1455 	if (vap->va_type == VCHR || vap->va_type == VBLK)
1456 		rdev = txdr_unsigned(vap->va_rdev);
1457 	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1458 		rdev = nfs_xdrneg1;
1459 	else {
1460 		VOP_ABORTOP(dvp, cnp);
1461 		return (EOPNOTSUPP);
1462 	}
1463 	nfsstats.rpccnt[NFSPROC_MKNOD]++;
1464 	dnp = VTONFS(dvp);
1465 	nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1466 		+ nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1467 	nfsm_fhtom(dnp, v3);
1468 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1469 #ifndef NFS_V2_ONLY
1470 	if (v3) {
1471 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1472 		*tl++ = vtonfsv3_type(vap->va_type);
1473 		nfsm_v3attrbuild(vap, false);
1474 		if (vap->va_type == VCHR || vap->va_type == VBLK) {
1475 			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1476 			*tl++ = txdr_unsigned(major(vap->va_rdev));
1477 			*tl = txdr_unsigned(minor(vap->va_rdev));
1478 		}
1479 	} else
1480 #endif
1481 	{
1482 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1483 		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1484 		sp->sa_uid = nfs_xdrneg1;
1485 		sp->sa_gid = nfs_xdrneg1;
1486 		sp->sa_size = rdev;
1487 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1488 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1489 	}
1490 	nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
1491 	if (!error) {
1492 		nfsm_mtofh(dvp, newvp, v3, gotvp);
1493 		if (!gotvp) {
1494 			error = nfs_lookitup(dvp, cnp->cn_nameptr,
1495 			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1496 			if (!error)
1497 				newvp = NFSTOV(np);
1498 		}
1499 	}
1500 #ifndef NFS_V2_ONLY
1501 	if (v3)
1502 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1503 #endif
1504 	nfsm_reqdone;
1505 	if (error) {
1506 		if (newvp)
1507 			vput(newvp);
1508 	} else {
1509 		nfs_cache_enter(dvp, newvp, cnp);
1510 		*vpp = newvp;
1511 		VOP_UNLOCK(newvp);
1512 	}
1513 	VTONFS(dvp)->n_flag |= NMODIFIED;
1514 	if (!wccflag)
1515 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1516 	return (error);
1517 }
1518 
1519 /*
1520  * nfs mknod vop
1521  * just call nfs_mknodrpc() to do the work.
1522  */
1523 /* ARGSUSED */
1524 int
1525 nfs_mknod(void *v)
1526 {
1527 	struct vop_mknod_v3_args /* {
1528 		struct vnode *a_dvp;
1529 		struct vnode **a_vpp;
1530 		struct componentname *a_cnp;
1531 		struct vattr *a_vap;
1532 	} */ *ap = v;
1533 	struct vnode *dvp = ap->a_dvp;
1534 	struct componentname *cnp = ap->a_cnp;
1535 	int error;
1536 
1537 	error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1538 	if (error == 0 || error == EEXIST)
1539 		cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1540 	return (error);
1541 }
1542 
1543 /*
1544  * nfs file create call
1545  */
1546 int
1547 nfs_create(void *v)
1548 {
1549 	struct vop_create_v3_args /* {
1550 		struct vnode *a_dvp;
1551 		struct vnode **a_vpp;
1552 		struct componentname *a_cnp;
1553 		struct vattr *a_vap;
1554 	} */ *ap = v;
1555 	struct vnode *dvp = ap->a_dvp;
1556 	struct vattr *vap = ap->a_vap;
1557 	struct componentname *cnp = ap->a_cnp;
1558 	struct nfsv2_sattr *sp;
1559 	u_int32_t *tl;
1560 	char *cp;
1561 	int32_t t1, t2;
1562 	struct nfsnode *dnp, *np = (struct nfsnode *)0;
1563 	struct vnode *newvp = (struct vnode *)0;
1564 	char *bpos, *dpos, *cp2;
1565 	int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1566 	struct mbuf *mreq, *mrep, *md, *mb;
1567 	const int v3 = NFS_ISV3(dvp);
1568 	u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;
1569 
1570 	/*
1571 	 * Oops, not for me..
1572 	 */
1573 	if (vap->va_type == VSOCK)
1574 		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1575 
1576 	KASSERT(vap->va_type == VREG);
1577 
1578 #ifdef VA_EXCLUSIVE
1579 	if (vap->va_vaflags & VA_EXCLUSIVE) {
1580 		excl_mode = NFSV3CREATE_EXCLUSIVE;
1581 	}
1582 #endif
1583 again:
1584 	error = 0;
1585 	nfsstats.rpccnt[NFSPROC_CREATE]++;
1586 	dnp = VTONFS(dvp);
1587 	nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1588 		nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1589 	nfsm_fhtom(dnp, v3);
1590 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1591 #ifndef NFS_V2_ONLY
1592 	if (v3) {
1593 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1594 		if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1595 			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
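			/*
			 * An exclusive create carries an 8-byte verifier
			 * instead of attributes; fill it with random bits so
			 * the server can tell a retransmission of this
			 * request from a different create.
			 */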
1596 			nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1597 			*tl++ = cprng_fast32();
1598 			*tl = cprng_fast32();
1599 		} else {
1600 			*tl = txdr_unsigned(excl_mode);
1601 			nfsm_v3attrbuild(vap, false);
1602 		}
1603 	} else
1604 #endif
1605 	{
1606 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1607 		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1608 		sp->sa_uid = nfs_xdrneg1;
1609 		sp->sa_gid = nfs_xdrneg1;
1610 		sp->sa_size = 0;
1611 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1612 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1613 	}
1614 	nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
1615 	if (!error) {
1616 		nfsm_mtofh(dvp, newvp, v3, gotvp);
1617 		if (!gotvp) {
1618 			error = nfs_lookitup(dvp, cnp->cn_nameptr,
1619 			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1620 			if (!error)
1621 				newvp = NFSTOV(np);
1622 		}
1623 	}
1624 #ifndef NFS_V2_ONLY
1625 	if (v3)
1626 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1627 #endif
1628 	nfsm_reqdone;
1629 	if (error) {
1630 		/*
1631 		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1632 		 */
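		/*
		 * If the server doesn't support the requested create mode,
		 * fall back: EXCLUSIVE -> GUARDED -> UNCHECKED.
		 */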
1633 		if (v3 && error == ENOTSUP) {
1634 			if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1635 				excl_mode = NFSV3CREATE_GUARDED;
1636 				goto again;
1637 			} else if (excl_mode == NFSV3CREATE_GUARDED) {
1638 				excl_mode = NFSV3CREATE_UNCHECKED;
1639 				goto again;
1640 			}
1641 		}
1642 	} else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
1643 		struct timespec ts;
1644 
1645 		getnanotime(&ts);
1646 
1647 		/*
1648 		 * make sure that we'll update timestamps as
1649 		 * most server implementations use them to store
1650 		 * the create verifier.
1651 		 *
1652 		 * XXX it's better to use TOSERVER always.
1653 		 */
1654 
1655 		if (vap->va_atime.tv_sec == VNOVAL)
1656 			vap->va_atime = ts;
1657 		if (vap->va_mtime.tv_sec == VNOVAL)
1658 			vap->va_mtime = ts;
1659 
1660 		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
1661 	}
1662 	if (error == 0) {
1663 		if (cnp->cn_flags & MAKEENTRY)
1664 			nfs_cache_enter(dvp, newvp, cnp);
1665 		else
1666 			cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1667 		*ap->a_vpp = newvp;
1668 		VOP_UNLOCK(newvp);
1669 	} else {
1670 		if (newvp)
1671 			vput(newvp);
1672 		if (error == EEXIST)
1673 			cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1674 	}
1675 	VTONFS(dvp)->n_flag |= NMODIFIED;
1676 	if (!wccflag)
1677 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1678 	return (error);
1679 }
1680 
1681 /*
1682  * nfs file remove call
1683  * To try and make nfs semantics closer to ufs semantics, a file that has
1684  * other processes using the vnode is renamed instead of removed and then
1685  * removed later on the last close.
1686  * - If vrefcnt(vp) > 1
1687  *	if a rename is not already in the works,
1688  *	call nfs_sillyrename() to set it up
1689  * - else
1690  *	do the remove rpc
1691  */
1692 int
1693 nfs_remove(void *v)
1694 {
1695 	struct vop_remove_v3_args /* {
1696 		struct vnodeop_desc *a_desc;
1697 		struct vnode * a_dvp;
1698 		struct vnode * a_vp;
1699 		struct componentname * a_cnp;
1700 		nlink_t ctx_vp_new_nlink;
1701 	} */ *ap = v;
1702 	struct vnode *vp = ap->a_vp;
1703 	struct vnode *dvp = ap->a_dvp;
1704 	struct componentname *cnp = ap->a_cnp;
1705 	struct nfsnode *np = VTONFS(vp);
1706 	int error = 0;
1707 	struct vattr vattr;
1708 
1709 #ifndef DIAGNOSTIC
1710 	if (vrefcnt(vp) < 1)
1711 		panic("nfs_remove: bad vrefcnt(vp)");
1712 #endif
1713 	if (vp->v_type == VDIR)
1714 		error = EPERM;
1715 	else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
1716 	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
1717 	    vattr.va_nlink > 1)) {
1718 		/*
1719 		 * Purge the name cache so that the chance of a lookup for
1720 		 * the name succeeding while the remove is in progress is
1721 		 * minimized. Without node locking it can still happen, such
1722 		 * that an I/O op returns ESTALE, but that can happen anyway
1723 		 * if another host removes the file.
1724 		 */
1725 		cache_purge(vp);
1726 		/*
1727 		 * throw away biocache buffers, mainly to avoid
1728 		 * unnecessary delayed writes later.
1729 		 */
1730 		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
1731 		/* Do the rpc */
1732 		if (error != EINTR)
1733 			error = nfs_removerpc(dvp, cnp->cn_nameptr,
1734 				cnp->cn_namelen, cnp->cn_cred, curlwp);
1735 	} else if (!np->n_sillyrename)
1736 		error = nfs_sillyrename(dvp, vp, cnp, false);
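	/*
	 * Tell the caller the expected post-remove link count and mark the
	 * node as removed if this was the last link.
	 */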
1737 	if (error == 0 && nfs_getattrcache(vp, &vattr) == 0) {
1738 		ap->ctx_vp_new_nlink = vattr.va_nlink - 1;
1739 		if (vattr.va_nlink == 1)
1740 			np->n_flag |= NREMOVED;
1741 	}
1742 	NFS_INVALIDATE_ATTRCACHE(np);
1743 	if (dvp == vp)
1744 		vrele(vp);
1745 	else
1746 		vput(vp);
1747 	return (error);
1748 }
1749 
1750 /*
1751  * nfs file remove rpc called from nfs_inactive
1752  */
1753 int
1754 nfs_removeit(struct sillyrename *sp)
1755 {
1756 
1757 	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1758 		(struct lwp *)0));
1759 }
1760 
1761 /*
1762  * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1763  */
1764 int
1765 nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
1766 {
1767 	u_int32_t *tl;
1768 	char *cp;
1769 #ifndef NFS_V2_ONLY
1770 	int32_t t1;
1771 	char *cp2;
1772 #endif
1773 	int32_t t2;
1774 	char *bpos, *dpos;
1775 	int error = 0, wccflag = NFSV3_WCCRATTR;
1776 	struct mbuf *mreq, *mrep, *md, *mb;
1777 	const int v3 = NFS_ISV3(dvp);
1778 	int rexmit = 0;
1779 	struct nfsnode *dnp = VTONFS(dvp);
1780 
1781 	nfsstats.rpccnt[NFSPROC_REMOVE]++;
1782 	nfsm_reqhead(dnp, NFSPROC_REMOVE,
1783 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1784 	nfsm_fhtom(dnp, v3);
1785 	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1786 	nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1787 #ifndef NFS_V2_ONLY
1788 	if (v3)
1789 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1790 #endif
1791 	nfsm_reqdone;
1792 	VTONFS(dvp)->n_flag |= NMODIFIED;
1793 	if (!wccflag)
1794 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1795 	/*
1796 	 * Kludge City: If the first reply to the remove rpc is lost, the
1797 	 *   reply to the retransmitted request will be ENOENT, since the
1798 	 *   file was in fact removed.
1799 	 *   Therefore, we cheat and return success.
1800 	 */
1801 	if (rexmit && error == ENOENT)
1802 		error = 0;
1803 	return (error);
1804 }
1805 
1806 /*
1807  * nfs file rename call
1808  */
1809 int
1810 nfs_rename(void *v)
1811 {
1812 	struct vop_rename_args /* {
1813 		struct vnode *a_fdvp;
1814 		struct vnode *a_fvp;
1815 		struct componentname *a_fcnp;
1816 		struct vnode *a_tdvp;
1817 		struct vnode *a_tvp;
1818 		struct componentname *a_tcnp;
1819 	} */ *ap = v;
1820 	struct vnode *fvp = ap->a_fvp;
1821 	struct vnode *tvp = ap->a_tvp;
1822 	struct vnode *fdvp = ap->a_fdvp;
1823 	struct vnode *tdvp = ap->a_tdvp;
1824 	struct componentname *tcnp = ap->a_tcnp;
1825 	struct componentname *fcnp = ap->a_fcnp;
1826 	int error;
1827 
1828 	/* Check for cross-device rename */
1829 	if ((fvp->v_mount != tdvp->v_mount) ||
1830 	    (tvp && (fvp->v_mount != tvp->v_mount))) {
1831 		error = EXDEV;
1832 		goto out;
1833 	}
1834 
1835 	/*
1836 	 * If the tvp exists and is in use, sillyrename it before doing the
1837 	 * rename of the new file over it.
1838 	 *
1839 	 * Have sillyrename use link instead of rename if possible,
1840 	 * so that we don't lose the file if the rename fails, and so
1841 	 * that there's no window when the "to" file doesn't exist.
1842 	 */
1843 	if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
1844 	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
1845 		VN_KNOTE(tvp, NOTE_DELETE);
1846 		vput(tvp);
1847 		tvp = NULL;
1848 	}
1849 
1850 	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1851 		tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1852 		curlwp);
1853 
1854 	VN_KNOTE(fdvp, NOTE_WRITE);
1855 	VN_KNOTE(tdvp, NOTE_WRITE);
1856 	if (error == 0 || error == EEXIST) {
1857 		if (fvp->v_type == VDIR)
1858 			cache_purge(fvp);
1859 		else
1860 			cache_purge1(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1861 				     0);
1862 		if (tvp != NULL && tvp->v_type == VDIR)
1863 			cache_purge(tvp);
1864 		else
1865 			cache_purge1(tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
1866 				     0);
1867 	}
1868 out:
1869 	if (tdvp == tvp)
1870 		vrele(tdvp);
1871 	else
1872 		vput(tdvp);
1873 	if (tvp)
1874 		vput(tvp);
1875 	vrele(fdvp);
1876 	vrele(fvp);
1877 	return (error);
1878 }
1879 
1880 /*
1881  * nfs file rename rpc called from nfs_sillyrename() below
1882  */
1883 int
1884 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
1885 {
1886 	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1887 		sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
1888 }
1889 
1890 /*
1891  * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1892  */
1893 int
1894 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l)
1895 {
1896 	u_int32_t *tl;
1897 	char *cp;
1898 #ifndef NFS_V2_ONLY
1899 	int32_t t1;
1900 	char *cp2;
1901 #endif
1902 	int32_t t2;
1903 	char *bpos, *dpos;
1904 	int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1905 	struct mbuf *mreq, *mrep, *md, *mb;
1906 	const int v3 = NFS_ISV3(fdvp);
1907 	int rexmit = 0;
1908 	struct nfsnode *fdnp = VTONFS(fdvp);
1909 
1910 	nfsstats.rpccnt[NFSPROC_RENAME]++;
1911 	nfsm_reqhead(fdnp, NFSPROC_RENAME,
1912 		(NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1913 		nfsm_rndup(tnamelen));
1914 	nfsm_fhtom(fdnp, v3);
1915 	nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1916 	nfsm_fhtom(VTONFS(tdvp), v3);
1917 	nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1918 	nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
1919 #ifndef NFS_V2_ONLY
1920 	if (v3) {
1921 		nfsm_wcc_data(fdvp, fwccflag, 0, !error);
1922 		nfsm_wcc_data(tdvp, twccflag, 0, !error);
1923 	}
1924 #endif
1925 	nfsm_reqdone;
1926 	VTONFS(fdvp)->n_flag |= NMODIFIED;
1927 	VTONFS(tdvp)->n_flag |= NMODIFIED;
1928 	if (!fwccflag)
1929 		NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
1930 	if (!twccflag)
1931 		NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
1932 	/*
1933 	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1934 	 */
1935 	if (rexmit && error == ENOENT)
1936 		error = 0;
1937 	return (error);
1938 }
1939 
1940 /*
1941  * NFS link RPC, called from nfs_link() and nfs_sillyrename().
1942  * Assumes dvp and vp locked, and leaves them that way.
1943  */
1944 
1945 static int
1946 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
1947     size_t namelen, kauth_cred_t cred, struct lwp *l)
1948 {
1949 	u_int32_t *tl;
1950 	char *cp;
1951 #ifndef NFS_V2_ONLY
1952 	int32_t t1;
1953 	char *cp2;
1954 #endif
1955 	int32_t t2;
1956 	char *bpos, *dpos;
1957 	int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
1958 	struct mbuf *mreq, *mrep, *md, *mb;
1959 	const int v3 = NFS_ISV3(dvp);
1960 	int rexmit = 0;
1961 	struct nfsnode *np = VTONFS(vp);
1962 
1963 	nfsstats.rpccnt[NFSPROC_LINK]++;
1964 	nfsm_reqhead(np, NFSPROC_LINK,
1965 	    NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
1966 	nfsm_fhtom(np, v3);
1967 	nfsm_fhtom(VTONFS(dvp), v3);
1968 	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1969 	nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
1970 #ifndef NFS_V2_ONLY
1971 	if (v3) {
1972 		nfsm_postop_attr(vp, attrflag, 0);
1973 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1974 	}
1975 #endif
1976 	nfsm_reqdone;
1977 
1978 	VTONFS(dvp)->n_flag |= NMODIFIED;
1979 	if (!attrflag)
1980 		NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
1981 	if (!wccflag)
1982 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1983 
1984 	/*
1985 	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
1986 	 */
1987 	if (rexmit && error == EEXIST)
1988 		error = 0;
1989 
1990 	return error;
1991 }
1992 
1993 /*
1994  * nfs hard link create call
1995  */
1996 int
1997 nfs_link(void *v)
1998 {
1999 	struct vop_link_v2_args /* {
2000 		struct vnode *a_dvp;
2001 		struct vnode *a_vp;
2002 		struct componentname *a_cnp;
2003 	} */ *ap = v;
2004 	struct vnode *vp = ap->a_vp;
2005 	struct vnode *dvp = ap->a_dvp;
2006 	struct componentname *cnp = ap->a_cnp;
2007 	int error = 0, abrt = 1;
2008 
2009 	error = vn_lock(vp, LK_EXCLUSIVE);
2010 	if (error != 0)
2011 		goto out;
2012 
2013 	error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_ADD_LINK, vp,
2014 	    dvp, 0);
2015 	if (error)
2016 		goto out1;
2017 
2018 	abrt = 0;
2019 	/*
2020 	 * Push all writes to the server, so that the attribute cache
2021 	 * doesn't get "out of sync" with the server.
2022 	 * XXX There should be a better way!
2023 	 */
2024 	VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0);
2025 
2026 	error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2027 	    cnp->cn_cred, curlwp);
2028 
2029 	if (error == 0) {
2030 		cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2031 	}
2032 out1:
2033 	VOP_UNLOCK(vp);
2034 out:
2035 	if (abrt)
2036 		VOP_ABORTOP(dvp, cnp);
2037 	return (error);
2038 }
2039 
2040 /*
2041  * nfs symbolic link create call
2042  */
2043 int
2044 nfs_symlink(void *v)
2045 {
2046 	struct vop_symlink_v3_args /* {
2047 		struct vnode *a_dvp;
2048 		struct vnode **a_vpp;
2049 		struct componentname *a_cnp;
2050 		struct vattr *a_vap;
2051 		char *a_target;
2052 	} */ *ap = v;
2053 	struct vnode *dvp = ap->a_dvp;
2054 	struct vattr *vap = ap->a_vap;
2055 	struct componentname *cnp = ap->a_cnp;
2056 	struct nfsv2_sattr *sp;
2057 	u_int32_t *tl;
2058 	char *cp;
2059 	int32_t t1, t2;
2060 	char *bpos, *dpos, *cp2;
2061 	int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2062 	struct mbuf *mreq, *mrep, *md, *mb;
2063 	struct vnode *newvp = (struct vnode *)0;
2064 	const int v3 = NFS_ISV3(dvp);
2065 	int rexmit = 0;
2066 	struct nfsnode *dnp = VTONFS(dvp);
2067 
2068 	*ap->a_vpp = NULL;
2069 	nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2070 	slen = strlen(ap->a_target);
2071 	nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2072 	    nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2073 	nfsm_fhtom(dnp, v3);
2074 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2075 #ifndef NFS_V2_ONLY
2076 	if (v3)
2077 		nfsm_v3attrbuild(vap, false);
2078 #endif
2079 	nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2080 #ifndef NFS_V2_ONLY
2081 	if (!v3) {
2082 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2083 		sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2084 		sp->sa_uid = nfs_xdrneg1;
2085 		sp->sa_gid = nfs_xdrneg1;
2086 		sp->sa_size = nfs_xdrneg1;
2087 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2088 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2089 	}
2090 #endif
2091 	nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred,
2092 	    &rexmit);
2093 #ifndef NFS_V2_ONLY
2094 	if (v3) {
2095 		if (!error)
2096 			nfsm_mtofh(dvp, newvp, v3, gotvp);
2097 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2098 	}
2099 #endif
2100 	nfsm_reqdone;
2101 	/*
2102 	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2103 	 */
2104 	if (rexmit && error == EEXIST)
2105 		error = 0;
2106 	if (error == 0 || error == EEXIST)
2107 		cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2108 	if (error == 0 && newvp == NULL) {
2109 		struct nfsnode *np = NULL;
2110 
2111 		error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2112 		    cnp->cn_cred, curlwp, &np);
2113 		if (error == 0)
2114 			newvp = NFSTOV(np);
2115 	}
2116 	if (error) {
2117 		if (newvp != NULL)
2118 			vput(newvp);
2119 	} else {
2120 		*ap->a_vpp = newvp;
2121 		VOP_UNLOCK(newvp);
2122 	}
2123 	VTONFS(dvp)->n_flag |= NMODIFIED;
2124 	if (!wccflag)
2125 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2126 	return (error);
2127 }
2128 
2129 /*
2130  * nfs make dir call
2131  */
2132 int
2133 nfs_mkdir(void *v)
2134 {
2135 	struct vop_mkdir_v3_args /* {
2136 		struct vnode *a_dvp;
2137 		struct vnode **a_vpp;
2138 		struct componentname *a_cnp;
2139 		struct vattr *a_vap;
2140 	} */ *ap = v;
2141 	struct vnode *dvp = ap->a_dvp;
2142 	struct vattr *vap = ap->a_vap;
2143 	struct componentname *cnp = ap->a_cnp;
2144 	struct nfsv2_sattr *sp;
2145 	u_int32_t *tl;
2146 	char *cp;
2147 	int32_t t1, t2;
2148 	int len;
2149 	struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2150 	struct vnode *newvp = (struct vnode *)0;
2151 	char *bpos, *dpos, *cp2;
2152 	int error = 0, wccflag = NFSV3_WCCRATTR;
2153 	int gotvp = 0;
2154 	int rexmit = 0;
2155 	struct mbuf *mreq, *mrep, *md, *mb;
2156 	const int v3 = NFS_ISV3(dvp);
2157 
2158 	len = cnp->cn_namelen;
2159 	nfsstats.rpccnt[NFSPROC_MKDIR]++;
2160 	nfsm_reqhead(dnp, NFSPROC_MKDIR,
2161 	  NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2162 	nfsm_fhtom(dnp, v3);
2163 	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2164 #ifndef NFS_V2_ONLY
2165 	if (v3) {
2166 		nfsm_v3attrbuild(vap, false);
2167 	} else
2168 #endif
2169 	{
2170 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2171 		sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2172 		sp->sa_uid = nfs_xdrneg1;
2173 		sp->sa_gid = nfs_xdrneg1;
2174 		sp->sa_size = nfs_xdrneg1;
2175 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2176 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2177 	}
2178 	nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit);
2179 	if (!error)
2180 		nfsm_mtofh(dvp, newvp, v3, gotvp);
2181 	if (v3)
2182 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2183 	nfsm_reqdone;
2184 	VTONFS(dvp)->n_flag |= NMODIFIED;
2185 	if (!wccflag)
2186 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2187 	/*
2188 	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry,
2189 	 * provided we can succeed in looking up the directory.
2190 	 */
2191 	if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2192 		if (newvp) {
2193 			vput(newvp);
2194 			newvp = (struct vnode *)0;
2195 		}
2196 		error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2197 			curlwp, &np);
2198 		if (!error) {
2199 			newvp = NFSTOV(np);
2200 			if (newvp->v_type != VDIR || newvp == dvp)
2201 				error = EEXIST;
2202 		}
2203 	}
2204 	if (error) {
2205 		if (newvp) {
2206 			if (dvp != newvp)
2207 				vput(newvp);
2208 			else
2209 				vrele(newvp);
2210 		}
2211 	} else {
2212 		nfs_cache_enter(dvp, newvp, cnp);
2213 		*ap->a_vpp = newvp;
2214 		VOP_UNLOCK(newvp);
2215 	}
2216 	return (error);
2217 }
2218 
2219 /*
2220  * nfs remove directory call
2221  */
2222 int
2223 nfs_rmdir(void *v)
2224 {
2225 	struct vop_rmdir_v2_args /* {
2226 		struct vnode *a_dvp;
2227 		struct vnode *a_vp;
2228 		struct componentname *a_cnp;
2229 	} */ *ap = v;
2230 	struct vnode *vp = ap->a_vp;
2231 	struct vnode *dvp = ap->a_dvp;
2232 	struct componentname *cnp = ap->a_cnp;
2233 	u_int32_t *tl;
2234 	char *cp;
2235 #ifndef NFS_V2_ONLY
2236 	int32_t t1;
2237 	char *cp2;
2238 #endif
2239 	int32_t t2;
2240 	char *bpos, *dpos;
2241 	int error = 0, wccflag = NFSV3_WCCRATTR;
2242 	int rexmit = 0;
2243 	struct mbuf *mreq, *mrep, *md, *mb;
2244 	const int v3 = NFS_ISV3(dvp);
2245 	struct nfsnode *dnp;
2246 
2247 	if (dvp == vp) {
2248 		vrele(vp);
2249 		return (EINVAL);
2250 	}
2251 	nfsstats.rpccnt[NFSPROC_RMDIR]++;
2252 	dnp = VTONFS(dvp);
2253 	nfsm_reqhead(dnp, NFSPROC_RMDIR,
2254 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2255 	nfsm_fhtom(dnp, v3);
2256 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2257 	nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit);
2258 #ifndef NFS_V2_ONLY
2259 	if (v3)
2260 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2261 #endif
2262 	nfsm_reqdone;
2263 	VTONFS(dvp)->n_flag |= NMODIFIED;
2264 	if (!wccflag)
2265 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2266 	cache_purge(vp);
2267 	vput(vp);
2268 	/*
2269 	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2270 	 */
2271 	if (rexmit && error == ENOENT)
2272 		error = 0;
2273 	return (error);
2274 }
2275 
2276 /*
2277  * nfs readdir call
2278  */
2279 int
2280 nfs_readdir(void *v)
2281 {
2282 	struct vop_readdir_args /* {
2283 		struct vnode *a_vp;
2284 		struct uio *a_uio;
2285 		kauth_cred_t a_cred;
2286 		int *a_eofflag;
2287 		off_t **a_cookies;
2288 		int *a_ncookies;
2289 	} */ *ap = v;
2290 	struct vnode *vp = ap->a_vp;
2291 	struct uio *uio = ap->a_uio;
2292 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2293 	char *base = uio->uio_iov->iov_base;
2294 	int tresid, error;
2295 	size_t count, lost;
2296 	struct dirent *dp;
2297 	off_t *cookies = NULL;
2298 	int ncookies = 0, nc;
2299 
2300 	if (vp->v_type != VDIR)
2301 		return (EPERM);
2302 
2303 	lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2304 	count = uio->uio_resid - lost;
2305 	if (count <= 0)
2306 		return (EINVAL);
2307 
2308 	/*
2309 	 * Call nfs_bioread() to do the real work.
2310 	 */
2311 	tresid = uio->uio_resid = count;
2312 	error = nfs_bioread(vp, uio, 0, ap->a_cred,
2313 		    ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2314 
2315 	if (!error && ap->a_cookies) {
2316 		ncookies = count / 16;
2317 		cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2318 		*ap->a_cookies = cookies;
2319 	}
2320 
2321 	if (!error && uio->uio_resid == tresid) {
2322 		uio->uio_resid += lost;
2323 		nfsstats.direofcache_misses++;
2324 		if (ap->a_cookies)
2325 			*ap->a_ncookies = 0;
2326 		*ap->a_eofflag = 1;
2327 		return (0);
2328 	}
2329 
2330 	if (!error && ap->a_cookies) {
2331 		/*
2332 		 * Only the NFS server and emulations use cookies, and they
2333 		 * load the directory block into system space, so we can
2334 		 * just look at it directly.
2335 		 */
2336 		if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2337 		    uio->uio_iovcnt != 1)
2338 			panic("nfs_readdir: lost in space");
2339 		for (nc = 0; ncookies-- &&
2340 		     base < (char *)uio->uio_iov->iov_base; nc++){
2341 			dp = (struct dirent *) base;
2342 			if (dp->d_reclen == 0)
2343 				break;
2344 			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2345 				*(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2346 			else
2347 				*(cookies++) = NFS_GETCOOKIE(dp);
2348 			base += dp->d_reclen;
2349 		}
2350 		uio->uio_resid +=
2351 		    ((char *)uio->uio_iov->iov_base - base);
2352 		uio->uio_iov->iov_len +=
2353 		    ((char *)uio->uio_iov->iov_base - base);
2354 		uio->uio_iov->iov_base = base;
2355 		*ap->a_ncookies = nc;
2356 	}
2357 
2358 	uio->uio_resid += lost;
2359 	*ap->a_eofflag = 0;
2360 	return (error);
2361 }
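
/*
 * A simplified sketch of the cookie-collection pass above: directory
 * records are walked by their record length and the NFS cookie stashed
 * behind each entry is copied out.  The record layout assumed here (a
 * 16-bit length up front, the cookie in the trailing 8 bytes) is only an
 * illustration; the real layout comes from struct dirent and the
 * NFS_STASHCOOKIE()/NFS_GETCOOKIE() macros.  Under #if 0, never compiled.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t
collect_cookies(const char *buf, size_t buflen, int64_t *cookies, size_t max)
{
	size_t off = 0, n = 0;
	uint16_t reclen;

	while (n < max && off + sizeof(reclen) <= buflen) {
		memcpy(&reclen, buf + off, sizeof(reclen));
		if (reclen < sizeof(reclen) + sizeof(int64_t) ||
		    off + reclen > buflen)
			break;			/* truncated or corrupt record */
		/* the cookie occupies the last 8 bytes of the record */
		memcpy(&cookies[n++], buf + off + reclen - sizeof(int64_t),
		    sizeof(int64_t));
		off += reclen;
	}
	return n;
}
#endif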
2362 
2363 /*
2364  * Readdir rpc call.
2365  * Called from below the buffer cache by nfs_doio().
2366  */
2367 int
2368 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2369 {
2370 	int len, left;
2371 	struct dirent *dp = NULL;
2372 	u_int32_t *tl;
2373 	char *cp;
2374 	int32_t t1, t2;
2375 	char *bpos, *dpos, *cp2;
2376 	struct mbuf *mreq, *mrep, *md, *mb;
2377 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2378 	struct nfsnode *dnp = VTONFS(vp);
2379 	u_quad_t fileno;
2380 	int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2381 #ifndef NFS_V2_ONLY
2382 	int attrflag;
2383 #endif
2384 	int nrpcs = 0, reclen;
2385 	const int v3 = NFS_ISV3(vp);
2386 
2387 #ifdef DIAGNOSTIC
2388 	/*
2389 	 * Should be called from buffer cache, so only amount of
2390 	 * NFS_DIRBLKSIZ will be requested.
2391 	 */
2392 	if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2393 		panic("nfs readdirrpc bad uio");
2394 #endif
2395 
2396 	/*
2397 	 * Loop around doing readdir rpc's of size nm_readdirsize
2398 	 * truncated to a multiple of NFS_DIRFRAGSIZ.
2399 	 * The stopping criteria is EOF or buffer full.
2400 	 */
2401 	while (more_dirs && bigenough) {
2402 		/*
2403 		 * Heuristic: don't bother to do another RPC to further
2404 		 * fill up this block if there is not much room left. (< 50%
2405 		 * fill up this block if there is not much room left (< 50%
2406 		 * but can save up to 50% in RPC calls.
2407 		 */
2408 		if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2409 			bigenough = 0;
2410 			break;
2411 		}
2412 		nfsstats.rpccnt[NFSPROC_READDIR]++;
2413 		nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2414 			NFSX_READDIR(v3));
2415 		nfsm_fhtom(dnp, v3);
2416 #ifndef NFS_V2_ONLY
2417 		if (v3) {
2418 			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2419 			if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2420 				txdr_swapcookie3(uiop->uio_offset, tl);
2421 			} else {
2422 				txdr_cookie3(uiop->uio_offset, tl);
2423 			}
2424 			tl += 2;
2425 			if (uiop->uio_offset == 0) {
2426 				*tl++ = 0;
2427 				*tl++ = 0;
2428 			} else {
2429 				*tl++ = dnp->n_cookieverf.nfsuquad[0];
2430 				*tl++ = dnp->n_cookieverf.nfsuquad[1];
2431 			}
2432 		} else
2433 #endif
2434 		{
2435 			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2436 			*tl++ = txdr_unsigned(uiop->uio_offset);
2437 		}
2438 		*tl = txdr_unsigned(nmp->nm_readdirsize);
2439 		nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2440 		nrpcs++;
2441 #ifndef NFS_V2_ONLY
2442 		if (v3) {
2443 			nfsm_postop_attr(vp, attrflag, 0);
2444 			if (!error) {
2445 				nfsm_dissect(tl, u_int32_t *,
2446 				    2 * NFSX_UNSIGNED);
2447 				dnp->n_cookieverf.nfsuquad[0] = *tl++;
2448 				dnp->n_cookieverf.nfsuquad[1] = *tl;
2449 			} else {
2450 				m_freem(mrep);
2451 				goto nfsmout;
2452 			}
2453 		}
2454 #endif
2455 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2456 		more_dirs = fxdr_unsigned(int, *tl);
2457 
2458 		/* loop thru the dir entries, doctoring them to 4bsd form */
2459 		while (more_dirs && bigenough) {
2460 #ifndef NFS_V2_ONLY
2461 			if (v3) {
2462 				nfsm_dissect(tl, u_int32_t *,
2463 				    3 * NFSX_UNSIGNED);
2464 				fileno = fxdr_hyper(tl);
2465 				len = fxdr_unsigned(int, *(tl + 2));
2466 			} else
2467 #endif
2468 			{
2469 				nfsm_dissect(tl, u_int32_t *,
2470 				    2 * NFSX_UNSIGNED);
2471 				fileno = fxdr_unsigned(u_quad_t, *tl++);
2472 				len = fxdr_unsigned(int, *tl);
2473 			}
2474 			if (len <= 0 || len > NFS_MAXNAMLEN) {
2475 				error = EBADRPC;
2476 				m_freem(mrep);
2477 				goto nfsmout;
2478 			}
2479 			/* for cookie stashing */
2480 			reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2481 			left = NFS_DIRFRAGSIZ - blksiz;
2482 			if (reclen > left) {
2483 				memset(uiop->uio_iov->iov_base, 0, left);
2484 				dp->d_reclen += left;
2485 				UIO_ADVANCE(uiop, left);
2486 				blksiz = 0;
2487 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2488 			}
2489 			if (reclen > uiop->uio_resid)
2490 				bigenough = 0;
2491 			if (bigenough) {
2492 				int tlen;
2493 
2494 				dp = (struct dirent *)uiop->uio_iov->iov_base;
2495 				dp->d_fileno = fileno;
2496 				dp->d_namlen = len;
2497 				dp->d_reclen = reclen;
2498 				dp->d_type = DT_UNKNOWN;
2499 				blksiz += reclen;
2500 				if (blksiz == NFS_DIRFRAGSIZ)
2501 					blksiz = 0;
2502 				UIO_ADVANCE(uiop, DIRHDSIZ);
2503 				nfsm_mtouio(uiop, len);
2504 				tlen = reclen - (DIRHDSIZ + len);
2505 				(void)memset(uiop->uio_iov->iov_base, 0, tlen);
2506 				UIO_ADVANCE(uiop, tlen);
2507 			} else
2508 				nfsm_adv(nfsm_rndup(len));
2509 #ifndef NFS_V2_ONLY
2510 			if (v3) {
2511 				nfsm_dissect(tl, u_int32_t *,
2512 				    3 * NFSX_UNSIGNED);
2513 			} else
2514 #endif
2515 			{
2516 				nfsm_dissect(tl, u_int32_t *,
2517 				    2 * NFSX_UNSIGNED);
2518 			}
2519 			if (bigenough) {
2520 #ifndef NFS_V2_ONLY
2521 				if (v3) {
2522 					if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2523 						uiop->uio_offset =
2524 						    fxdr_swapcookie3(tl);
2525 					else
2526 						uiop->uio_offset =
2527 						    fxdr_cookie3(tl);
2528 				}
2529 				else
2530 #endif
2531 				{
2532 					uiop->uio_offset =
2533 					    fxdr_unsigned(off_t, *tl);
2534 				}
2535 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2536 			}
2537 			if (v3)
2538 				tl += 2;
2539 			else
2540 				tl++;
2541 			more_dirs = fxdr_unsigned(int, *tl);
2542 		}
2543 		/*
2544 		 * If at end of rpc data, get the eof boolean
2545 		 */
2546 		if (!more_dirs) {
2547 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2548 			more_dirs = (fxdr_unsigned(int, *tl) == 0);
2549 
2550 			/*
2551 			 * kludge: if we got no entries, treat it as EOF.
2552 			 * some servers occasionally send a reply with neither
2553 			 * entries nor the EOF flag set.
2554 			 * although it might mean the server has an entry with
2555 			 * a very long name, we can't handle such entries anyway.
2556 			 */
2557 
2558 			if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2559 				more_dirs = 0;
2560 		}
2561 		m_freem(mrep);
2562 	}
2563 	/*
2564 	 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ
2565 	 * by increasing d_reclen for the last record.
2566 	 */
2567 	if (blksiz > 0) {
2568 		left = NFS_DIRFRAGSIZ - blksiz;
2569 		memset(uiop->uio_iov->iov_base, 0, left);
2570 		dp->d_reclen += left;
2571 		NFS_STASHCOOKIE(dp, uiop->uio_offset);
2572 		UIO_ADVANCE(uiop, left);
2573 	}
2574 
2575 	/*
2576 	 * We are now either at the end of the directory or have filled the
2577 	 * block.
2578 	 */
2579 	if (bigenough) {
2580 		dnp->n_direofoffset = uiop->uio_offset;
2581 		dnp->n_flag |= NEOFVALID;
2582 	}
2583 nfsmout:
2584 	return (error);
2585 }
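
/*
 * Records produced above are packed into NFS_DIRFRAGSIZ-sized fragments;
 * when the next record will not fit in the space left in the current
 * fragment, the previous record's d_reclen is grown to absorb the
 * remainder so each fragment ends on an exact boundary.  A small model of
 * that decision (FRAGSIZE stands in for NFS_DIRFRAGSIZ); under #if 0,
 * never compiled.
 */
#if 0
#define FRAGSIZE 512u	/* assumed power of two, like NFS_DIRFRAGSIZ */

/*
 * Given 'used' bytes already written into the current fragment, return
 * how many padding bytes the previous record must absorb before a new
 * record of 'reclen' bytes can start.
 */
static unsigned
readdir_padding(unsigned used, unsigned reclen)
{
	unsigned left = FRAGSIZE - (used % FRAGSIZE);

	return (reclen > left) ? left : 0;
}
#endif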
2586 
2587 #ifndef NFS_V2_ONLY
2588 /*
2589  * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2590  */
2591 int
2592 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2593 {
2594 	int len, left;
2595 	struct dirent *dp = NULL;
2596 	u_int32_t *tl;
2597 	char *cp;
2598 	int32_t t1, t2;
2599 	struct vnode *newvp;
2600 	char *bpos, *dpos, *cp2;
2601 	struct mbuf *mreq, *mrep, *md, *mb;
2602 	struct nameidata nami, *ndp = &nami;
2603 	struct componentname *cnp = &ndp->ni_cnd;
2604 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2605 	struct nfsnode *dnp = VTONFS(vp), *np;
2606 	nfsfh_t *fhp;
2607 	u_quad_t fileno;
2608 	int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2609 	int attrflag, fhsize, nrpcs = 0, reclen;
2610 	struct nfs_fattr fattr, *fp;
2611 
2612 #ifdef DIAGNOSTIC
2613 	if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2614 		panic("nfs readdirplusrpc bad uio");
2615 #endif
2616 	ndp->ni_dvp = vp;
2617 	newvp = NULLVP;
2618 
2619 	/*
2620 	 * Loop around doing readdir rpc's of size nm_readdirsize
2621 	 * truncated to a multiple of NFS_DIRFRAGSIZ.
2622 	 * The stopping criteria is EOF or buffer full.
2623 	 */
2624 	while (more_dirs && bigenough) {
2625 		if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2626 			bigenough = 0;
2627 			break;
2628 		}
2629 		nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2630 		nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2631 			NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2632 		nfsm_fhtom(dnp, 1);
2633  		nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2634 		if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2635 			txdr_swapcookie3(uiop->uio_offset, tl);
2636 		} else {
2637 			txdr_cookie3(uiop->uio_offset, tl);
2638 		}
2639 		tl += 2;
2640 		if (uiop->uio_offset == 0) {
2641 			*tl++ = 0;
2642 			*tl++ = 0;
2643 		} else {
2644 			*tl++ = dnp->n_cookieverf.nfsuquad[0];
2645 			*tl++ = dnp->n_cookieverf.nfsuquad[1];
2646 		}
2647 		*tl++ = txdr_unsigned(nmp->nm_readdirsize);
2648 		*tl = txdr_unsigned(nmp->nm_rsize);
2649 		nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2650 		nfsm_postop_attr(vp, attrflag, 0);
2651 		if (error) {
2652 			m_freem(mrep);
2653 			goto nfsmout;
2654 		}
2655 		nrpcs++;
2656 		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2657 		dnp->n_cookieverf.nfsuquad[0] = *tl++;
2658 		dnp->n_cookieverf.nfsuquad[1] = *tl++;
2659 		more_dirs = fxdr_unsigned(int, *tl);
2660 
2661 		/* loop thru the dir entries, doctoring them to 4bsd form */
2662 		while (more_dirs && bigenough) {
2663 			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2664 			fileno = fxdr_hyper(tl);
2665 			len = fxdr_unsigned(int, *(tl + 2));
2666 			if (len <= 0 || len > NFS_MAXNAMLEN) {
2667 				error = EBADRPC;
2668 				m_freem(mrep);
2669 				goto nfsmout;
2670 			}
2671 			/* for cookie stashing */
2672 			reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2673 			left = NFS_DIRFRAGSIZ - blksiz;
2674 			if (reclen > left) {
2675 				/*
2676 				 * DIRFRAGSIZ is aligned, no need to align
2677 				 * again here.
2678 				 */
2679 				memset(uiop->uio_iov->iov_base, 0, left);
2680 				dp->d_reclen += left;
2681 				UIO_ADVANCE(uiop, left);
2682 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2683 				blksiz = 0;
2684 			}
2685 			if (reclen > uiop->uio_resid)
2686 				bigenough = 0;
2687 			if (bigenough) {
2688 				int tlen;
2689 
2690 				dp = (struct dirent *)uiop->uio_iov->iov_base;
2691 				dp->d_fileno = fileno;
2692 				dp->d_namlen = len;
2693 				dp->d_reclen = reclen;
2694 				dp->d_type = DT_UNKNOWN;
2695 				blksiz += reclen;
2696 				if (blksiz == NFS_DIRFRAGSIZ)
2697 					blksiz = 0;
2698 				UIO_ADVANCE(uiop, DIRHDSIZ);
2699 				nfsm_mtouio(uiop, len);
2700 				tlen = reclen - (DIRHDSIZ + len);
2701 				(void)memset(uiop->uio_iov->iov_base, 0, tlen);
2702 				UIO_ADVANCE(uiop, tlen);
2703 				cnp->cn_nameptr = dp->d_name;
2704 				cnp->cn_namelen = dp->d_namlen;
2705 			} else
2706 				nfsm_adv(nfsm_rndup(len));
2707 			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2708 			if (bigenough) {
2709 				if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2710 					uiop->uio_offset =
2711 						fxdr_swapcookie3(tl);
2712 				else
2713 					uiop->uio_offset =
2714 						fxdr_cookie3(tl);
2715 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2716 			}
2717 			tl += 2;
2718 
2719 			/*
2720 			 * Since the attributes are before the file handle
2721 			 * (sigh), we must skip over the attributes and then
2722 			 * come back and get them.
2723 			 */
2724 			attrflag = fxdr_unsigned(int, *tl);
2725 			if (attrflag) {
2726 			    nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2727 			    memcpy(&fattr, fp, NFSX_V3FATTR);
2728 			    nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2729 			    doit = fxdr_unsigned(int, *tl);
2730 			    if (doit) {
2731 				nfsm_getfh(fhp, fhsize, 1);
2732 				if (NFS_CMPFH(dnp, fhp, fhsize)) {
2733 				    vref(vp);
2734 				    newvp = vp;
2735 				    np = dnp;
2736 				} else {
2737 				    error = nfs_nget1(vp->v_mount, fhp,
2738 					fhsize, &np, LK_NOWAIT);
2739 				    if (!error)
2740 					newvp = NFSTOV(np);
2741 				}
2742 				if (!error) {
2743 				    nfs_loadattrcache(&newvp, &fattr, 0, 0);
2744 				    if (bigenough) {
2745 					dp->d_type =
2746 					   IFTODT(VTTOIF(np->n_vattr->va_type));
2747 					ndp->ni_vp = newvp;
2748 					nfs_cache_enter(ndp->ni_dvp,
2749 					    ndp->ni_vp, cnp);
2750 				    }
2751 				}
2752 				error = 0;
2753 			   }
2754 			} else {
2755 			    /* Just skip over the file handle */
2756 			    nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2757 			    i = fxdr_unsigned(int, *tl);
2758 			    nfsm_adv(nfsm_rndup(i));
2759 			}
2760 			if (newvp != NULLVP) {
2761 			    if (newvp == vp)
2762 				vrele(newvp);
2763 			    else
2764 				vput(newvp);
2765 			    newvp = NULLVP;
2766 			}
2767 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2768 			more_dirs = fxdr_unsigned(int, *tl);
2769 		}
2770 		/*
2771 		 * If at end of rpc data, get the eof boolean
2772 		 */
2773 		if (!more_dirs) {
2774 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2775 			more_dirs = (fxdr_unsigned(int, *tl) == 0);
2776 
2777 			/*
2778 			 * kludge: see a comment in nfs_readdirrpc.
2779 			 */
2780 
2781 			if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2782 				more_dirs = 0;
2783 		}
2784 		m_freem(mrep);
2785 	}
2786 	/*
2787 	 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ
2788 	 * by increasing d_reclen for the last record.
2789 	 */
2790 	if (blksiz > 0) {
2791 		left = NFS_DIRFRAGSIZ - blksiz;
2792 		memset(uiop->uio_iov->iov_base, 0, left);
2793 		dp->d_reclen += left;
2794 		NFS_STASHCOOKIE(dp, uiop->uio_offset);
2795 		UIO_ADVANCE(uiop, left);
2796 	}
2797 
2798 	/*
2799 	 * We are now either at the end of the directory or have filled the
2800 	 * block.
2801 	 */
2802 	if (bigenough) {
2803 		dnp->n_direofoffset = uiop->uio_offset;
2804 		dnp->n_flag |= NEOFVALID;
2805 	}
2806 nfsmout:
2807 	if (newvp != NULLVP) {
2808 		if (newvp == vp)
2809 		    vrele(newvp);
2810 		else
2811 		    vput(newvp);
2812 	}
2813 	return (error);
2814 }
2815 #endif
2816 
2817 /*
2818  * Silly rename. To make the NFS filesystem that is stateless look a little
2819  * more like the "ufs" a remove of an active vnode is translated to a rename
2820  * to a funny looking filename that is removed by nfs_inactive on the
2821  * nfsnode. There is the potential for another process on a different client
2822  * to create the same funny name between the nfs_lookitup() fails and the
2823  * nfs_rename() completes, but...
2824  */
2825 int
2826 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink)
2827 {
2828 	struct sillyrename *sp;
2829 	struct nfsnode *np;
2830 	int error;
2831 	pid_t pid;
2832 
2833 	cache_purge(dvp);
2834 	np = VTONFS(vp);
2835 #ifndef DIAGNOSTIC
2836 	if (vp->v_type == VDIR)
2837 		panic("nfs: sillyrename dir");
2838 #endif
2839 	sp = kmem_alloc(sizeof(*sp), KM_SLEEP);
2840 	sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2841 	sp->s_dvp = dvp;
2842 	vref(dvp);
2843 
2844 	/* Fudge together a funny name */
2845 	pid = curlwp->l_proc->p_pid;
2846 	memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
2847 	sp->s_namlen = 12;
2848 	sp->s_name[8] = hexdigits[pid & 0xf];
2849 	sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
2850 	sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
2851 	sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
2852 
2853 	/* Try lookitups until we get one that isn't there */
2854 	while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2855 		curlwp, (struct nfsnode **)0) == 0) {
2856 		sp->s_name[4]++;
2857 		if (sp->s_name[4] > 'z') {
2858 			error = EINVAL;
2859 			goto bad;
2860 		}
2861 	}
2862 	if (dolink) {
2863 		error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
2864 		    sp->s_cred, curlwp);
2865 		/*
2866 		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
2867 		 */
2868 		if (error == ENOTSUP) {
2869 			error = nfs_renameit(dvp, cnp, sp);
2870 		}
2871 	} else {
2872 		error = nfs_renameit(dvp, cnp, sp);
2873 	}
2874 	if (error)
2875 		goto bad;
2876 	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2877 		curlwp, &np);
2878 	np->n_sillyrename = sp;
2879 	return (0);
2880 bad:
2881 	vrele(sp->s_dvp);
2882 	kauth_cred_free(sp->s_cred);
2883 	kmem_free(sp, sizeof(*sp));
2884 	return (error);
2885 }
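
/*
 * A standalone version of the ".nfsAxxxx4.4" name construction used
 * above: the four 'x' positions take hex digits of the caller's pid and
 * the fifth character is bumped from 'A' upward until the name is free in
 * the directory.  The function name is hypothetical; under #if 0, never
 * compiled.
 */
#if 0
#include <string.h>
#include <sys/types.h>

static void
make_sillyname(char name[14], pid_t pid, char seq)
{
	static const char hex[] = "0123456789abcdef";

	memcpy(name, ".nfsAxxxx4.4", 13);	/* 12 characters + NUL */
	name[4] = seq;				/* 'A', bumped on collision */
	name[8] = hex[pid & 0xf];
	name[7] = hex[(pid >> 4) & 0xf];
	name[6] = hex[(pid >> 8) & 0xf];
	name[5] = hex[(pid >> 12) & 0xf];
}
#endif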
2886 
2887 /*
2888  * Look up a file name and optionally either update the file handle or
2889  * allocate an nfsnode, depending on the value of npp.
2890  * npp == NULL	--> just do the lookup
2891  * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2892  *			handled too
2893  * *npp != NULL --> update the file handle in the vnode
2894  */
2895 int
2896 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp)
2897 {
2898 	u_int32_t *tl;
2899 	char *cp;
2900 	int32_t t1, t2;
2901 	struct vnode *newvp = (struct vnode *)0;
2902 	struct nfsnode *np, *dnp = VTONFS(dvp);
2903 	char *bpos, *dpos, *cp2;
2904 	int error = 0, ofhlen, fhlen;
2905 #ifndef NFS_V2_ONLY
2906 	int attrflag;
2907 #endif
2908 	struct mbuf *mreq, *mrep, *md, *mb;
2909 	nfsfh_t *ofhp, *nfhp;
2910 	const int v3 = NFS_ISV3(dvp);
2911 
2912 	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2913 	nfsm_reqhead(dnp, NFSPROC_LOOKUP,
2914 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2915 	nfsm_fhtom(dnp, v3);
2916 	nfsm_strtom(name, len, NFS_MAXNAMLEN);
2917 	nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
2918 	if (npp && !error) {
2919 		nfsm_getfh(nfhp, fhlen, v3);
2920 		if (*npp) {
2921 		    np = *npp;
2922 		    newvp = NFSTOV(np);
2923 		    ofhlen = np->n_fhsize;
2924 		    ofhp = kmem_alloc(ofhlen, KM_SLEEP);
2925 		    memcpy(ofhp, np->n_fhp, ofhlen);
2926 		    error = vcache_rekey_enter(newvp->v_mount, newvp,
2927 			ofhp, ofhlen, nfhp, fhlen);
2928 		    if (error) {
2929 			kmem_free(ofhp, ofhlen);
2930 			m_freem(mrep);
2931 			return error;
2932 		    }
2933 		    if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2934 			kmem_free(np->n_fhp, np->n_fhsize);
2935 			np->n_fhp = &np->n_fh;
2936 		    }
2937 #if NFS_SMALLFH < NFSX_V3FHMAX
2938 		    else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
2939 			np->n_fhp = kmem_alloc(fhlen, KM_SLEEP);
2940 #endif
2941 		    memcpy(np->n_fhp, nfhp, fhlen);
2942 		    np->n_fhsize = fhlen;
2943 		    vcache_rekey_exit(newvp->v_mount, newvp,
2944 			ofhp, ofhlen, np->n_fhp, fhlen);
2945 		    kmem_free(ofhp, ofhlen);
2946 		} else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2947 		    vref(dvp);
2948 		    newvp = dvp;
2949 		    np = dnp;
2950 		} else {
2951 		    error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2952 		    if (error) {
2953 			m_freem(mrep);
2954 			return (error);
2955 		    }
2956 		    newvp = NFSTOV(np);
2957 		}
2958 #ifndef NFS_V2_ONLY
2959 		if (v3) {
2960 			nfsm_postop_attr(newvp, attrflag, 0);
2961 			if (!attrflag && *npp == NULL) {
2962 				m_freem(mrep);
2963 				vput(newvp);
2964 				return (ENOENT);
2965 			}
2966 		} else
2967 #endif
2968 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
2969 	}
2970 	nfsm_reqdone;
2971 	if (npp && *npp == NULL) {
2972 		if (error) {
2973 			if (newvp)
2974 				vput(newvp);
2975 		} else
2976 			*npp = np;
2977 	}
2978 	return (error);
2979 }
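
/*
 * The *npp != NULL branch above may have to move a node between its small
 * inline file handle buffer and a separately allocated one when the
 * handle size crosses NFS_SMALLFH.  A simplified model of that storage
 * decision, with malloc/free standing in for kmem_alloc/kmem_free and
 * hypothetical names throughout; under #if 0, never compiled.
 */
#if 0
#include <stdlib.h>
#include <string.h>

#define SMALLFH 48	/* stand-in for NFS_SMALLFH */

struct demo_node {
	unsigned char  inlinefh[SMALLFH];	/* small handles live here */
	unsigned char *fhp;			/* inlinefh or heap buffer */
	size_t         fhsize;
};

static int
store_filehandle(struct demo_node *n, const void *fh, size_t len)
{
	if (len <= SMALLFH) {
		if (n->fhp != n->inlinefh) {
			free(n->fhp);			/* shrink to inline */
			n->fhp = n->inlinefh;
		}
	} else if (n->fhp == n->inlinefh || len > n->fhsize) {
		unsigned char *p = malloc(len);		/* grow onto the heap */

		if (p == NULL)
			return -1;
		if (n->fhp != n->inlinefh)
			free(n->fhp);
		n->fhp = p;
	}
	memcpy(n->fhp, fh, len);
	n->fhsize = len;
	return 0;
}
#endif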
2980 
2981 #ifndef NFS_V2_ONLY
2982 /*
2983  * Nfs Version 3 commit rpc
2984  */
2985 int
2986 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l)
2987 {
2988 	char *cp;
2989 	u_int32_t *tl;
2990 	int32_t t1, t2;
2991 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2992 	char *bpos, *dpos, *cp2;
2993 	int error = 0, wccflag = NFSV3_WCCRATTR;
2994 	struct mbuf *mreq, *mrep, *md, *mb;
2995 	struct nfsnode *np;
2996 
2997 	KASSERT(NFS_ISV3(vp));
2998 
2999 #ifdef NFS_DEBUG_COMMIT
3000 	printf("commit %lu - %lu\n", (unsigned long)offset,
3001 	    (unsigned long)(offset + cnt));
3002 #endif
3003 
3004 	mutex_enter(&nmp->nm_lock);
3005 	if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3006 		mutex_exit(&nmp->nm_lock);
3007 		return (0);
3008 	}
3009 	mutex_exit(&nmp->nm_lock);
3010 	nfsstats.rpccnt[NFSPROC_COMMIT]++;
3011 	np = VTONFS(vp);
3012 	nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3013 	nfsm_fhtom(np, 1);
3014 	nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3015 	txdr_hyper(offset, tl);
3016 	tl += 2;
3017 	*tl = txdr_unsigned(cnt);
3018 	nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3019 	nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3020 	if (!error) {
3021 		nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
3022 		mutex_enter(&nmp->nm_lock);
3023 		if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3024 		    memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3025 			memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3026 			error = NFSERR_STALEWRITEVERF;
3027 			nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3028 		}
3029 		mutex_exit(&nmp->nm_lock);
3030 	}
3031 	nfsm_reqdone;
3032 	return (error);
3033 }
3034 #endif
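
/*
 * The verifier check in nfs_commit() above follows the NFSv3 rule that a
 * changed 8-byte write verifier (NFSX_V3WRITEVERF) means the server may
 * have lost uncommitted data, so all UNSTABLE writes must be redone.  A
 * minimal sketch of that comparison, with a hypothetical helper name;
 * under #if 0, never compiled.
 */
#if 0
#include <stdbool.h>
#include <string.h>

#define WRITEVERFSIZE 8		/* NFSX_V3WRITEVERF */

static bool
writeverf_changed(unsigned char cached[WRITEVERFSIZE],
    const unsigned char reply[WRITEVERFSIZE])
{
	if (memcmp(cached, reply, WRITEVERFSIZE) != 0) {
		memcpy(cached, reply, WRITEVERFSIZE);
		return true;	/* caller must resend uncommitted writes */
	}
	return false;
}
#endif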
3035 
3036 /*
3037  * Kludge City..
3038  * - make nfs_bmap() essentially a no-op that does no translation
3039  * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3040  *   (Maybe I could use the process's page mapping, but I was concerned that
3041  *    Kernel Write might not be enabled, and also figured copyout() would do
3042  *    a lot more work than memcpy(); it also currently happens in the
3043  *    context of the swapper process (2).)
3044  */
3045 int
3046 nfs_bmap(void *v)
3047 {
3048 	struct vop_bmap_args /* {
3049 		struct vnode *a_vp;
3050 		daddr_t  a_bn;
3051 		struct vnode **a_vpp;
3052 		daddr_t *a_bnp;
3053 		int *a_runp;
3054 	} */ *ap = v;
3055 	struct vnode *vp = ap->a_vp;
3056 	int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3057 
3058 	if (ap->a_vpp != NULL)
3059 		*ap->a_vpp = vp;
3060 	if (ap->a_bnp != NULL)
3061 		*ap->a_bnp = ap->a_bn << bshift;
3062 	if (ap->a_runp != NULL)
3063 		*ap->a_runp = 1024 * 1024; /* XXX */
3064 	return (0);
3065 }
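
/*
 * nfs_bmap() does no real translation: it only rescales a logical block
 * number from filesystem-block units (2^fs_bshift bytes) to device-block
 * units (2^dev_bshift bytes).  For example, with 8 KiB filesystem blocks
 * and 512-byte device blocks, block 3 becomes 3 << (13 - 9) == 48.  A
 * one-line sketch of the arithmetic; under #if 0, never compiled.
 */
#if 0
#include <stdint.h>

static int64_t
rescale_blkno(int64_t bn, int fs_bshift, int dev_bshift)
{
	return bn << (fs_bshift - dev_bshift);
}
#endif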
3066 
3067 /*
3068  * Strategy routine.
3069  * For async requests when nfsiod(s) are running, queue the request by
3070  * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
3071  * request.
3072  */
3073 int
3074 nfs_strategy(void *v)
3075 {
3076 	struct vop_strategy_args *ap = v;
3077 	struct buf *bp = ap->a_bp;
3078 	int error = 0;
3079 
3080 	if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3081 		panic("nfs physio/async");
3082 
3083 	/*
3084 	 * If the op is asynchronous and an i/o daemon is waiting,
3085 	 * queue the request, wake it up and wait for completion;
3086 	 * otherwise just do it ourselves.
3087 	 */
3088 	if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3089 		error = nfs_doio(bp);
3090 	return (error);
3091 }
3092 
3093 /*
3094  * fsync vnode op. Just call nfs_flush() with commit == 1.
3095  */
3096 /* ARGSUSED */
3097 int
3098 nfs_fsync(void *v)
3099 {
3100 	struct vop_fsync_args /* {
3101 		struct vnodeop_desc *a_desc;
3102 		struct vnode * a_vp;
3103 		kauth_cred_t  a_cred;
3104 		int  a_flags;
3105 		off_t offlo;
3106 		off_t offhi;
3107 		struct lwp * a_l;
3108 	} */ *ap = v;
3109 
3110 	struct vnode *vp = ap->a_vp;
3111 
3112 	if (vp->v_type != VREG)
3113 		return 0;
3114 
3115 	return (nfs_flush(vp, ap->a_cred,
3116 	    (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3117 }
3118 
3119 /*
3120  * Flush all the data associated with a vnode.
3121  */
3122 int
3123 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3124     int commit)
3125 {
3126 	struct nfsnode *np = VTONFS(vp);
3127 	int error;
3128 	int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3129 	UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3130 
3131 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
3132 	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3133 	if (np->n_flag & NWRITEERR) {
3134 		error = np->n_error;
3135 		np->n_flag &= ~NWRITEERR;
3136 	}
3137 	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3138 	return (error);
3139 }
3140 
3141 /*
3142  * Return POSIX pathconf information applicable to nfs.
3143  *
3144  * N.B. The NFS V2 protocol doesn't support this RPC.
3145  */
3146 /* ARGSUSED */
3147 int
3148 nfs_pathconf(void *v)
3149 {
3150 	struct vop_pathconf_args /* {
3151 		struct vnode *a_vp;
3152 		int a_name;
3153 		register_t *a_retval;
3154 	} */ *ap = v;
3155 	struct nfsv3_pathconf *pcp;
3156 	struct vnode *vp = ap->a_vp;
3157 	struct mbuf *mreq, *mrep, *md, *mb;
3158 	int32_t t1, t2;
3159 	u_int32_t *tl;
3160 	char *bpos, *dpos, *cp, *cp2;
3161 	int error = 0, attrflag;
3162 #ifndef NFS_V2_ONLY
3163 	struct nfsmount *nmp;
3164 	unsigned int l;
3165 	u_int64_t maxsize;
3166 #endif
3167 	const int v3 = NFS_ISV3(vp);
3168 	struct nfsnode *np = VTONFS(vp);
3169 
3170 	switch (ap->a_name) {
3171 		/* Names that can be resolved locally. */
3172 	case _PC_PIPE_BUF:
3173 		*ap->a_retval = PIPE_BUF;
3174 		break;
3175 	case _PC_SYNC_IO:
3176 		*ap->a_retval = 1;
3177 		break;
3178 	/* Names that cannot be resolved locally; do an RPC, if possible. */
3179 	case _PC_LINK_MAX:
3180 	case _PC_NAME_MAX:
3181 	case _PC_CHOWN_RESTRICTED:
3182 	case _PC_NO_TRUNC:
3183 		if (!v3) {
3184 			error = EINVAL;
3185 			break;
3186 		}
3187 		nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3188 		nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3189 		nfsm_fhtom(np, 1);
3190 		nfsm_request(np, NFSPROC_PATHCONF,
3191 		    curlwp, curlwp->l_cred);	/* XXX */
3192 		nfsm_postop_attr(vp, attrflag, 0);
3193 		if (!error) {
3194 			nfsm_dissect(pcp, struct nfsv3_pathconf *,
3195 			    NFSX_V3PATHCONF);
3196 			switch (ap->a_name) {
3197 			case _PC_LINK_MAX:
3198 				*ap->a_retval =
3199 				    fxdr_unsigned(register_t, pcp->pc_linkmax);
3200 				break;
3201 			case _PC_NAME_MAX:
3202 				*ap->a_retval =
3203 				    fxdr_unsigned(register_t, pcp->pc_namemax);
3204 				break;
3205 			case _PC_CHOWN_RESTRICTED:
3206 				*ap->a_retval =
3207 				    (pcp->pc_chownrestricted == nfs_true);
3208 				break;
3209 			case _PC_NO_TRUNC:
3210 				*ap->a_retval =
3211 				    (pcp->pc_notrunc == nfs_true);
3212 				break;
3213 			}
3214 		}
3215 		nfsm_reqdone;
3216 		break;
3217 	case _PC_FILESIZEBITS:
3218 #ifndef NFS_V2_ONLY
3219 		if (v3) {
3220 			nmp = VFSTONFS(vp->v_mount);
3221 			if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3222 				if ((error = nfs_fsinfo(nmp, vp,
3223 				    curlwp->l_cred, curlwp)) != 0) /* XXX */
3224 					break;
3225 			for (l = 0, maxsize = nmp->nm_maxfilesize;
3226 			    (maxsize >> l) > 0; l++)
3227 				;
3228 			*ap->a_retval = l + 1;
3229 		} else
3230 #endif
3231 		{
3232 			*ap->a_retval = 32;	/* NFS V2 limitation */
3233 		}
3234 		break;
3235 	default:
3236 		error = genfs_pathconf(ap);
3237 		break;
3238 	}
3239 
3240 	return (error);
3241 }
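
/*
 * The _PC_FILESIZEBITS loop above computes the minimum number of bits
 * needed to represent the mount's maximum file size as a signed value:
 * count the significant bits of nm_maxfilesize and add one for the sign.
 * For instance, a limit of 2^40 - 1 bytes yields 40 + 1 = 41.  A
 * standalone sketch; under #if 0, never compiled.
 */
#if 0
#include <stdint.h>

static int
filesizebits(uint64_t maxsize)
{
	int bits = 0;

	while (bits < 64 && (maxsize >> bits) > 0)
		bits++;			/* significant bits in maxsize */
	return bits + 1;		/* plus the sign bit */
}
#endif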
3242 
3243 /*
3244  * NFS advisory byte-level locks.
3245  */
3246 int
3247 nfs_advlock(void *v)
3248 {
3249 	struct vop_advlock_args /* {
3250 		struct vnode *a_vp;
3251 		void *a_id;
3252 		int  a_op;
3253 		struct flock *a_fl;
3254 		int  a_flags;
3255 	} */ *ap = v;
3256 	struct nfsnode *np = VTONFS(ap->a_vp);
3257 
3258 	return lf_advlock(ap, &np->n_lockf, np->n_size);
3259 }
3260 
3261 /*
3262  * Print out the contents of an nfsnode.
3263  */
3264 int
3265 nfs_print(void *v)
3266 {
3267 	struct vop_print_args /* {
3268 		struct vnode *a_vp;
3269 	} */ *ap = v;
3270 	struct vnode *vp = ap->a_vp;
3271 	struct nfsnode *np = VTONFS(vp);
3272 
3273 	printf("tag VT_NFS, fileid %lld fsid 0x%llx",
3274 	    (unsigned long long)np->n_vattr->va_fileid,
3275 	    (unsigned long long)np->n_vattr->va_fsid);
3276 	if (vp->v_type == VFIFO)
3277 		VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
3278 	printf("\n");
3279 	return (0);
3280 }
3281 
3282 /*
3283  * nfs unlock wrapper.
3284  */
3285 int
3286 nfs_unlock(void *v)
3287 {
3288 	struct vop_unlock_args /* {
3289 		struct vnode *a_vp;
3290 		int a_flags;
3291 	} */ *ap = v;
3292 	struct vnode *vp = ap->a_vp;
3293 
3294 	/*
3295 	 * VOP_UNLOCK can be called by nfs_loadattrcache
3296 	 * with v_data == 0.
3297 	 */
3298 	if (VTONFS(vp)) {
3299 		nfs_delayedtruncate(vp);
3300 	}
3301 
3302 	return genfs_unlock(v);
3303 }
3304 
3305 /*
3306  * nfs special file access vnode op.
3307  * Essentially just get vattr and then imitate iaccess() since the device is
3308  * local to the client.
3309  */
3310 int
3311 nfsspec_access(void *v)
3312 {
3313 	struct vop_access_args /* {
3314 		struct vnode *a_vp;
3315 		accmode_t  a_accmode;
3316 		kauth_cred_t a_cred;
3317 		struct lwp *a_l;
3318 	} */ *ap = v;
3319 	struct vattr va;
3320 	struct vnode *vp = ap->a_vp;
3321 	int error;
3322 
3323 	error = VOP_GETATTR(vp, &va, ap->a_cred);
3324 	if (error)
3325 		return (error);
3326 
3327         /*
3328 	 * Disallow write attempts on filesystems mounted read-only;
3329 	 * unless the file is a socket, fifo, or a block or character
3330 	 * device resident on the filesystem.
3331 	 */
3332 	if ((ap->a_accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3333 		switch (vp->v_type) {
3334 		case VREG:
3335 		case VDIR:
3336 		case VLNK:
3337 			return (EROFS);
3338 		default:
3339 			break;
3340 		}
3341 	}
3342 
3343 	return kauth_authorize_vnode(ap->a_cred, KAUTH_ACCESS_ACTION(
3344 	    ap->a_accmode, va.va_type, va.va_mode), vp, NULL, genfs_can_access(
3345 	    vp, ap->a_cred, va.va_uid, va.va_gid, va.va_mode, NULL,
3346 	    ap->a_accmode));
3347 }
3348 
3349 /*
3350  * Read wrapper for special devices.
3351  */
3352 int
3353 nfsspec_read(void *v)
3354 {
3355 	struct vop_read_args /* {
3356 		struct vnode *a_vp;
3357 		struct uio *a_uio;
3358 		int  a_ioflag;
3359 		kauth_cred_t a_cred;
3360 	} */ *ap = v;
3361 	struct nfsnode *np = VTONFS(ap->a_vp);
3362 
3363 	/*
3364 	 * Set access flag.
3365 	 */
3366 	np->n_flag |= NACC;
3367 	getnanotime(&np->n_atim);
3368 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3369 }
3370 
3371 /*
3372  * Write wrapper for special devices.
3373  */
3374 int
3375 nfsspec_write(void *v)
3376 {
3377 	struct vop_write_args /* {
3378 		struct vnode *a_vp;
3379 		struct uio *a_uio;
3380 		int  a_ioflag;
3381 		kauth_cred_t a_cred;
3382 	} */ *ap = v;
3383 	struct nfsnode *np = VTONFS(ap->a_vp);
3384 
3385 	/*
3386 	 * Set update flag.
3387 	 */
3388 	np->n_flag |= NUPD;
3389 	getnanotime(&np->n_mtim);
3390 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3391 }
3392 
3393 /*
3394  * Close wrapper for special devices.
3395  *
3396  * Update the times on the nfsnode then do device close.
3397  */
3398 int
3399 nfsspec_close(void *v)
3400 {
3401 	struct vop_close_args /* {
3402 		struct vnode *a_vp;
3403 		int  a_fflag;
3404 		kauth_cred_t a_cred;
3405 		struct lwp *a_l;
3406 	} */ *ap = v;
3407 	struct vnode *vp = ap->a_vp;
3408 	struct nfsnode *np = VTONFS(vp);
3409 	struct vattr vattr;
3410 
3411 	if (np->n_flag & (NACC | NUPD)) {
3412 		np->n_flag |= NCHG;
3413 		if (vrefcnt(vp) == 1 &&
3414 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3415 			vattr_null(&vattr);
3416 			if (np->n_flag & NACC)
3417 				vattr.va_atime = np->n_atim;
3418 			if (np->n_flag & NUPD)
3419 				vattr.va_mtime = np->n_mtim;
3420 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3421 		}
3422 	}
3423 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3424 }
3425 
3426 /*
3427  * Read wrapper for fifos.
3428  */
3429 int
3430 nfsfifo_read(void *v)
3431 {
3432 	struct vop_read_args /* {
3433 		struct vnode *a_vp;
3434 		struct uio *a_uio;
3435 		int  a_ioflag;
3436 		kauth_cred_t a_cred;
3437 	} */ *ap = v;
3438 	struct nfsnode *np = VTONFS(ap->a_vp);
3439 
3440 	/*
3441 	 * Set access flag.
3442 	 */
3443 	np->n_flag |= NACC;
3444 	getnanotime(&np->n_atim);
3445 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3446 }
3447 
3448 /*
3449  * Write wrapper for fifos.
3450  */
3451 int
3452 nfsfifo_write(void *v)
3453 {
3454 	struct vop_write_args /* {
3455 		struct vnode *a_vp;
3456 		struct uio *a_uio;
3457 		int  a_ioflag;
3458 		kauth_cred_t a_cred;
3459 	} */ *ap = v;
3460 	struct nfsnode *np = VTONFS(ap->a_vp);
3461 
3462 	/*
3463 	 * Set update flag.
3464 	 */
3465 	np->n_flag |= NUPD;
3466 	getnanotime(&np->n_mtim);
3467 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3468 }
3469 
3470 /*
3471  * Close wrapper for fifos.
3472  *
3473  * Update the times on the nfsnode then do fifo close.
3474  */
3475 int
3476 nfsfifo_close(void *v)
3477 {
3478 	struct vop_close_args /* {
3479 		struct vnode *a_vp;
3480 		int  a_fflag;
3481 		kauth_cred_t a_cred;
3482 		struct lwp *a_l;
3483 	} */ *ap = v;
3484 	struct vnode *vp = ap->a_vp;
3485 	struct nfsnode *np = VTONFS(vp);
3486 	struct vattr vattr;
3487 
3488 	if (np->n_flag & (NACC | NUPD)) {
3489 		struct timespec ts;
3490 
3491 		getnanotime(&ts);
3492 		if (np->n_flag & NACC)
3493 			np->n_atim = ts;
3494 		if (np->n_flag & NUPD)
3495 			np->n_mtim = ts;
3496 		np->n_flag |= NCHG;
3497 		if (vrefcnt(vp) == 1 &&
3498 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3499 			vattr_null(&vattr);
3500 			if (np->n_flag & NACC)
3501 				vattr.va_atime = np->n_atim;
3502 			if (np->n_flag & NUPD)
3503 				vattr.va_mtime = np->n_mtim;
3504 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3505 		}
3506 	}
3507 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3508 }
3509