xref: /netbsd-src/sys/nfs/nfs_vnops.c (revision 46f5119e40af2e51998f686b2fdcc76b5488f7f3)
1 /*	$NetBSD: nfs_vnops.c,v 1.290 2011/04/24 21:35:30 rmind Exp $	*/
2 
3 /*
4  * Copyright (c) 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Rick Macklem at The University of Guelph.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)nfs_vnops.c	8.19 (Berkeley) 7/31/95
35  */
36 
37 /*
38  * vnode op calls for Sun NFS version 2 and 3
39  */
40 
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.290 2011/04/24 21:35:30 rmind Exp $");
43 
44 #ifdef _KERNEL_OPT
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47 #endif
48 
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/kernel.h>
52 #include <sys/systm.h>
53 #include <sys/resourcevar.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/condvar.h>
57 #include <sys/disk.h>
58 #include <sys/malloc.h>
59 #include <sys/kmem.h>
60 #include <sys/mbuf.h>
61 #include <sys/mutex.h>
62 #include <sys/namei.h>
63 #include <sys/vnode.h>
64 #include <sys/dirent.h>
65 #include <sys/fcntl.h>
66 #include <sys/hash.h>
67 #include <sys/lockf.h>
68 #include <sys/stat.h>
69 #include <sys/unistd.h>
70 #include <sys/kauth.h>
71 
72 #include <uvm/uvm_extern.h>
73 #include <uvm/uvm.h>
74 
75 #include <miscfs/fifofs/fifo.h>
76 #include <miscfs/genfs/genfs.h>
77 #include <miscfs/genfs/genfs_node.h>
78 #include <miscfs/specfs/specdev.h>
79 
80 #include <nfs/rpcv2.h>
81 #include <nfs/nfsproto.h>
82 #include <nfs/nfs.h>
83 #include <nfs/nfsnode.h>
84 #include <nfs/nfsmount.h>
85 #include <nfs/xdr_subs.h>
86 #include <nfs/nfsm_subs.h>
87 #include <nfs/nfs_var.h>
88 
89 #include <net/if.h>
90 #include <netinet/in.h>
91 #include <netinet/in_var.h>
92 
93 /*
94  * Global vfs data structures for nfs
95  */
96 int (**nfsv2_vnodeop_p)(void *);
97 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
98 	{ &vop_default_desc, vn_default_error },
99 	{ &vop_lookup_desc, nfs_lookup },		/* lookup */
100 	{ &vop_create_desc, nfs_create },		/* create */
101 	{ &vop_mknod_desc, nfs_mknod },			/* mknod */
102 	{ &vop_open_desc, nfs_open },			/* open */
103 	{ &vop_close_desc, nfs_close },			/* close */
104 	{ &vop_access_desc, nfs_access },		/* access */
105 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
106 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
107 	{ &vop_read_desc, nfs_read },			/* read */
108 	{ &vop_write_desc, nfs_write },			/* write */
109 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
110 	{ &vop_ioctl_desc, nfs_ioctl },			/* ioctl */
111 	{ &vop_poll_desc, nfs_poll },			/* poll */
112 	{ &vop_kqfilter_desc, nfs_kqfilter },		/* kqfilter */
113 	{ &vop_revoke_desc, nfs_revoke },		/* revoke */
114 	{ &vop_mmap_desc, nfs_mmap },			/* mmap */
115 	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
116 	{ &vop_seek_desc, nfs_seek },			/* seek */
117 	{ &vop_remove_desc, nfs_remove },		/* remove */
118 	{ &vop_link_desc, nfs_link },			/* link */
119 	{ &vop_rename_desc, nfs_rename },		/* rename */
120 	{ &vop_mkdir_desc, nfs_mkdir },			/* mkdir */
121 	{ &vop_rmdir_desc, nfs_rmdir },			/* rmdir */
122 	{ &vop_symlink_desc, nfs_symlink },		/* symlink */
123 	{ &vop_readdir_desc, nfs_readdir },		/* readdir */
124 	{ &vop_readlink_desc, nfs_readlink },		/* readlink */
125 	{ &vop_abortop_desc, nfs_abortop },		/* abortop */
126 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
127 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
128 	{ &vop_lock_desc, nfs_lock },			/* lock */
129 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
130 	{ &vop_bmap_desc, nfs_bmap },			/* bmap */
131 	{ &vop_strategy_desc, nfs_strategy },		/* strategy */
132 	{ &vop_print_desc, nfs_print },			/* print */
133 	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
134 	{ &vop_pathconf_desc, nfs_pathconf },		/* pathconf */
135 	{ &vop_advlock_desc, nfs_advlock },		/* advlock */
136 	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
137 	{ &vop_getpages_desc, nfs_getpages },		/* getpages */
138 	{ &vop_putpages_desc, genfs_putpages },		/* putpages */
139 	{ NULL, NULL }
140 };
141 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
142 	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
143 
144 /*
145  * Special device vnode ops
146  */
147 int (**spec_nfsv2nodeop_p)(void *);
148 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
149 	{ &vop_default_desc, vn_default_error },
150 	{ &vop_lookup_desc, spec_lookup },		/* lookup */
151 	{ &vop_create_desc, spec_create },		/* create */
152 	{ &vop_mknod_desc, spec_mknod },		/* mknod */
153 	{ &vop_open_desc, spec_open },			/* open */
154 	{ &vop_close_desc, nfsspec_close },		/* close */
155 	{ &vop_access_desc, nfsspec_access },		/* access */
156 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
157 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
158 	{ &vop_read_desc, nfsspec_read },		/* read */
159 	{ &vop_write_desc, nfsspec_write },		/* write */
160 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
161 	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
162 	{ &vop_poll_desc, spec_poll },			/* poll */
163 	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
164 	{ &vop_revoke_desc, spec_revoke },		/* revoke */
165 	{ &vop_mmap_desc, spec_mmap },			/* mmap */
166 	{ &vop_fsync_desc, spec_fsync },		/* fsync */
167 	{ &vop_seek_desc, spec_seek },			/* seek */
168 	{ &vop_remove_desc, spec_remove },		/* remove */
169 	{ &vop_link_desc, spec_link },			/* link */
170 	{ &vop_rename_desc, spec_rename },		/* rename */
171 	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
172 	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
173 	{ &vop_symlink_desc, spec_symlink },		/* symlink */
174 	{ &vop_readdir_desc, spec_readdir },		/* readdir */
175 	{ &vop_readlink_desc, spec_readlink },		/* readlink */
176 	{ &vop_abortop_desc, spec_abortop },		/* abortop */
177 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
178 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
179 	{ &vop_lock_desc, nfs_lock },			/* lock */
180 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
181 	{ &vop_bmap_desc, spec_bmap },			/* bmap */
182 	{ &vop_strategy_desc, spec_strategy },		/* strategy */
183 	{ &vop_print_desc, nfs_print },			/* print */
184 	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
185 	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
186 	{ &vop_advlock_desc, spec_advlock },		/* advlock */
187 	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
188 	{ &vop_getpages_desc, spec_getpages },		/* getpages */
189 	{ &vop_putpages_desc, spec_putpages },		/* putpages */
190 	{ NULL, NULL }
191 };
192 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
193 	{ &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
194 
195 int (**fifo_nfsv2nodeop_p)(void *);
196 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
197 	{ &vop_default_desc, vn_default_error },
198 	{ &vop_lookup_desc, vn_fifo_bypass },		/* lookup */
199 	{ &vop_create_desc, vn_fifo_bypass },		/* create */
200 	{ &vop_mknod_desc, vn_fifo_bypass },		/* mknod */
201 	{ &vop_open_desc, vn_fifo_bypass },		/* open */
202 	{ &vop_close_desc, nfsfifo_close },		/* close */
203 	{ &vop_access_desc, nfsspec_access },		/* access */
204 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
205 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
206 	{ &vop_read_desc, nfsfifo_read },		/* read */
207 	{ &vop_write_desc, nfsfifo_write },		/* write */
208 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
209 	{ &vop_ioctl_desc, vn_fifo_bypass },		/* ioctl */
210 	{ &vop_poll_desc, vn_fifo_bypass },		/* poll */
211 	{ &vop_kqfilter_desc, vn_fifo_bypass },		/* kqfilter */
212 	{ &vop_revoke_desc, vn_fifo_bypass },		/* revoke */
213 	{ &vop_mmap_desc, vn_fifo_bypass },		/* mmap */
214 	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
215 	{ &vop_seek_desc, vn_fifo_bypass },		/* seek */
216 	{ &vop_remove_desc, vn_fifo_bypass },		/* remove */
217 	{ &vop_link_desc, vn_fifo_bypass },		/* link */
218 	{ &vop_rename_desc, vn_fifo_bypass },		/* rename */
219 	{ &vop_mkdir_desc, vn_fifo_bypass },		/* mkdir */
220 	{ &vop_rmdir_desc, vn_fifo_bypass },		/* rmdir */
221 	{ &vop_symlink_desc, vn_fifo_bypass },		/* symlink */
222 	{ &vop_readdir_desc, vn_fifo_bypass },		/* readdir */
223 	{ &vop_readlink_desc, vn_fifo_bypass },		/* readlink */
224 	{ &vop_abortop_desc, vn_fifo_bypass },		/* abortop */
225 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
226 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
227 	{ &vop_lock_desc, nfs_lock },			/* lock */
228 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
229 	{ &vop_bmap_desc, vn_fifo_bypass },		/* bmap */
230 	{ &vop_strategy_desc, genfs_badop },		/* strategy */
231 	{ &vop_print_desc, nfs_print },			/* print */
232 	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
233 	{ &vop_pathconf_desc, vn_fifo_bypass },		/* pathconf */
234 	{ &vop_advlock_desc, vn_fifo_bypass },		/* advlock */
235 	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
236 	{ &vop_putpages_desc, vn_fifo_bypass }, 	/* putpages */
237 	{ NULL, NULL }
238 };
239 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
240 	{ &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
241 
242 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
243     size_t, kauth_cred_t, struct lwp *);
244 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);
245 
246 /*
247  * Global variables
248  */
249 extern u_int32_t nfs_true, nfs_false;
250 extern u_int32_t nfs_xdrneg1;
251 extern const nfstype nfsv3_type[9];
252 
253 int nfs_numasync = 0;
254 #define	DIRHDSIZ	_DIRENT_NAMEOFF(dp)
255 #define UIO_ADVANCE(uio, siz) \
256     (void)((uio)->uio_resid -= (siz), \
257     (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
258     (uio)->uio_iov->iov_len -= (siz))
259 
260 static void nfs_cache_enter(struct vnode *, struct vnode *,
261     struct componentname *);
262 
263 static void
264 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
265     struct componentname *cnp)
266 {
267 	struct nfsnode *dnp = VTONFS(dvp);
268 
269 	if (vp != NULL) {
270 		struct nfsnode *np = VTONFS(vp);
271 
272 		np->n_ctime = np->n_vattr->va_ctime.tv_sec;
273 	}
274 
275 	if (!timespecisset(&dnp->n_nctime))
276 		dnp->n_nctime = dnp->n_vattr->va_mtime;
277 
278 	cache_enter(dvp, vp, cnp);
279 }
280 
281 /*
282  * nfs null call from vfs.
283  */
284 int
285 nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
286 {
287 	char *bpos, *dpos;
288 	int error = 0;
289 	struct mbuf *mreq, *mrep, *md, *mb;
290 	struct nfsnode *np = VTONFS(vp);
291 
292 	nfsm_reqhead(np, NFSPROC_NULL, 0);
293 	nfsm_request(np, NFSPROC_NULL, l, cred);
294 	nfsm_reqdone;
295 	return (error);
296 }
297 
298 /*
299  * nfs access vnode op.
300  * For nfs version 2, just return ok. File accesses may fail later.
301  * For nfs version 3, use the access rpc to check accessibility. If file modes
302  * are changed on the server, accesses might still fail later.
303  */
304 int
305 nfs_access(void *v)
306 {
307 	struct vop_access_args /* {
308 		struct vnode *a_vp;
309 		int  a_mode;
310 		kauth_cred_t a_cred;
311 	} */ *ap = v;
312 	struct vnode *vp = ap->a_vp;
313 #ifndef NFS_V2_ONLY
314 	u_int32_t *tl;
315 	char *cp;
316 	int32_t t1, t2;
317 	char *bpos, *dpos, *cp2;
318 	int error = 0, attrflag;
319 	struct mbuf *mreq, *mrep, *md, *mb;
320 	u_int32_t mode, rmode;
321 	const int v3 = NFS_ISV3(vp);
322 #endif
323 	int cachevalid;
324 	struct nfsnode *np = VTONFS(vp);
325 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
326 
327 	cachevalid = (np->n_accstamp != -1 &&
328 	    (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
329 	    np->n_accuid == kauth_cred_geteuid(ap->a_cred));
330 
331 	/*
332 	 * Check access cache first. If this request has been made for this
333 	 * uid shortly before, use the cached result.
334 	 */
335 	if (cachevalid) {
336 		if (!np->n_accerror) {
337 			if  ((np->n_accmode & ap->a_mode) == ap->a_mode)
338 				return np->n_accerror;
339 		} else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
340 			return np->n_accerror;
341 	}
342 
343 #ifndef NFS_V2_ONLY
344 	/*
345 	 * For nfs v3, do an access rpc, otherwise you are stuck emulating
346 	 * ufs_access() locally using the vattr. This may not be correct,
347 	 * since the server may apply other access criteria such as
348 	 * client uid-->server uid mapping that we do not know about, but
349 	 * this is better than just returning anything that is lying about
350 	 * in the cache.
351 	 */
352 	if (v3) {
353 		nfsstats.rpccnt[NFSPROC_ACCESS]++;
354 		nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
355 		nfsm_fhtom(np, v3);
356 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
357 		if (ap->a_mode & VREAD)
358 			mode = NFSV3ACCESS_READ;
359 		else
360 			mode = 0;
361 		if (vp->v_type != VDIR) {
362 			if (ap->a_mode & VWRITE)
363 				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
364 			if (ap->a_mode & VEXEC)
365 				mode |= NFSV3ACCESS_EXECUTE;
366 		} else {
367 			if (ap->a_mode & VWRITE)
368 				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
369 					 NFSV3ACCESS_DELETE);
370 			if (ap->a_mode & VEXEC)
371 				mode |= NFSV3ACCESS_LOOKUP;
372 		}
373 		*tl = txdr_unsigned(mode);
374 		nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
375 		nfsm_postop_attr(vp, attrflag, 0);
376 		if (!error) {
377 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
378 			rmode = fxdr_unsigned(u_int32_t, *tl);
379 			/*
380 			 * The NFS V3 spec does not clarify whether or not
381 			 * the returned access bits can be a superset of
382 			 * the ones requested, so...
383 			 */
384 			if ((rmode & mode) != mode)
385 				error = EACCES;
386 		}
387 		nfsm_reqdone;
388 	} else
389 #endif
390 		return (nfsspec_access(ap));
391 #ifndef NFS_V2_ONLY
392 	/*
393 	 * Disallow write attempts on filesystems mounted read-only;
394 	 * unless the file is a socket, fifo, or a block or character
395 	 * device resident on the filesystem.
396 	 */
397 	if (!error && (ap->a_mode & VWRITE) &&
398 	    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
399 		switch (vp->v_type) {
400 		case VREG:
401 		case VDIR:
402 		case VLNK:
403 			error = EROFS;
404 		default:
405 			break;
406 		}
407 	}
408 
409 	if (!error || error == EACCES) {
410 		/*
411 		 * If we got the same result as for a previous,
412 		 * different request, OR it in. Don't update
413 		 * the timestamp in that case.
414 		 */
415 		if (cachevalid && np->n_accstamp != -1 &&
416 		    error == np->n_accerror) {
417 			if (!error)
418 				np->n_accmode |= ap->a_mode;
419 			else if ((np->n_accmode & ap->a_mode) == ap->a_mode)
420 				np->n_accmode = ap->a_mode;
421 		} else {
422 			np->n_accstamp = time_uptime;
423 			np->n_accuid = kauth_cred_geteuid(ap->a_cred);
424 			np->n_accmode = ap->a_mode;
425 			np->n_accerror = error;
426 		}
427 	}
428 
429 	return (error);
430 #endif
431 }
432 
433 /*
434  * nfs open vnode op
435  * Check to see if the type is ok
436  * and that deletion is not in progress.
437  * For paged in text files, you will need to flush the page cache
438  * if consistency is lost.
439  */
440 /* ARGSUSED */
441 int
442 nfs_open(void *v)
443 {
444 	struct vop_open_args /* {
445 		struct vnode *a_vp;
446 		int  a_mode;
447 		kauth_cred_t a_cred;
448 	} */ *ap = v;
449 	struct vnode *vp = ap->a_vp;
450 	struct nfsnode *np = VTONFS(vp);
451 	int error;
452 
453 	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
454 		return (EACCES);
455 	}
456 
457 	if (ap->a_mode & FREAD) {
458 		if (np->n_rcred != NULL)
459 			kauth_cred_free(np->n_rcred);
460 		np->n_rcred = ap->a_cred;
461 		kauth_cred_hold(np->n_rcred);
462 	}
463 	if (ap->a_mode & FWRITE) {
464 		if (np->n_wcred != NULL)
465 			kauth_cred_free(np->n_wcred);
466 		np->n_wcred = ap->a_cred;
467 		kauth_cred_hold(np->n_wcred);
468 	}
469 
470 	error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
471 	if (error)
472 		return error;
473 
474 	NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
475 
476 	return (0);
477 }
478 
479 /*
480  * nfs close vnode op
481  * What an NFS client should do upon close after writing is a debatable issue.
482  * Most NFS clients push delayed writes to the server upon close, basically for
483  * two reasons:
484  * 1 - So that any write errors may be reported back to the client process
485  *     doing the close system call. By far the two most likely errors are
486  *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
487  * 2 - To put a worst case upper bound on cache inconsistency between
488  *     multiple clients for the file.
489  * There is also a consistency problem for Version 2 of the protocol w.r.t.
490  * not being able to tell if other clients are writing a file concurrently,
491  * since there is no way of knowing if the changed modify time in the reply
492  * is only due to the write for this client.
493  * (NFS Version 3 provides weak cache consistency data in the reply that
494  *  should be sufficient to detect and handle this case.)
495  *
496  * The current code does the following:
497  * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
498  * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
499  *                     or commit them (this satisfies 1 and 2 except for the
500  *                     case where the server crashes after this close but
501  *                     before the commit RPC, which is felt to be "good
502  *                     enough". Changing the last argument to nfs_flush() to
503  *                     a 1 would force a commit operation, if it is felt a
504  *                     commit is necessary now.
505  */
506 /* ARGSUSED */
507 int
508 nfs_close(void *v)
509 {
510 	struct vop_close_args /* {
511 		struct vnodeop_desc *a_desc;
512 		struct vnode *a_vp;
513 		int  a_fflag;
514 		kauth_cred_t a_cred;
515 	} */ *ap = v;
516 	struct vnode *vp = ap->a_vp;
517 	struct nfsnode *np = VTONFS(vp);
518 	int error = 0;
519 	UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
520 
521 	if (vp->v_type == VREG) {
522 	    if (np->n_flag & NMODIFIED) {
523 #ifndef NFS_V2_ONLY
524 		if (NFS_ISV3(vp)) {
525 		    error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
526 		    np->n_flag &= ~NMODIFIED;
527 		} else
528 #endif
529 		    error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
530 		NFS_INVALIDATE_ATTRCACHE(np);
531 	    }
532 	    if (np->n_flag & NWRITEERR) {
533 		np->n_flag &= ~NWRITEERR;
534 		error = np->n_error;
535 	    }
536 	}
537 	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
538 	return (error);
539 }
540 
541 /*
542  * nfs getattr call from vfs.
543  */
544 int
545 nfs_getattr(void *v)
546 {
547 	struct vop_getattr_args /* {
548 		struct vnode *a_vp;
549 		struct vattr *a_vap;
550 		kauth_cred_t a_cred;
551 	} */ *ap = v;
552 	struct vnode *vp = ap->a_vp;
553 	struct nfsnode *np = VTONFS(vp);
554 	char *cp;
555 	u_int32_t *tl;
556 	int32_t t1, t2;
557 	char *bpos, *dpos;
558 	int error = 0;
559 	struct mbuf *mreq, *mrep, *md, *mb;
560 	const int v3 = NFS_ISV3(vp);
561 
562 	/*
563 	 * Update local times for special files.
564 	 */
565 	if (np->n_flag & (NACC | NUPD))
566 		np->n_flag |= NCHG;
567 
568 	/*
569 	 * if we have delayed truncation, do it now.
570 	 */
571 	nfs_delayedtruncate(vp);
572 
573 	/*
574 	 * First look in the cache.
575 	 */
576 	if (nfs_getattrcache(vp, ap->a_vap) == 0)
577 		return (0);
578 	nfsstats.rpccnt[NFSPROC_GETATTR]++;
579 	nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
580 	nfsm_fhtom(np, v3);
581 	nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
582 	if (!error) {
583 		nfsm_loadattr(vp, ap->a_vap, 0);
584 		if (vp->v_type == VDIR &&
585 		    ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
586 			ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
587 	}
588 	nfsm_reqdone;
589 	return (error);
590 }
591 
592 /*
593  * nfs setattr call.
594  */
595 int
596 nfs_setattr(void *v)
597 {
598 	struct vop_setattr_args /* {
599 		struct vnodeop_desc *a_desc;
600 		struct vnode *a_vp;
601 		struct vattr *a_vap;
602 		kauth_cred_t a_cred;
603 	} */ *ap = v;
604 	struct vnode *vp = ap->a_vp;
605 	struct nfsnode *np = VTONFS(vp);
606 	struct vattr *vap = ap->a_vap;
607 	int error = 0;
608 	u_quad_t tsize = 0;
609 
610 	/*
611 	 * Setting of flags is not supported.
612 	 */
613 	if (vap->va_flags != VNOVAL)
614 		return (EOPNOTSUPP);
615 
616 	/*
617 	 * Disallow write attempts if the filesystem is mounted read-only.
618 	 */
619   	if ((vap->va_uid != (uid_t)VNOVAL ||
620 	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
621 	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
622 	    (vp->v_mount->mnt_flag & MNT_RDONLY))
623 		return (EROFS);
624 	if (vap->va_size != VNOVAL) {
625 		if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
626 			return EFBIG;
627 		}
628  		switch (vp->v_type) {
629  		case VDIR:
630  			return (EISDIR);
631  		case VCHR:
632  		case VBLK:
633  		case VSOCK:
634  		case VFIFO:
635 			if (vap->va_mtime.tv_sec == VNOVAL &&
636 			    vap->va_atime.tv_sec == VNOVAL &&
637 			    vap->va_mode == (mode_t)VNOVAL &&
638 			    vap->va_uid == (uid_t)VNOVAL &&
639 			    vap->va_gid == (gid_t)VNOVAL)
640 				return (0);
641  			vap->va_size = VNOVAL;
642  			break;
643  		default:
644 			/*
645 			 * Disallow write attempts if the filesystem is
646 			 * mounted read-only.
647 			 */
648 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
649 				return (EROFS);
650 			genfs_node_wrlock(vp);
651  			uvm_vnp_setsize(vp, vap->va_size);
652  			tsize = np->n_size;
653 			np->n_size = vap->va_size;
654  			if (vap->va_size == 0)
655  				error = nfs_vinvalbuf(vp, 0,
656  				     ap->a_cred, curlwp, 1);
657 			else
658 				error = nfs_vinvalbuf(vp, V_SAVE,
659 				     ap->a_cred, curlwp, 1);
660 			if (error) {
661 				uvm_vnp_setsize(vp, tsize);
662 				genfs_node_unlock(vp);
663 				return (error);
664 			}
665  			np->n_vattr->va_size = vap->va_size;
666   		}
667   	} else {
668 		/*
669 		 * flush files before setattr because a later write of
670 		 * cached data might change timestamps or reset sugid bits
671 		 */
672 		if ((vap->va_mtime.tv_sec != VNOVAL ||
673 		     vap->va_atime.tv_sec != VNOVAL ||
674 		     vap->va_mode != VNOVAL) &&
675 		    vp->v_type == VREG &&
676   		    (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
677 		 			   curlwp, 1)) == EINTR)
678 			return (error);
679 	}
680 	error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
681 	if (vap->va_size != VNOVAL) {
682 		if (error) {
683 			np->n_size = np->n_vattr->va_size = tsize;
684 			uvm_vnp_setsize(vp, np->n_size);
685 		}
686 		genfs_node_unlock(vp);
687 	}
688 	VN_KNOTE(vp, NOTE_ATTRIB);
689 	return (error);
690 }
691 
692 /*
693  * Do an nfs setattr rpc.
694  */
695 int
696 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
697 {
698 	struct nfsv2_sattr *sp;
699 	char *cp;
700 	int32_t t1, t2;
701 	char *bpos, *dpos;
702 	u_int32_t *tl;
703 	int error = 0;
704 	struct mbuf *mreq, *mrep, *md, *mb;
705 	const int v3 = NFS_ISV3(vp);
706 	struct nfsnode *np = VTONFS(vp);
707 #ifndef NFS_V2_ONLY
708 	int wccflag = NFSV3_WCCRATTR;
709 	char *cp2;
710 #endif
711 
712 	nfsstats.rpccnt[NFSPROC_SETATTR]++;
713 	nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
714 	nfsm_fhtom(np, v3);
715 #ifndef NFS_V2_ONLY
716 	if (v3) {
717 		nfsm_v3attrbuild(vap, true);
718 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
719 		*tl = nfs_false;
720 	} else {
721 #endif
722 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
723 		if (vap->va_mode == (mode_t)VNOVAL)
724 			sp->sa_mode = nfs_xdrneg1;
725 		else
726 			sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
727 		if (vap->va_uid == (uid_t)VNOVAL)
728 			sp->sa_uid = nfs_xdrneg1;
729 		else
730 			sp->sa_uid = txdr_unsigned(vap->va_uid);
731 		if (vap->va_gid == (gid_t)VNOVAL)
732 			sp->sa_gid = nfs_xdrneg1;
733 		else
734 			sp->sa_gid = txdr_unsigned(vap->va_gid);
735 		sp->sa_size = txdr_unsigned(vap->va_size);
736 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
737 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
738 #ifndef NFS_V2_ONLY
739 	}
740 #endif
741 	nfsm_request(np, NFSPROC_SETATTR, l, cred);
742 #ifndef NFS_V2_ONLY
743 	if (v3) {
744 		nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
745 	} else
746 #endif
747 		nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
748 	nfsm_reqdone;
749 	return (error);
750 }
751 
752 /*
753  * nfs lookup call, one step at a time...
754  * First look in cache
755  * If not found, do the rpc.
756  */
757 int
758 nfs_lookup(void *v)
759 {
760 	struct vop_lookup_args /* {
761 		struct vnodeop_desc *a_desc;
762 		struct vnode *a_dvp;
763 		struct vnode **a_vpp;
764 		struct componentname *a_cnp;
765 	} */ *ap = v;
766 	struct componentname *cnp = ap->a_cnp;
767 	struct vnode *dvp = ap->a_dvp;
768 	struct vnode **vpp = ap->a_vpp;
769 	int flags;
770 	struct vnode *newvp;
771 	u_int32_t *tl;
772 	char *cp;
773 	int32_t t1, t2;
774 	char *bpos, *dpos, *cp2;
775 	struct mbuf *mreq, *mrep, *md, *mb;
776 	long len;
777 	nfsfh_t *fhp;
778 	struct nfsnode *np;
779 	int error = 0, attrflag, fhsize;
780 	const int v3 = NFS_ISV3(dvp);
781 
782 	flags = cnp->cn_flags;
783 
784 	*vpp = NULLVP;
785 	newvp = NULLVP;
786 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
787 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
788 		return (EROFS);
789 	if (dvp->v_type != VDIR)
790 		return (ENOTDIR);
791 
792 	/*
793 	 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
794 	 */
795 	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
796 		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
797 		if (error)
798 			return error;
799 		if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
800 			return EISDIR;
801 		vref(dvp);
802 		*vpp = dvp;
803 		return 0;
804 	}
805 
806 	np = VTONFS(dvp);
807 
808 	/*
809 	 * Before performing an RPC, check the name cache to see if
810 	 * the directory/name pair we are looking for is known already.
811 	 * If the directory/name pair is found in the name cache,
812 	 * we have to ensure the directory has not changed from
813 	 * the time the cache entry has been created. If it has,
814 	 * the cache entry has to be ignored.
815 	 */
816 	error = cache_lookup_raw(dvp, vpp, cnp);
817 	KASSERT(dvp != *vpp);
818 	KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
819 	if (error >= 0) {
820 		struct vattr vattr;
821 		int err2;
822 
823 		if (error && error != ENOENT) {
824 			*vpp = NULLVP;
825 			return error;
826 		}
827 
828 		err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
829 		if (err2 != 0) {
830 			if (error == 0)
831 				vrele(*vpp);
832 			*vpp = NULLVP;
833 			return err2;
834 		}
835 
836 		if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
837 		    || timespeccmp(&vattr.va_mtime,
838 		    &VTONFS(dvp)->n_nctime, !=)) {
839 			if (error == 0) {
840 				vrele(*vpp);
841 				*vpp = NULLVP;
842 			}
843 			cache_purge1(dvp, NULL, PURGE_CHILDREN);
844 			timespecclear(&np->n_nctime);
845 			goto dorpc;
846 		}
847 
848 		if (error == ENOENT) {
849 			goto noentry;
850 		}
851 
852 		/*
853 		 * investigate the vnode returned by cache_lookup_raw.
854 		 * if it isn't appropriate, do an rpc.
855 		 */
856 		newvp = *vpp;
857 		if ((flags & ISDOTDOT) != 0) {
858 			VOP_UNLOCK(dvp);
859 		}
860 		error = vn_lock(newvp, LK_EXCLUSIVE);
861 		if ((flags & ISDOTDOT) != 0) {
862 			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
863 		}
864 		if (error != 0) {
865 			/* newvp has been reclaimed. */
866 			vrele(newvp);
867 			*vpp = NULLVP;
868 			goto dorpc;
869 		}
870 		if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
871 		    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
872 			nfsstats.lookupcache_hits++;
873 			KASSERT(newvp->v_type != VNON);
874 			return (0);
875 		}
876 		cache_purge1(newvp, NULL, PURGE_PARENTS);
877 		vput(newvp);
878 		*vpp = NULLVP;
879 	}
880 dorpc:
881 #if 0
882 	/*
883 	 * because nfsv3 has the same CREATE semantics as ours,
884 	 * we don't have to perform LOOKUPs beforehand.
885 	 *
886 	 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
887 	 * XXX although we have no way to know if O_EXCL is requested or not.
888 	 */
889 
890 	if (v3 && cnp->cn_nameiop == CREATE &&
891 	    (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
892 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
893 		return (EJUSTRETURN);
894 	}
895 #endif /* 0 */
896 
897 	error = 0;
898 	newvp = NULLVP;
899 	nfsstats.lookupcache_misses++;
900 	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
901 	len = cnp->cn_namelen;
902 	nfsm_reqhead(np, NFSPROC_LOOKUP,
903 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
904 	nfsm_fhtom(np, v3);
905 	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
906 	nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
907 	if (error) {
908 		nfsm_postop_attr(dvp, attrflag, 0);
909 		m_freem(mrep);
910 		goto nfsmout;
911 	}
912 	nfsm_getfh(fhp, fhsize, v3);
913 
914 	/*
915 	 * Handle RENAME case...
916 	 */
917 	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
918 		if (NFS_CMPFH(np, fhp, fhsize)) {
919 			m_freem(mrep);
920 			return (EISDIR);
921 		}
922 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
923 		if (error) {
924 			m_freem(mrep);
925 			return error;
926 		}
927 		newvp = NFSTOV(np);
928 #ifndef NFS_V2_ONLY
929 		if (v3) {
930 			nfsm_postop_attr(newvp, attrflag, 0);
931 			nfsm_postop_attr(dvp, attrflag, 0);
932 		} else
933 #endif
934 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
935 		*vpp = newvp;
936 		m_freem(mrep);
937 		goto validate;
938 	}
939 
940 	/*
941 	 * The postop attr handling is duplicated for each if case,
942 	 * because it should be done while dvp is locked (unlocking
943 	 * dvp is different for each case).
944 	 */
945 
946 	if (NFS_CMPFH(np, fhp, fhsize)) {
947 		/*
948 		 * as we handle "." lookup locally, this should be
949 		 * a broken server.
950 		 */
951 		vref(dvp);
952 		newvp = dvp;
953 #ifndef NFS_V2_ONLY
954 		if (v3) {
955 			nfsm_postop_attr(newvp, attrflag, 0);
956 			nfsm_postop_attr(dvp, attrflag, 0);
957 		} else
958 #endif
959 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
960 	} else if (flags & ISDOTDOT) {
961 		/*
962 		 * ".." lookup
963 		 */
964 		VOP_UNLOCK(dvp);
965 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
966 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
967 		if (error) {
968 			m_freem(mrep);
969 			return error;
970 		}
971 		newvp = NFSTOV(np);
972 
973 #ifndef NFS_V2_ONLY
974 		if (v3) {
975 			nfsm_postop_attr(newvp, attrflag, 0);
976 			nfsm_postop_attr(dvp, attrflag, 0);
977 		} else
978 #endif
979 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
980 	} else {
981 		/*
982 		 * Other lookups.
983 		 */
984 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
985 		if (error) {
986 			m_freem(mrep);
987 			return error;
988 		}
989 		newvp = NFSTOV(np);
990 #ifndef NFS_V2_ONLY
991 		if (v3) {
992 			nfsm_postop_attr(newvp, attrflag, 0);
993 			nfsm_postop_attr(dvp, attrflag, 0);
994 		} else
995 #endif
996 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
997 	}
998 	if ((cnp->cn_flags & MAKEENTRY) &&
999 	    (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
1000 		nfs_cache_enter(dvp, newvp, cnp);
1001 	}
1002 	*vpp = newvp;
1003 	nfsm_reqdone;
1004 	if (error) {
1005 		/*
1006 		 * We get here only because of errors returned by
1007 		 * the RPC. Otherwise we'll have returned above
1008 		 * (the nfsm_* macros will jump to nfsm_reqdone
1009 		 * on error).
1010 		 */
1011 		if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) &&
1012 		    cnp->cn_nameiop != CREATE) {
1013 			nfs_cache_enter(dvp, NULL, cnp);
1014 		}
1015 		if (newvp != NULLVP) {
1016 			if (newvp == dvp) {
1017 				vrele(newvp);
1018 			} else {
1019 				vput(newvp);
1020 			}
1021 		}
1022 noentry:
1023 		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1024 		    (flags & ISLASTCN) && error == ENOENT) {
1025 			if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
1026 				error = EROFS;
1027 			} else {
1028 				error = EJUSTRETURN;
1029 			}
1030 		}
1031 		*vpp = NULL;
1032 		return error;
1033 	}
1034 
1035 validate:
1036 	/*
1037 	 * make sure we have valid type and size.
1038 	 */
1039 
1040 	newvp = *vpp;
1041 	if (newvp->v_type == VNON) {
1042 		struct vattr vattr; /* dummy */
1043 
1044 		KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1045 		error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
1046 		if (error) {
1047 			vput(newvp);
1048 			*vpp = NULL;
1049 		}
1050 	}
1051 
1052 	return error;
1053 }
1054 
1055 /*
1056  * nfs read call.
1057  * Just call nfs_bioread() to do the work.
1058  */
1059 int
1060 nfs_read(void *v)
1061 {
1062 	struct vop_read_args /* {
1063 		struct vnode *a_vp;
1064 		struct uio *a_uio;
1065 		int  a_ioflag;
1066 		kauth_cred_t a_cred;
1067 	} */ *ap = v;
1068 	struct vnode *vp = ap->a_vp;
1069 
1070 	if (vp->v_type != VREG)
1071 		return EISDIR;
1072 	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1073 }
1074 
1075 /*
1076  * nfs readlink call
1077  */
1078 int
1079 nfs_readlink(void *v)
1080 {
1081 	struct vop_readlink_args /* {
1082 		struct vnode *a_vp;
1083 		struct uio *a_uio;
1084 		kauth_cred_t a_cred;
1085 	} */ *ap = v;
1086 	struct vnode *vp = ap->a_vp;
1087 	struct nfsnode *np = VTONFS(vp);
1088 
1089 	if (vp->v_type != VLNK)
1090 		return (EPERM);
1091 
1092 	if (np->n_rcred != NULL) {
1093 		kauth_cred_free(np->n_rcred);
1094 	}
1095 	np->n_rcred = ap->a_cred;
1096 	kauth_cred_hold(np->n_rcred);
1097 
1098 	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1099 }
1100 
1101 /*
1102  * Do a readlink rpc.
1103  * Called by nfs_doio() from below the buffer cache.
1104  */
1105 int
1106 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
1107 {
1108 	u_int32_t *tl;
1109 	char *cp;
1110 	int32_t t1, t2;
1111 	char *bpos, *dpos, *cp2;
1112 	int error = 0;
1113 	uint32_t len;
1114 	struct mbuf *mreq, *mrep, *md, *mb;
1115 	const int v3 = NFS_ISV3(vp);
1116 	struct nfsnode *np = VTONFS(vp);
1117 #ifndef NFS_V2_ONLY
1118 	int attrflag;
1119 #endif
1120 
1121 	nfsstats.rpccnt[NFSPROC_READLINK]++;
1122 	nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1123 	nfsm_fhtom(np, v3);
1124 	nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1125 #ifndef NFS_V2_ONLY
1126 	if (v3)
1127 		nfsm_postop_attr(vp, attrflag, 0);
1128 #endif
1129 	if (!error) {
1130 #ifndef NFS_V2_ONLY
1131 		if (v3) {
1132 			nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1133 			len = fxdr_unsigned(uint32_t, *tl);
1134 			if (len > MAXPATHLEN) {
1135 				/*
1136 				 * this pathname is too long for us.
1137 				 */
1138 				m_freem(mrep);
1139 				/* Solaris returns EINVAL. should we follow? */
1140 				error = ENAMETOOLONG;
1141 				goto nfsmout;
1142 			}
1143 		} else
1144 #endif
1145 		{
1146 			nfsm_strsiz(len, NFS_MAXPATHLEN);
1147 		}
1148 		nfsm_mtouio(uiop, len);
1149 	}
1150 	nfsm_reqdone;
1151 	return (error);
1152 }
1153 
1154 /*
1155  * nfs read rpc call
1156  * Ditto above
1157  */
1158 int
1159 nfs_readrpc(struct vnode *vp, struct uio *uiop)
1160 {
1161 	u_int32_t *tl;
1162 	char *cp;
1163 	int32_t t1, t2;
1164 	char *bpos, *dpos, *cp2;
1165 	struct mbuf *mreq, *mrep, *md, *mb;
1166 	struct nfsmount *nmp;
1167 	int error = 0, len, retlen, tsiz, eof, byte_count;
1168 	const int v3 = NFS_ISV3(vp);
1169 	struct nfsnode *np = VTONFS(vp);
1170 #ifndef NFS_V2_ONLY
1171 	int attrflag;
1172 #endif
1173 
1174 #ifndef nolint
1175 	eof = 0;
1176 #endif
1177 	nmp = VFSTONFS(vp->v_mount);
1178 	tsiz = uiop->uio_resid;
1179 	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1180 		return (EFBIG);
1181 	iostat_busy(nmp->nm_stats);
1182 	byte_count = 0; /* count bytes actually transferred */
1183 	while (tsiz > 0) {
1184 		nfsstats.rpccnt[NFSPROC_READ]++;
1185 		len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1186 		nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1187 		nfsm_fhtom(np, v3);
1188 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1189 #ifndef NFS_V2_ONLY
1190 		if (v3) {
1191 			txdr_hyper(uiop->uio_offset, tl);
1192 			*(tl + 2) = txdr_unsigned(len);
1193 		} else
1194 #endif
1195 		{
1196 			*tl++ = txdr_unsigned(uiop->uio_offset);
1197 			*tl++ = txdr_unsigned(len);
1198 			*tl = 0;
1199 		}
1200 		nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1201 #ifndef NFS_V2_ONLY
1202 		if (v3) {
1203 			nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1204 			if (error) {
1205 				m_freem(mrep);
1206 				goto nfsmout;
1207 			}
1208 			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1209 			eof = fxdr_unsigned(int, *(tl + 1));
1210 		} else
1211 #endif
1212 			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1213 		nfsm_strsiz(retlen, nmp->nm_rsize);
1214 		nfsm_mtouio(uiop, retlen);
1215 		m_freem(mrep);
1216 		tsiz -= retlen;
1217 		byte_count += retlen;
1218 #ifndef NFS_V2_ONLY
1219 		if (v3) {
1220 			if (eof || retlen == 0)
1221 				tsiz = 0;
1222 		} else
1223 #endif
1224 		if (retlen < len)
1225 			tsiz = 0;
1226 	}
1227 nfsmout:
1228 	iostat_unbusy(nmp->nm_stats, byte_count, 1);
1229 	return (error);
1230 }
1231 
1232 struct nfs_writerpc_context {
1233 	kmutex_t nwc_lock;
1234 	kcondvar_t nwc_cv;
1235 	int nwc_mbufcount;
1236 };
1237 
1238 /*
1239  * free mbuf used to refer protected pages while write rpc call.
1240  * called at splvm.
1241  */
1242 static void
1243 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
1244 {
1245 	struct nfs_writerpc_context *ctx = arg;
1246 
1247 	KASSERT(m != NULL);
1248 	KASSERT(ctx != NULL);
1249 	pool_cache_put(mb_cache, m);
1250 	mutex_enter(&ctx->nwc_lock);
1251 	if (--ctx->nwc_mbufcount == 0) {
1252 		cv_signal(&ctx->nwc_cv);
1253 	}
1254 	mutex_exit(&ctx->nwc_lock);
1255 }
1256 
1257 /*
1258  * nfs write call
1259  */
1260 int
1261 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
1262 {
1263 	u_int32_t *tl;
1264 	char *cp;
1265 	int32_t t1, t2;
1266 	char *bpos, *dpos;
1267 	struct mbuf *mreq, *mrep, *md, *mb;
1268 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1269 	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1270 	const int v3 = NFS_ISV3(vp);
1271 	int committed = NFSV3WRITE_FILESYNC;
1272 	struct nfsnode *np = VTONFS(vp);
1273 	struct nfs_writerpc_context ctx;
1274 	int byte_count;
1275 	size_t origresid;
1276 #ifndef NFS_V2_ONLY
1277 	char *cp2;
1278 	int rlen, commit;
1279 #endif
1280 
1281 	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1282 		panic("writerpc readonly vp %p", vp);
1283 	}
1284 
1285 #ifdef DIAGNOSTIC
1286 	if (uiop->uio_iovcnt != 1)
1287 		panic("nfs: writerpc iovcnt > 1");
1288 #endif
1289 	tsiz = uiop->uio_resid;
1290 	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1291 		return EFBIG;
1292 
1293 	mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
1294 	cv_init(&ctx.nwc_cv, "nfsmblk");
1295 	ctx.nwc_mbufcount = 1;
1296 
1297 retry:
1298 	origresid = uiop->uio_resid;
1299 	KASSERT(origresid == uiop->uio_iov->iov_len);
1300 	iostat_busy(nmp->nm_stats);
1301 	byte_count = 0; /* count of bytes actually written */
1302 	while (tsiz > 0) {
1303 		uint32_t datalen; /* data bytes need to be allocated in mbuf */
1304 		uint32_t backup;
1305 		bool stalewriteverf = false;
1306 
1307 		nfsstats.rpccnt[NFSPROC_WRITE]++;
1308 		len = min(tsiz, nmp->nm_wsize);
1309 		datalen = pageprotected ? 0 : nfsm_rndup(len);
1310 		nfsm_reqhead(np, NFSPROC_WRITE,
1311 			NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1312 		nfsm_fhtom(np, v3);
1313 #ifndef NFS_V2_ONLY
1314 		if (v3) {
1315 			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1316 			txdr_hyper(uiop->uio_offset, tl);
1317 			tl += 2;
1318 			*tl++ = txdr_unsigned(len);
1319 			*tl++ = txdr_unsigned(*iomode);
1320 			*tl = txdr_unsigned(len);
1321 		} else
1322 #endif
1323 		{
1324 			u_int32_t x;
1325 
1326 			nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1327 			/* Set both "begin" and "current" to non-garbage. */
1328 			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1329 			*tl++ = x;      /* "begin offset" */
1330 			*tl++ = x;      /* "current offset" */
1331 			x = txdr_unsigned(len);
1332 			*tl++ = x;      /* total to this offset */
1333 			*tl = x;        /* size of this write */
1334 
1335 		}
1336 		if (pageprotected) {
1337 			/*
1338 			 * since we know pages can't be modified during i/o,
1339 			 * no need to copy them for us.
1340 			 */
1341 			struct mbuf *m;
1342 			struct iovec *iovp = uiop->uio_iov;
1343 
1344 			m = m_get(M_WAIT, MT_DATA);
1345 			MCLAIM(m, &nfs_mowner);
1346 			MEXTADD(m, iovp->iov_base, len, M_MBUF,
1347 			    nfs_writerpc_extfree, &ctx);
1348 			m->m_flags |= M_EXT_ROMAP;
1349 			m->m_len = len;
1350 			mb->m_next = m;
1351 			/*
1352 			 * no need to maintain mb and bpos here
1353 			 * because no one care them later.
1354 			 */
1355 #if 0
1356 			mb = m;
1357 			bpos = mtod(void *, mb) + mb->m_len;
1358 #endif
1359 			UIO_ADVANCE(uiop, len);
1360 			uiop->uio_offset += len;
1361 			mutex_enter(&ctx.nwc_lock);
1362 			ctx.nwc_mbufcount++;
1363 			mutex_exit(&ctx.nwc_lock);
1364 			nfs_zeropad(mb, 0, nfsm_padlen(len));
1365 		} else {
1366 			nfsm_uiotom(uiop, len);
1367 		}
1368 		nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1369 #ifndef NFS_V2_ONLY
1370 		if (v3) {
1371 			wccflag = NFSV3_WCCCHK;
1372 			nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1373 			if (!error) {
1374 				nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1375 					+ NFSX_V3WRITEVERF);
1376 				rlen = fxdr_unsigned(int, *tl++);
1377 				if (rlen == 0) {
1378 					error = NFSERR_IO;
1379 					m_freem(mrep);
1380 					break;
1381 				} else if (rlen < len) {
1382 					backup = len - rlen;
1383 					UIO_ADVANCE(uiop, -backup);
1384 					uiop->uio_offset -= backup;
1385 					len = rlen;
1386 				}
1387 				commit = fxdr_unsigned(int, *tl++);
1388 
1389 				/*
1390 				 * Return the lowest committment level
1391 				 * obtained by any of the RPCs.
1392 				 */
1393 				if (committed == NFSV3WRITE_FILESYNC)
1394 					committed = commit;
1395 				else if (committed == NFSV3WRITE_DATASYNC &&
1396 					commit == NFSV3WRITE_UNSTABLE)
1397 					committed = commit;
1398 				mutex_enter(&nmp->nm_lock);
1399 				if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1400 					memcpy(nmp->nm_writeverf, tl,
1401 					    NFSX_V3WRITEVERF);
1402 					nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1403 				} else if ((nmp->nm_iflag &
1404 				    NFSMNT_STALEWRITEVERF) ||
1405 				    memcmp(tl, nmp->nm_writeverf,
1406 				    NFSX_V3WRITEVERF)) {
1407 					memcpy(nmp->nm_writeverf, tl,
1408 					    NFSX_V3WRITEVERF);
1409 					/*
1410 					 * note NFSMNT_STALEWRITEVERF
1411 					 * if we're the first thread to
1412 					 * notice it.
1413 					 */
1414 					if ((nmp->nm_iflag &
1415 					    NFSMNT_STALEWRITEVERF) == 0) {
1416 						stalewriteverf = true;
1417 						nmp->nm_iflag |=
1418 						    NFSMNT_STALEWRITEVERF;
1419 					}
1420 				}
1421 				mutex_exit(&nmp->nm_lock);
1422 			}
1423 		} else
1424 #endif
1425 			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1426 		if (wccflag)
1427 			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1428 		m_freem(mrep);
1429 		if (error)
1430 			break;
1431 		tsiz -= len;
1432 		byte_count += len;
1433 		if (stalewriteverf) {
1434 			*stalewriteverfp = true;
1435 			stalewriteverf = false;
1436 			if (committed == NFSV3WRITE_UNSTABLE &&
1437 			    len != origresid) {
1438 				/*
1439 				 * if our write requests weren't atomic but
1440 				 * unstable, datas in previous iterations
1441 				 * might have already been lost now.
1442 				 * then, we should resend them to nfsd.
1443 				 */
1444 				backup = origresid - tsiz;
1445 				UIO_ADVANCE(uiop, -backup);
1446 				uiop->uio_offset -= backup;
1447 				tsiz = origresid;
1448 				goto retry;
1449 			}
1450 		}
1451 	}
1452 nfsmout:
1453 	iostat_unbusy(nmp->nm_stats, byte_count, 0);
1454 	if (pageprotected) {
1455 		/*
1456 		 * wait until mbufs go away.
1457 		 * retransmitted mbufs can survive longer than rpc requests
1458 		 * themselves.
1459 		 */
1460 		mutex_enter(&ctx.nwc_lock);
1461 		ctx.nwc_mbufcount--;
1462 		while (ctx.nwc_mbufcount > 0) {
1463 			cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
1464 		}
1465 		mutex_exit(&ctx.nwc_lock);
1466 	}
1467 	mutex_destroy(&ctx.nwc_lock);
1468 	cv_destroy(&ctx.nwc_cv);
1469 	*iomode = committed;
1470 	if (error)
1471 		uiop->uio_resid = tsiz;
1472 	return (error);
1473 }
1474 
1475 /*
1476  * nfs mknod rpc
1477  * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1478  * mode set to specify the file type and the size field for rdev.
1479  */
1480 int
1481 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
1482 {
1483 	struct nfsv2_sattr *sp;
1484 	u_int32_t *tl;
1485 	char *cp;
1486 	int32_t t1, t2;
1487 	struct vnode *newvp = (struct vnode *)0;
1488 	struct nfsnode *dnp, *np;
1489 	char *cp2;
1490 	char *bpos, *dpos;
1491 	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1492 	struct mbuf *mreq, *mrep, *md, *mb;
1493 	u_int32_t rdev;
1494 	const int v3 = NFS_ISV3(dvp);
1495 
1496 	if (vap->va_type == VCHR || vap->va_type == VBLK)
1497 		rdev = txdr_unsigned(vap->va_rdev);
1498 	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1499 		rdev = nfs_xdrneg1;
1500 	else {
1501 		VOP_ABORTOP(dvp, cnp);
1502 		vput(dvp);
1503 		return (EOPNOTSUPP);
1504 	}
1505 	nfsstats.rpccnt[NFSPROC_MKNOD]++;
1506 	dnp = VTONFS(dvp);
1507 	nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1508 		+ nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1509 	nfsm_fhtom(dnp, v3);
1510 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1511 #ifndef NFS_V2_ONLY
1512 	if (v3) {
1513 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1514 		*tl++ = vtonfsv3_type(vap->va_type);
1515 		nfsm_v3attrbuild(vap, false);
1516 		if (vap->va_type == VCHR || vap->va_type == VBLK) {
1517 			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1518 			*tl++ = txdr_unsigned(major(vap->va_rdev));
1519 			*tl = txdr_unsigned(minor(vap->va_rdev));
1520 		}
1521 	} else
1522 #endif
1523 	{
1524 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1525 		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1526 		sp->sa_uid = nfs_xdrneg1;
1527 		sp->sa_gid = nfs_xdrneg1;
1528 		sp->sa_size = rdev;
1529 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1530 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1531 	}
1532 	nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
1533 	if (!error) {
1534 		nfsm_mtofh(dvp, newvp, v3, gotvp);
1535 		if (!gotvp) {
1536 			error = nfs_lookitup(dvp, cnp->cn_nameptr,
1537 			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1538 			if (!error)
1539 				newvp = NFSTOV(np);
1540 		}
1541 	}
1542 #ifndef NFS_V2_ONLY
1543 	if (v3)
1544 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1545 #endif
1546 	nfsm_reqdone;
1547 	if (error) {
1548 		if (newvp)
1549 			vput(newvp);
1550 	} else {
1551 		if (cnp->cn_flags & MAKEENTRY)
1552 			nfs_cache_enter(dvp, newvp, cnp);
1553 		*vpp = newvp;
1554 	}
1555 	VTONFS(dvp)->n_flag |= NMODIFIED;
1556 	if (!wccflag)
1557 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1558 	vput(dvp);
1559 	return (error);
1560 }
1561 
1562 /*
1563  * nfs mknod vop
1564  * just call nfs_mknodrpc() to do the work.
1565  */
1566 /* ARGSUSED */
1567 int
1568 nfs_mknod(void *v)
1569 {
1570 	struct vop_mknod_args /* {
1571 		struct vnode *a_dvp;
1572 		struct vnode **a_vpp;
1573 		struct componentname *a_cnp;
1574 		struct vattr *a_vap;
1575 	} */ *ap = v;
1576 	struct vnode *dvp = ap->a_dvp;
1577 	struct componentname *cnp = ap->a_cnp;
1578 	int error;
1579 
1580 	error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1581 	VN_KNOTE(dvp, NOTE_WRITE);
1582 	if (error == 0 || error == EEXIST)
1583 		cache_purge1(dvp, cnp, 0);
1584 	return (error);
1585 }
1586 
1587 /*
1588  * nfs file create call
1589  */
1590 int
1591 nfs_create(void *v)
1592 {
1593 	struct vop_create_args /* {
1594 		struct vnode *a_dvp;
1595 		struct vnode **a_vpp;
1596 		struct componentname *a_cnp;
1597 		struct vattr *a_vap;
1598 	} */ *ap = v;
1599 	struct vnode *dvp = ap->a_dvp;
1600 	struct vattr *vap = ap->a_vap;
1601 	struct componentname *cnp = ap->a_cnp;
1602 	struct nfsv2_sattr *sp;
1603 	u_int32_t *tl;
1604 	char *cp;
1605 	int32_t t1, t2;
1606 	struct nfsnode *dnp, *np = (struct nfsnode *)0;
1607 	struct vnode *newvp = (struct vnode *)0;
1608 	char *bpos, *dpos, *cp2;
1609 	int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1610 	struct mbuf *mreq, *mrep, *md, *mb;
1611 	const int v3 = NFS_ISV3(dvp);
1612 	u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;
1613 
1614 	/*
1615 	 * Oops, not for me..
1616 	 */
1617 	if (vap->va_type == VSOCK)
1618 		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1619 
1620 	KASSERT(vap->va_type == VREG);
1621 
1622 #ifdef VA_EXCLUSIVE
1623 	if (vap->va_vaflags & VA_EXCLUSIVE) {
1624 		excl_mode = NFSV3CREATE_EXCLUSIVE;
1625 	}
1626 #endif
1627 again:
1628 	error = 0;
1629 	nfsstats.rpccnt[NFSPROC_CREATE]++;
1630 	dnp = VTONFS(dvp);
1631 	nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1632 		nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1633 	nfsm_fhtom(dnp, v3);
1634 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1635 #ifndef NFS_V2_ONLY
1636 	if (v3) {
1637 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1638 		if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1639 			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1640 			nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1641 			*tl++ = arc4random();
1642 			*tl = arc4random();
1643 		} else {
1644 			*tl = txdr_unsigned(excl_mode);
1645 			nfsm_v3attrbuild(vap, false);
1646 		}
1647 	} else
1648 #endif
1649 	{
1650 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1651 		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1652 		sp->sa_uid = nfs_xdrneg1;
1653 		sp->sa_gid = nfs_xdrneg1;
1654 		sp->sa_size = 0;
1655 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1656 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1657 	}
1658 	nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
1659 	if (!error) {
1660 		nfsm_mtofh(dvp, newvp, v3, gotvp);
1661 		if (!gotvp) {
1662 			error = nfs_lookitup(dvp, cnp->cn_nameptr,
1663 			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1664 			if (!error)
1665 				newvp = NFSTOV(np);
1666 		}
1667 	}
1668 #ifndef NFS_V2_ONLY
1669 	if (v3)
1670 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1671 #endif
1672 	nfsm_reqdone;
1673 	if (error) {
1674 		/*
1675 		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1676 		 */
1677 		if (v3 && error == ENOTSUP) {
1678 			if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1679 				excl_mode = NFSV3CREATE_GUARDED;
1680 				goto again;
1681 			} else if (excl_mode == NFSV3CREATE_GUARDED) {
1682 				excl_mode = NFSV3CREATE_UNCHECKED;
1683 				goto again;
1684 			}
1685 		}
1686 	} else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
1687 		struct timespec ts;
1688 
1689 		getnanotime(&ts);
1690 
1691 		/*
1692 		 * make sure that we'll update timestamps as
1693 		 * most server implementations use them to store
1694 		 * the create verifier.
1695 		 *
1696 		 * XXX it's better to use TOSERVER always.
1697 		 */
1698 
1699 		if (vap->va_atime.tv_sec == VNOVAL)
1700 			vap->va_atime = ts;
1701 		if (vap->va_mtime.tv_sec == VNOVAL)
1702 			vap->va_mtime = ts;
1703 
1704 		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
1705 	}
1706 	if (error == 0) {
1707 		if (cnp->cn_flags & MAKEENTRY)
1708 			nfs_cache_enter(dvp, newvp, cnp);
1709 		else
1710 			cache_purge1(dvp, cnp, 0);
1711 		*ap->a_vpp = newvp;
1712 	} else {
1713 		if (newvp)
1714 			vput(newvp);
1715 		if (error == EEXIST)
1716 			cache_purge1(dvp, cnp, 0);
1717 	}
1718 	VTONFS(dvp)->n_flag |= NMODIFIED;
1719 	if (!wccflag)
1720 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1721 	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
1722 	vput(dvp);
1723 	return (error);
1724 }
1725 
1726 /*
1727  * nfs file remove call
1728  * To try and make nfs semantics closer to ufs semantics, a file that has
1729  * other processes using the vnode is renamed instead of removed and then
1730  * removed later on the last close.
1731  * - If v_usecount > 1
1732  *	  If a rename is not already in the works
1733  *	     call nfs_sillyrename() to set it up
1734  *     else
1735  *	  do the remove rpc
1736  */
1737 int
1738 nfs_remove(void *v)
1739 {
1740 	struct vop_remove_args /* {
1741 		struct vnodeop_desc *a_desc;
1742 		struct vnode * a_dvp;
1743 		struct vnode * a_vp;
1744 		struct componentname * a_cnp;
1745 	} */ *ap = v;
1746 	struct vnode *vp = ap->a_vp;
1747 	struct vnode *dvp = ap->a_dvp;
1748 	struct componentname *cnp = ap->a_cnp;
1749 	struct nfsnode *np = VTONFS(vp);
1750 	int error = 0;
1751 	struct vattr vattr;
1752 
1753 #ifndef DIAGNOSTIC
1754 	if (vp->v_usecount < 1)
1755 		panic("nfs_remove: bad v_usecount");
1756 #endif
1757 	if (vp->v_type == VDIR)
1758 		error = EPERM;
1759 	else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1760 	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
1761 	    vattr.va_nlink > 1)) {
1762 		/*
1763 		 * Purge the name cache so that the chance of a lookup for
1764 		 * the name succeeding while the remove is in progress is
1765 		 * minimized. Without node locking it can still happen, such
1766 		 * that an I/O op returns ESTALE, but since you get this if
1767 		 * another host removes the file..
1768 		 */
1769 		cache_purge(vp);
1770 		/*
1771 		 * throw away biocache buffers, mainly to avoid
1772 		 * unnecessary delayed writes later.
1773 		 */
1774 		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
1775 		/* Do the rpc */
1776 		if (error != EINTR)
1777 			error = nfs_removerpc(dvp, cnp->cn_nameptr,
1778 				cnp->cn_namelen, cnp->cn_cred, curlwp);
1779 	} else if (!np->n_sillyrename)
1780 		error = nfs_sillyrename(dvp, vp, cnp, false);
1781 	if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
1782 	    vattr.va_nlink == 1) {
1783 		np->n_flag |= NREMOVED;
1784 	}
1785 	NFS_INVALIDATE_ATTRCACHE(np);
1786 	VN_KNOTE(vp, NOTE_DELETE);
1787 	VN_KNOTE(dvp, NOTE_WRITE);
1788 	if (dvp == vp)
1789 		vrele(vp);
1790 	else
1791 		vput(vp);
1792 	vput(dvp);
1793 	return (error);
1794 }
1795 
1796 /*
1797  * nfs file remove rpc called from nfs_inactive
1798  */
1799 int
1800 nfs_removeit(struct sillyrename *sp)
1801 {
1802 
1803 	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1804 		(struct lwp *)0));
1805 }
1806 
1807 /*
1808  * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1809  */
1810 int
1811 nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
1812 {
1813 	u_int32_t *tl;
1814 	char *cp;
1815 #ifndef NFS_V2_ONLY
1816 	int32_t t1;
1817 	char *cp2;
1818 #endif
1819 	int32_t t2;
1820 	char *bpos, *dpos;
1821 	int error = 0, wccflag = NFSV3_WCCRATTR;
1822 	struct mbuf *mreq, *mrep, *md, *mb;
1823 	const int v3 = NFS_ISV3(dvp);
1824 	int rexmit = 0;
1825 	struct nfsnode *dnp = VTONFS(dvp);
1826 
1827 	nfsstats.rpccnt[NFSPROC_REMOVE]++;
1828 	nfsm_reqhead(dnp, NFSPROC_REMOVE,
1829 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1830 	nfsm_fhtom(dnp, v3);
1831 	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1832 	nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1833 #ifndef NFS_V2_ONLY
1834 	if (v3)
1835 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1836 #endif
1837 	nfsm_reqdone;
1838 	VTONFS(dvp)->n_flag |= NMODIFIED;
1839 	if (!wccflag)
1840 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1841 	/*
1842 	 * Kludge City: If the first reply to the remove rpc is lost..
1843 	 *   the reply to the retransmitted request will be ENOENT
1844 	 *   since the file was in fact removed
1845 	 *   Therefore, we cheat and return success.
1846 	 */
1847 	if (rexmit && error == ENOENT)
1848 		error = 0;
1849 	return (error);
1850 }
1851 
1852 /*
1853  * nfs file rename call
1854  */
1855 int
1856 nfs_rename(void *v)
1857 {
1858 	struct vop_rename_args  /* {
1859 		struct vnode *a_fdvp;
1860 		struct vnode *a_fvp;
1861 		struct componentname *a_fcnp;
1862 		struct vnode *a_tdvp;
1863 		struct vnode *a_tvp;
1864 		struct componentname *a_tcnp;
1865 	} */ *ap = v;
1866 	struct vnode *fvp = ap->a_fvp;
1867 	struct vnode *tvp = ap->a_tvp;
1868 	struct vnode *fdvp = ap->a_fdvp;
1869 	struct vnode *tdvp = ap->a_tdvp;
1870 	struct componentname *tcnp = ap->a_tcnp;
1871 	struct componentname *fcnp = ap->a_fcnp;
1872 	int error;
1873 
1874 	/* Check for cross-device rename */
1875 	if ((fvp->v_mount != tdvp->v_mount) ||
1876 	    (tvp && (fvp->v_mount != tvp->v_mount))) {
1877 		error = EXDEV;
1878 		goto out;
1879 	}
1880 
1881 	/*
1882 	 * If the tvp exists and is in use, sillyrename it before doing the
1883 	 * rename of the new file over it.
1884 	 *
1885 	 * Have sillyrename use link instead of rename if possible,
1886 	 * so that we don't lose the file if the rename fails, and so
1887 	 * that there's no window when the "to" file doesn't exist.
1888 	 */
1889 	if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1890 	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
1891 		VN_KNOTE(tvp, NOTE_DELETE);
1892 		vput(tvp);
1893 		tvp = NULL;
1894 	}
1895 
1896 	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1897 		tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1898 		curlwp);
1899 
1900 	VN_KNOTE(fdvp, NOTE_WRITE);
1901 	VN_KNOTE(tdvp, NOTE_WRITE);
1902 	if (error == 0 || error == EEXIST) {
1903 		if (fvp->v_type == VDIR)
1904 			cache_purge(fvp);
1905 		else
1906 			cache_purge1(fdvp, fcnp, 0);
1907 		if (tvp != NULL && tvp->v_type == VDIR)
1908 			cache_purge(tvp);
1909 		else
1910 			cache_purge1(tdvp, tcnp, 0);
1911 	}
1912 out:
1913 	if (tdvp == tvp)
1914 		vrele(tdvp);
1915 	else
1916 		vput(tdvp);
1917 	if (tvp)
1918 		vput(tvp);
1919 	vrele(fdvp);
1920 	vrele(fvp);
1921 	return (error);
1922 }
1923 
1924 /*
1925  * nfs file rename rpc called from nfs_remove() above
1926  */
1927 int
1928 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
1929 {
1930 	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1931 		sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
1932 }
1933 
1934 /*
1935  * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1936  */
1937 int
1938 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l)
1939 {
1940 	u_int32_t *tl;
1941 	char *cp;
1942 #ifndef NFS_V2_ONLY
1943 	int32_t t1;
1944 	char *cp2;
1945 #endif
1946 	int32_t t2;
1947 	char *bpos, *dpos;
1948 	int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1949 	struct mbuf *mreq, *mrep, *md, *mb;
1950 	const int v3 = NFS_ISV3(fdvp);
1951 	int rexmit = 0;
1952 	struct nfsnode *fdnp = VTONFS(fdvp);
1953 
1954 	nfsstats.rpccnt[NFSPROC_RENAME]++;
1955 	nfsm_reqhead(fdnp, NFSPROC_RENAME,
1956 		(NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1957 		nfsm_rndup(tnamelen));
1958 	nfsm_fhtom(fdnp, v3);
1959 	nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1960 	nfsm_fhtom(VTONFS(tdvp), v3);
1961 	nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1962 	nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
1963 #ifndef NFS_V2_ONLY
1964 	if (v3) {
1965 		nfsm_wcc_data(fdvp, fwccflag, 0, !error);
1966 		nfsm_wcc_data(tdvp, twccflag, 0, !error);
1967 	}
1968 #endif
1969 	nfsm_reqdone;
1970 	VTONFS(fdvp)->n_flag |= NMODIFIED;
1971 	VTONFS(tdvp)->n_flag |= NMODIFIED;
1972 	if (!fwccflag)
1973 		NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
1974 	if (!twccflag)
1975 		NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
1976 	/*
1977 	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1978 	 */
1979 	if (rexmit && error == ENOENT)
1980 		error = 0;
1981 	return (error);
1982 }
1983 
1984 /*
1985  * NFS link RPC, called from nfs_link.
1986  * Assumes dvp and vp locked, and leaves them that way.
1987  */
1988 
1989 static int
1990 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
1991     size_t namelen, kauth_cred_t cred, struct lwp *l)
1992 {
1993 	u_int32_t *tl;
1994 	char *cp;
1995 #ifndef NFS_V2_ONLY
1996 	int32_t t1;
1997 	char *cp2;
1998 #endif
1999 	int32_t t2;
2000 	char *bpos, *dpos;
2001 	int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
2002 	struct mbuf *mreq, *mrep, *md, *mb;
2003 	const int v3 = NFS_ISV3(dvp);
2004 	int rexmit = 0;
2005 	struct nfsnode *np = VTONFS(vp);
2006 
2007 	nfsstats.rpccnt[NFSPROC_LINK]++;
2008 	nfsm_reqhead(np, NFSPROC_LINK,
2009 	    NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
2010 	nfsm_fhtom(np, v3);
2011 	nfsm_fhtom(VTONFS(dvp), v3);
2012 	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
2013 	nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
2014 #ifndef NFS_V2_ONLY
2015 	if (v3) {
2016 		nfsm_postop_attr(vp, attrflag, 0);
2017 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2018 	}
2019 #endif
2020 	nfsm_reqdone;
2021 
2022 	VTONFS(dvp)->n_flag |= NMODIFIED;
2023 	if (!attrflag)
2024 		NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
2025 	if (!wccflag)
2026 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2027 
2028 	/*
2029 	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2030 	 */
2031 	if (rexmit && error == EEXIST)
2032 		error = 0;
2033 
2034 	return error;
2035 }
2036 
2037 /*
2038  * nfs hard link create call
2039  */
2040 int
2041 nfs_link(void *v)
2042 {
2043 	struct vop_link_args /* {
2044 		struct vnode *a_dvp;
2045 		struct vnode *a_vp;
2046 		struct componentname *a_cnp;
2047 	} */ *ap = v;
2048 	struct vnode *vp = ap->a_vp;
2049 	struct vnode *dvp = ap->a_dvp;
2050 	struct componentname *cnp = ap->a_cnp;
2051 	int error = 0;
2052 
2053 	error = vn_lock(vp, LK_EXCLUSIVE);
2054 	if (error != 0) {
2055 		VOP_ABORTOP(dvp, cnp);
2056 		vput(dvp);
2057 		return error;
2058 	}
2059 
2060 	/*
2061 	 * Push all writes to the server, so that the attribute cache
2062 	 * doesn't get "out of sync" with the server.
2063 	 * XXX There should be a better way!
2064 	 */
2065 	VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0);
2066 
2067 	error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2068 	    cnp->cn_cred, curlwp);
2069 
2070 	if (error == 0) {
2071 		cache_purge1(dvp, cnp, 0);
2072 	}
2073 	VOP_UNLOCK(vp);
2074 	VN_KNOTE(vp, NOTE_LINK);
2075 	VN_KNOTE(dvp, NOTE_WRITE);
2076 	vput(dvp);
2077 	return (error);
2078 }
2079 
2080 /*
2081  * nfs symbolic link create call
2082  */
2083 int
2084 nfs_symlink(void *v)
2085 {
2086 	struct vop_symlink_args /* {
2087 		struct vnode *a_dvp;
2088 		struct vnode **a_vpp;
2089 		struct componentname *a_cnp;
2090 		struct vattr *a_vap;
2091 		char *a_target;
2092 	} */ *ap = v;
2093 	struct vnode *dvp = ap->a_dvp;
2094 	struct vattr *vap = ap->a_vap;
2095 	struct componentname *cnp = ap->a_cnp;
2096 	struct nfsv2_sattr *sp;
2097 	u_int32_t *tl;
2098 	char *cp;
2099 	int32_t t1, t2;
2100 	char *bpos, *dpos, *cp2;
2101 	int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2102 	struct mbuf *mreq, *mrep, *md, *mb;
2103 	struct vnode *newvp = (struct vnode *)0;
2104 	const int v3 = NFS_ISV3(dvp);
2105 	int rexmit = 0;
2106 	struct nfsnode *dnp = VTONFS(dvp);
2107 
2108 	*ap->a_vpp = NULL;
2109 	nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2110 	slen = strlen(ap->a_target);
2111 	nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2112 	    nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2113 	nfsm_fhtom(dnp, v3);
2114 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2115 #ifndef NFS_V2_ONlY
2116 	if (v3)
2117 		nfsm_v3attrbuild(vap, false);
2118 #endif
2119 	nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2120 #ifndef NFS_V2_ONlY
2121 	if (!v3) {
2122 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2123 		sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2124 		sp->sa_uid = nfs_xdrneg1;
2125 		sp->sa_gid = nfs_xdrneg1;
2126 		sp->sa_size = nfs_xdrneg1;
2127 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2128 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2129 	}
2130 #endif
2131 	nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred,
2132 	    &rexmit);
2133 #ifndef NFS_V2_ONlY
2134 	if (v3) {
2135 		if (!error)
2136 			nfsm_mtofh(dvp, newvp, v3, gotvp);
2137 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2138 	}
2139 #endif
2140 	nfsm_reqdone;
2141 	/*
2142 	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2143 	 */
2144 	if (rexmit && error == EEXIST)
2145 		error = 0;
2146 	if (error == 0 || error == EEXIST)
2147 		cache_purge1(dvp, cnp, 0);
2148 	if (error == 0 && newvp == NULL) {
2149 		struct nfsnode *np = NULL;
2150 
2151 		error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2152 		    cnp->cn_cred, curlwp, &np);
2153 		if (error == 0)
2154 			newvp = NFSTOV(np);
2155 	}
2156 	if (error) {
2157 		if (newvp != NULL)
2158 			vput(newvp);
2159 	} else {
2160 		*ap->a_vpp = newvp;
2161 	}
2162 	VTONFS(dvp)->n_flag |= NMODIFIED;
2163 	if (!wccflag)
2164 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2165 	VN_KNOTE(dvp, NOTE_WRITE);
2166 	vput(dvp);
2167 	return (error);
2168 }
2169 
2170 /*
2171  * nfs make dir call
2172  */
2173 int
2174 nfs_mkdir(void *v)
2175 {
2176 	struct vop_mkdir_args /* {
2177 		struct vnode *a_dvp;
2178 		struct vnode **a_vpp;
2179 		struct componentname *a_cnp;
2180 		struct vattr *a_vap;
2181 	} */ *ap = v;
2182 	struct vnode *dvp = ap->a_dvp;
2183 	struct vattr *vap = ap->a_vap;
2184 	struct componentname *cnp = ap->a_cnp;
2185 	struct nfsv2_sattr *sp;
2186 	u_int32_t *tl;
2187 	char *cp;
2188 	int32_t t1, t2;
2189 	int len;
2190 	struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2191 	struct vnode *newvp = (struct vnode *)0;
2192 	char *bpos, *dpos, *cp2;
2193 	int error = 0, wccflag = NFSV3_WCCRATTR;
2194 	int gotvp = 0;
2195 	int rexmit = 0;
2196 	struct mbuf *mreq, *mrep, *md, *mb;
2197 	const int v3 = NFS_ISV3(dvp);
2198 
2199 	len = cnp->cn_namelen;
2200 	nfsstats.rpccnt[NFSPROC_MKDIR]++;
2201 	nfsm_reqhead(dnp, NFSPROC_MKDIR,
2202 	  NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2203 	nfsm_fhtom(dnp, v3);
2204 	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2205 #ifndef NFS_V2_ONLY
2206 	if (v3) {
2207 		nfsm_v3attrbuild(vap, false);
2208 	} else
2209 #endif
2210 	{
2211 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2212 		sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2213 		sp->sa_uid = nfs_xdrneg1;
2214 		sp->sa_gid = nfs_xdrneg1;
2215 		sp->sa_size = nfs_xdrneg1;
2216 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2217 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2218 	}
2219 	nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit);
2220 	if (!error)
2221 		nfsm_mtofh(dvp, newvp, v3, gotvp);
2222 	if (v3)
2223 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2224 	nfsm_reqdone;
2225 	VTONFS(dvp)->n_flag |= NMODIFIED;
2226 	if (!wccflag)
2227 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2228 	/*
2229 	 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
2230 	 * if we can succeed in looking up the directory.
2231 	 */
2232 	if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2233 		if (newvp) {
2234 			vput(newvp);
2235 			newvp = (struct vnode *)0;
2236 		}
2237 		error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2238 			curlwp, &np);
2239 		if (!error) {
2240 			newvp = NFSTOV(np);
2241 			if (newvp->v_type != VDIR || newvp == dvp)
2242 				error = EEXIST;
2243 		}
2244 	}
2245 	if (error) {
2246 		if (newvp) {
2247 			if (dvp != newvp)
2248 				vput(newvp);
2249 			else
2250 				vrele(newvp);
2251 		}
2252 	} else {
2253 		VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2254 		if (cnp->cn_flags & MAKEENTRY)
2255 			nfs_cache_enter(dvp, newvp, cnp);
2256 		*ap->a_vpp = newvp;
2257 	}
2258 	vput(dvp);
2259 	return (error);
2260 }
2261 
2262 /*
2263  * nfs remove directory call
2264  */
2265 int
2266 nfs_rmdir(void *v)
2267 {
2268 	struct vop_rmdir_args /* {
2269 		struct vnode *a_dvp;
2270 		struct vnode *a_vp;
2271 		struct componentname *a_cnp;
2272 	} */ *ap = v;
2273 	struct vnode *vp = ap->a_vp;
2274 	struct vnode *dvp = ap->a_dvp;
2275 	struct componentname *cnp = ap->a_cnp;
2276 	u_int32_t *tl;
2277 	char *cp;
2278 #ifndef NFS_V2_ONLY
2279 	int32_t t1;
2280 	char *cp2;
2281 #endif
2282 	int32_t t2;
2283 	char *bpos, *dpos;
2284 	int error = 0, wccflag = NFSV3_WCCRATTR;
2285 	int rexmit = 0;
2286 	struct mbuf *mreq, *mrep, *md, *mb;
2287 	const int v3 = NFS_ISV3(dvp);
2288 	struct nfsnode *dnp;
2289 
2290 	if (dvp == vp) {
2291 		vrele(dvp);
2292 		vput(dvp);
2293 		return (EINVAL);
2294 	}
2295 	nfsstats.rpccnt[NFSPROC_RMDIR]++;
2296 	dnp = VTONFS(dvp);
2297 	nfsm_reqhead(dnp, NFSPROC_RMDIR,
2298 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2299 	nfsm_fhtom(dnp, v3);
2300 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2301 	nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit);
2302 #ifndef NFS_V2_ONLY
2303 	if (v3)
2304 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2305 #endif
2306 	nfsm_reqdone;
2307 	VTONFS(dvp)->n_flag |= NMODIFIED;
2308 	if (!wccflag)
2309 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2310 	VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2311 	VN_KNOTE(vp, NOTE_DELETE);
2312 	cache_purge(vp);
2313 	vput(vp);
2314 	vput(dvp);
2315 	/*
2316 	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2317 	 */
2318 	if (rexmit && error == ENOENT)
2319 		error = 0;
2320 	return (error);
2321 }
2322 
2323 /*
2324  * nfs readdir call
2325  */
2326 int
2327 nfs_readdir(void *v)
2328 {
2329 	struct vop_readdir_args /* {
2330 		struct vnode *a_vp;
2331 		struct uio *a_uio;
2332 		kauth_cred_t a_cred;
2333 		int *a_eofflag;
2334 		off_t **a_cookies;
2335 		int *a_ncookies;
2336 	} */ *ap = v;
2337 	struct vnode *vp = ap->a_vp;
2338 	struct uio *uio = ap->a_uio;
2339 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2340 	char *base = uio->uio_iov->iov_base;
2341 	int tresid, error;
2342 	size_t count, lost;
2343 	struct dirent *dp;
2344 	off_t *cookies = NULL;
2345 	int ncookies = 0, nc;
2346 
2347 	if (vp->v_type != VDIR)
2348 		return (EPERM);
2349 
2350 	lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2351 	count = uio->uio_resid - lost;
2352 	if (count <= 0)
2353 		return (EINVAL);
2354 
2355 	/*
2356 	 * Call nfs_bioread() to do the real work.
2357 	 */
2358 	tresid = uio->uio_resid = count;
2359 	error = nfs_bioread(vp, uio, 0, ap->a_cred,
2360 		    ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2361 
2362 	if (!error && ap->a_cookies) {
2363 		ncookies = count / 16;
2364 		cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2365 		*ap->a_cookies = cookies;
2366 	}
2367 
2368 	if (!error && uio->uio_resid == tresid) {
2369 		uio->uio_resid += lost;
2370 		nfsstats.direofcache_misses++;
2371 		if (ap->a_cookies)
2372 			*ap->a_ncookies = 0;
2373 		*ap->a_eofflag = 1;
2374 		return (0);
2375 	}
2376 
2377 	if (!error && ap->a_cookies) {
2378 		/*
2379 		 * Only the NFS server and emulations use cookies, and they
2380 		 * load the directory block into system space, so we can
2381 		 * just look at it directly.
2382 		 */
2383 		if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2384 		    uio->uio_iovcnt != 1)
2385 			panic("nfs_readdir: lost in space");
2386 		for (nc = 0; ncookies-- &&
2387 		     base < (char *)uio->uio_iov->iov_base; nc++){
2388 			dp = (struct dirent *) base;
2389 			if (dp->d_reclen == 0)
2390 				break;
2391 			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2392 				*(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2393 			else
2394 				*(cookies++) = NFS_GETCOOKIE(dp);
2395 			base += dp->d_reclen;
2396 		}
2397 		uio->uio_resid +=
2398 		    ((char *)uio->uio_iov->iov_base - base);
2399 		uio->uio_iov->iov_len +=
2400 		    ((char *)uio->uio_iov->iov_base - base);
2401 		uio->uio_iov->iov_base = base;
2402 		*ap->a_ncookies = nc;
2403 	}
2404 
2405 	uio->uio_resid += lost;
2406 	*ap->a_eofflag = 0;
2407 	return (error);
2408 }
2409 
2410 /*
2411  * Readdir rpc call.
2412  * Called from below the buffer cache by nfs_doio().
2413  */
2414 int
2415 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2416 {
2417 	int len, left;
2418 	struct dirent *dp = NULL;
2419 	u_int32_t *tl;
2420 	char *cp;
2421 	int32_t t1, t2;
2422 	char *bpos, *dpos, *cp2;
2423 	struct mbuf *mreq, *mrep, *md, *mb;
2424 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2425 	struct nfsnode *dnp = VTONFS(vp);
2426 	u_quad_t fileno;
2427 	int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2428 #ifndef NFS_V2_ONLY
2429 	int attrflag;
2430 #endif
2431 	int nrpcs = 0, reclen;
2432 	const int v3 = NFS_ISV3(vp);
2433 
2434 #ifdef DIAGNOSTIC
2435 	/*
2436 	 * Should be called from buffer cache, so only amount of
2437 	 * NFS_DIRBLKSIZ will be requested.
2438 	 */
2439 	if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2440 		panic("nfs readdirrpc bad uio");
2441 #endif
2442 
2443 	/*
2444 	 * Loop around doing readdir rpc's of size nm_readdirsize
2445 	 * truncated to a multiple of NFS_DIRFRAGSIZ.
2446 	 * The stopping criteria is EOF or buffer full.
2447 	 */
2448 	while (more_dirs && bigenough) {
2449 		/*
2450 		 * Heuristic: don't bother to do another RPC to further
2451 		 * fill up this block if there is not much room left. (< 50%
2452 		 * of the readdir RPC size). This wastes some buffer space
2453 		 * but can save up to 50% in RPC calls.
2454 		 */
2455 		if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2456 			bigenough = 0;
2457 			break;
2458 		}
2459 		nfsstats.rpccnt[NFSPROC_READDIR]++;
2460 		nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2461 			NFSX_READDIR(v3));
2462 		nfsm_fhtom(dnp, v3);
2463 #ifndef NFS_V2_ONLY
2464 		if (v3) {
2465 			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2466 			if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2467 				txdr_swapcookie3(uiop->uio_offset, tl);
2468 			} else {
2469 				txdr_cookie3(uiop->uio_offset, tl);
2470 			}
2471 			tl += 2;
2472 			*tl++ = dnp->n_cookieverf.nfsuquad[0];
2473 			*tl++ = dnp->n_cookieverf.nfsuquad[1];
2474 		} else
2475 #endif
2476 		{
2477 			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2478 			*tl++ = txdr_unsigned(uiop->uio_offset);
2479 		}
2480 		*tl = txdr_unsigned(nmp->nm_readdirsize);
2481 		nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2482 		nrpcs++;
2483 #ifndef NFS_V2_ONLY
2484 		if (v3) {
2485 			nfsm_postop_attr(vp, attrflag, 0);
2486 			if (!error) {
2487 				nfsm_dissect(tl, u_int32_t *,
2488 				    2 * NFSX_UNSIGNED);
2489 				dnp->n_cookieverf.nfsuquad[0] = *tl++;
2490 				dnp->n_cookieverf.nfsuquad[1] = *tl;
2491 			} else {
2492 				m_freem(mrep);
2493 				goto nfsmout;
2494 			}
2495 		}
2496 #endif
2497 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2498 		more_dirs = fxdr_unsigned(int, *tl);
2499 
2500 		/* loop thru the dir entries, doctoring them to 4bsd form */
2501 		while (more_dirs && bigenough) {
2502 #ifndef NFS_V2_ONLY
2503 			if (v3) {
2504 				nfsm_dissect(tl, u_int32_t *,
2505 				    3 * NFSX_UNSIGNED);
2506 				fileno = fxdr_hyper(tl);
2507 				len = fxdr_unsigned(int, *(tl + 2));
2508 			} else
2509 #endif
2510 			{
2511 				nfsm_dissect(tl, u_int32_t *,
2512 				    2 * NFSX_UNSIGNED);
2513 				fileno = fxdr_unsigned(u_quad_t, *tl++);
2514 				len = fxdr_unsigned(int, *tl);
2515 			}
2516 			if (len <= 0 || len > NFS_MAXNAMLEN) {
2517 				error = EBADRPC;
2518 				m_freem(mrep);
2519 				goto nfsmout;
2520 			}
2521 			/* for cookie stashing */
2522 			reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2523 			left = NFS_DIRFRAGSIZ - blksiz;
2524 			if (reclen > left) {
2525 				memset(uiop->uio_iov->iov_base, 0, left);
2526 				dp->d_reclen += left;
2527 				UIO_ADVANCE(uiop, left);
2528 				blksiz = 0;
2529 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2530 			}
2531 			if (reclen > uiop->uio_resid)
2532 				bigenough = 0;
2533 			if (bigenough) {
2534 				int tlen;
2535 
2536 				dp = (struct dirent *)uiop->uio_iov->iov_base;
2537 				dp->d_fileno = fileno;
2538 				dp->d_namlen = len;
2539 				dp->d_reclen = reclen;
2540 				dp->d_type = DT_UNKNOWN;
2541 				blksiz += reclen;
2542 				if (blksiz == NFS_DIRFRAGSIZ)
2543 					blksiz = 0;
2544 				UIO_ADVANCE(uiop, DIRHDSIZ);
2545 				nfsm_mtouio(uiop, len);
2546 				tlen = reclen - (DIRHDSIZ + len);
2547 				(void)memset(uiop->uio_iov->iov_base, 0, tlen);
2548 				UIO_ADVANCE(uiop, tlen);
2549 			} else
2550 				nfsm_adv(nfsm_rndup(len));
2551 #ifndef NFS_V2_ONLY
2552 			if (v3) {
2553 				nfsm_dissect(tl, u_int32_t *,
2554 				    3 * NFSX_UNSIGNED);
2555 			} else
2556 #endif
2557 			{
2558 				nfsm_dissect(tl, u_int32_t *,
2559 				    2 * NFSX_UNSIGNED);
2560 			}
2561 			if (bigenough) {
2562 #ifndef NFS_V2_ONLY
2563 				if (v3) {
2564 					if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2565 						uiop->uio_offset =
2566 						    fxdr_swapcookie3(tl);
2567 					else
2568 						uiop->uio_offset =
2569 						    fxdr_cookie3(tl);
2570 				}
2571 				else
2572 #endif
2573 				{
2574 					uiop->uio_offset =
2575 					    fxdr_unsigned(off_t, *tl);
2576 				}
2577 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2578 			}
2579 			if (v3)
2580 				tl += 2;
2581 			else
2582 				tl++;
2583 			more_dirs = fxdr_unsigned(int, *tl);
2584 		}
2585 		/*
2586 		 * If at end of rpc data, get the eof boolean
2587 		 */
2588 		if (!more_dirs) {
2589 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2590 			more_dirs = (fxdr_unsigned(int, *tl) == 0);
2591 
2592 			/*
2593 			 * kludge: if we got no entries, treat it as EOF.
2594 			 * some server sometimes send a reply without any
2595 			 * entries or EOF.
2596 			 * although it might mean the server has very long name,
2597 			 * we can't handle such entries anyway.
2598 			 */
2599 
2600 			if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2601 				more_dirs = 0;
2602 		}
2603 		m_freem(mrep);
2604 	}
2605 	/*
2606 	 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ
2607 	 * by increasing d_reclen for the last record.
2608 	 */
2609 	if (blksiz > 0) {
2610 		left = NFS_DIRFRAGSIZ - blksiz;
2611 		memset(uiop->uio_iov->iov_base, 0, left);
2612 		dp->d_reclen += left;
2613 		NFS_STASHCOOKIE(dp, uiop->uio_offset);
2614 		UIO_ADVANCE(uiop, left);
2615 	}
2616 
2617 	/*
2618 	 * We are now either at the end of the directory or have filled the
2619 	 * block.
2620 	 */
2621 	if (bigenough) {
2622 		dnp->n_direofoffset = uiop->uio_offset;
2623 		dnp->n_flag |= NEOFVALID;
2624 	}
2625 nfsmout:
2626 	return (error);
2627 }
2628 
2629 #ifndef NFS_V2_ONLY
2630 /*
2631  * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2632  */
2633 int
2634 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2635 {
2636 	int len, left;
2637 	struct dirent *dp = NULL;
2638 	u_int32_t *tl;
2639 	char *cp;
2640 	int32_t t1, t2;
2641 	struct vnode *newvp;
2642 	char *bpos, *dpos, *cp2;
2643 	struct mbuf *mreq, *mrep, *md, *mb;
2644 	struct nameidata nami, *ndp = &nami;
2645 	struct componentname *cnp = &ndp->ni_cnd;
2646 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2647 	struct nfsnode *dnp = VTONFS(vp), *np;
2648 	nfsfh_t *fhp;
2649 	u_quad_t fileno;
2650 	int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2651 	int attrflag, fhsize, nrpcs = 0, reclen;
2652 	struct nfs_fattr fattr, *fp;
2653 
2654 #ifdef DIAGNOSTIC
2655 	if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2656 		panic("nfs readdirplusrpc bad uio");
2657 #endif
2658 	ndp->ni_dvp = vp;
2659 	newvp = NULLVP;
2660 
2661 	/*
2662 	 * Loop around doing readdir rpc's of size nm_readdirsize
2663 	 * truncated to a multiple of NFS_DIRFRAGSIZ.
2664 	 * The stopping criteria is EOF or buffer full.
2665 	 */
2666 	while (more_dirs && bigenough) {
2667 		if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2668 			bigenough = 0;
2669 			break;
2670 		}
2671 		nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2672 		nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2673 			NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2674 		nfsm_fhtom(dnp, 1);
2675  		nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2676 		if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2677 			txdr_swapcookie3(uiop->uio_offset, tl);
2678 		} else {
2679 			txdr_cookie3(uiop->uio_offset, tl);
2680 		}
2681 		tl += 2;
2682 		*tl++ = dnp->n_cookieverf.nfsuquad[0];
2683 		*tl++ = dnp->n_cookieverf.nfsuquad[1];
2684 		*tl++ = txdr_unsigned(nmp->nm_readdirsize);
2685 		*tl = txdr_unsigned(nmp->nm_rsize);
2686 		nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2687 		nfsm_postop_attr(vp, attrflag, 0);
2688 		if (error) {
2689 			m_freem(mrep);
2690 			goto nfsmout;
2691 		}
2692 		nrpcs++;
2693 		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2694 		dnp->n_cookieverf.nfsuquad[0] = *tl++;
2695 		dnp->n_cookieverf.nfsuquad[1] = *tl++;
2696 		more_dirs = fxdr_unsigned(int, *tl);
2697 
2698 		/* loop thru the dir entries, doctoring them to 4bsd form */
2699 		while (more_dirs && bigenough) {
2700 			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2701 			fileno = fxdr_hyper(tl);
2702 			len = fxdr_unsigned(int, *(tl + 2));
2703 			if (len <= 0 || len > NFS_MAXNAMLEN) {
2704 				error = EBADRPC;
2705 				m_freem(mrep);
2706 				goto nfsmout;
2707 			}
2708 			/* for cookie stashing */
2709 			reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2710 			left = NFS_DIRFRAGSIZ - blksiz;
2711 			if (reclen > left) {
2712 				/*
2713 				 * DIRFRAGSIZ is aligned, no need to align
2714 				 * again here.
2715 				 */
2716 				memset(uiop->uio_iov->iov_base, 0, left);
2717 				dp->d_reclen += left;
2718 				UIO_ADVANCE(uiop, left);
2719 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2720 				blksiz = 0;
2721 			}
2722 			if (reclen > uiop->uio_resid)
2723 				bigenough = 0;
2724 			if (bigenough) {
2725 				int tlen;
2726 
2727 				dp = (struct dirent *)uiop->uio_iov->iov_base;
2728 				dp->d_fileno = fileno;
2729 				dp->d_namlen = len;
2730 				dp->d_reclen = reclen;
2731 				dp->d_type = DT_UNKNOWN;
2732 				blksiz += reclen;
2733 				if (blksiz == NFS_DIRFRAGSIZ)
2734 					blksiz = 0;
2735 				UIO_ADVANCE(uiop, DIRHDSIZ);
2736 				nfsm_mtouio(uiop, len);
2737 				tlen = reclen - (DIRHDSIZ + len);
2738 				(void)memset(uiop->uio_iov->iov_base, 0, tlen);
2739 				UIO_ADVANCE(uiop, tlen);
2740 				cnp->cn_nameptr = dp->d_name;
2741 				cnp->cn_namelen = dp->d_namlen;
2742 			} else
2743 				nfsm_adv(nfsm_rndup(len));
2744 			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2745 			if (bigenough) {
2746 				if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2747 					uiop->uio_offset =
2748 						fxdr_swapcookie3(tl);
2749 				else
2750 					uiop->uio_offset =
2751 						fxdr_cookie3(tl);
2752 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2753 			}
2754 			tl += 2;
2755 
2756 			/*
2757 			 * Since the attributes are before the file handle
2758 			 * (sigh), we must skip over the attributes and then
2759 			 * come back and get them.
2760 			 */
2761 			attrflag = fxdr_unsigned(int, *tl);
2762 			if (attrflag) {
2763 			    nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2764 			    memcpy(&fattr, fp, NFSX_V3FATTR);
2765 			    nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2766 			    doit = fxdr_unsigned(int, *tl);
2767 			    if (doit) {
2768 				nfsm_getfh(fhp, fhsize, 1);
2769 				if (NFS_CMPFH(dnp, fhp, fhsize)) {
2770 				    vref(vp);
2771 				    newvp = vp;
2772 				    np = dnp;
2773 				} else {
2774 				    error = nfs_nget1(vp->v_mount, fhp,
2775 					fhsize, &np, LK_NOWAIT);
2776 				    if (!error)
2777 					newvp = NFSTOV(np);
2778 				}
2779 				if (!error) {
2780 				    const char *xcp;
2781 
2782 				    nfs_loadattrcache(&newvp, &fattr, 0, 0);
2783 				    if (bigenough) {
2784 					dp->d_type =
2785 					   IFTODT(VTTOIF(np->n_vattr->va_type));
2786 					if (cnp->cn_namelen <= NCHNAMLEN) {
2787 					    ndp->ni_vp = newvp;
2788 					    xcp = cnp->cn_nameptr +
2789 						cnp->cn_namelen;
2790 					    cnp->cn_hash =
2791 					       namei_hash(cnp->cn_nameptr, &xcp);
2792 					    nfs_cache_enter(ndp->ni_dvp,
2793 						ndp->ni_vp, cnp);
2794 					}
2795 				    }
2796 				}
2797 				error = 0;
2798 			   }
2799 			} else {
2800 			    /* Just skip over the file handle */
2801 			    nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2802 			    i = fxdr_unsigned(int, *tl);
2803 			    nfsm_adv(nfsm_rndup(i));
2804 			}
2805 			if (newvp != NULLVP) {
2806 			    if (newvp == vp)
2807 				vrele(newvp);
2808 			    else
2809 				vput(newvp);
2810 			    newvp = NULLVP;
2811 			}
2812 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2813 			more_dirs = fxdr_unsigned(int, *tl);
2814 		}
2815 		/*
2816 		 * If at end of rpc data, get the eof boolean
2817 		 */
2818 		if (!more_dirs) {
2819 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2820 			more_dirs = (fxdr_unsigned(int, *tl) == 0);
2821 
2822 			/*
2823 			 * kludge: see a comment in nfs_readdirrpc.
2824 			 */
2825 
2826 			if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2827 				more_dirs = 0;
2828 		}
2829 		m_freem(mrep);
2830 	}
2831 	/*
2832 	 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ
2833 	 * by increasing d_reclen for the last record.
2834 	 */
2835 	if (blksiz > 0) {
2836 		left = NFS_DIRFRAGSIZ - blksiz;
2837 		memset(uiop->uio_iov->iov_base, 0, left);
2838 		dp->d_reclen += left;
2839 		NFS_STASHCOOKIE(dp, uiop->uio_offset);
2840 		UIO_ADVANCE(uiop, left);
2841 	}
2842 
2843 	/*
2844 	 * We are now either at the end of the directory or have filled the
2845 	 * block.
2846 	 */
2847 	if (bigenough) {
2848 		dnp->n_direofoffset = uiop->uio_offset;
2849 		dnp->n_flag |= NEOFVALID;
2850 	}
2851 nfsmout:
2852 	if (newvp != NULLVP) {
2853 		if(newvp == vp)
2854 		    vrele(newvp);
2855 		else
2856 		    vput(newvp);
2857 	}
2858 	return (error);
2859 }
2860 #endif
2861 
2862 /*
2863  * Silly rename. To make the NFS filesystem that is stateless look a little
2864  * more like the "ufs" a remove of an active vnode is translated to a rename
2865  * to a funny looking filename that is removed by nfs_inactive on the
2866  * nfsnode. There is the potential for another process on a different client
2867  * to create the same funny name between the nfs_lookitup() fails and the
2868  * nfs_rename() completes, but...
2869  */
2870 int
2871 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink)
2872 {
2873 	struct sillyrename *sp;
2874 	struct nfsnode *np;
2875 	int error;
2876 	pid_t pid;
2877 
2878 	cache_purge(dvp);
2879 	np = VTONFS(vp);
2880 #ifndef DIAGNOSTIC
2881 	if (vp->v_type == VDIR)
2882 		panic("nfs: sillyrename dir");
2883 #endif
2884 	sp = kmem_alloc(sizeof(*sp), KM_SLEEP);
2885 	sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2886 	sp->s_dvp = dvp;
2887 	vref(dvp);
2888 
2889 	/* Fudge together a funny name */
2890 	pid = curlwp->l_proc->p_pid;
2891 	memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
2892 	sp->s_namlen = 12;
2893 	sp->s_name[8] = hexdigits[pid & 0xf];
2894 	sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
2895 	sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
2896 	sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
2897 
2898 	/* Try lookitups until we get one that isn't there */
2899 	while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2900 		curlwp, (struct nfsnode **)0) == 0) {
2901 		sp->s_name[4]++;
2902 		if (sp->s_name[4] > 'z') {
2903 			error = EINVAL;
2904 			goto bad;
2905 		}
2906 	}
2907 	if (dolink) {
2908 		error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
2909 		    sp->s_cred, curlwp);
2910 		/*
2911 		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
2912 		 */
2913 		if (error == ENOTSUP) {
2914 			error = nfs_renameit(dvp, cnp, sp);
2915 		}
2916 	} else {
2917 		error = nfs_renameit(dvp, cnp, sp);
2918 	}
2919 	if (error)
2920 		goto bad;
2921 	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2922 		curlwp, &np);
2923 	np->n_sillyrename = sp;
2924 	return (0);
2925 bad:
2926 	vrele(sp->s_dvp);
2927 	kauth_cred_free(sp->s_cred);
2928 	kmem_free(sp, sizeof(*sp));
2929 	return (error);
2930 }
2931 
2932 /*
2933  * Look up a file name and optionally either update the file handle or
2934  * allocate an nfsnode, depending on the value of npp.
2935  * npp == NULL	--> just do the lookup
2936  * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2937  *			handled too
2938  * *npp != NULL --> update the file handle in the vnode
2939  */
2940 int
2941 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp)
2942 {
2943 	u_int32_t *tl;
2944 	char *cp;
2945 	int32_t t1, t2;
2946 	struct vnode *newvp = (struct vnode *)0;
2947 	struct nfsnode *np, *dnp = VTONFS(dvp);
2948 	char *bpos, *dpos, *cp2;
2949 	int error = 0, fhlen;
2950 #ifndef NFS_V2_ONLY
2951 	int attrflag;
2952 #endif
2953 	struct mbuf *mreq, *mrep, *md, *mb;
2954 	nfsfh_t *nfhp;
2955 	const int v3 = NFS_ISV3(dvp);
2956 
2957 	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2958 	nfsm_reqhead(dnp, NFSPROC_LOOKUP,
2959 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2960 	nfsm_fhtom(dnp, v3);
2961 	nfsm_strtom(name, len, NFS_MAXNAMLEN);
2962 	nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
2963 	if (npp && !error) {
2964 		nfsm_getfh(nfhp, fhlen, v3);
2965 		if (*npp) {
2966 		    np = *npp;
2967 		    if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2968 			kmem_free(np->n_fhp, np->n_fhsize);
2969 			np->n_fhp = &np->n_fh;
2970 		    }
2971 #if NFS_SMALLFH < NFSX_V3FHMAX
2972 		    else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
2973 			np->n_fhp = kmem_alloc(fhlen, KM_SLEEP);
2974 #endif
2975 		    memcpy(np->n_fhp, nfhp, fhlen);
2976 		    np->n_fhsize = fhlen;
2977 		    newvp = NFSTOV(np);
2978 		} else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2979 		    vref(dvp);
2980 		    newvp = dvp;
2981 		    np = dnp;
2982 		} else {
2983 		    error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
2984 		    if (error) {
2985 			m_freem(mrep);
2986 			return (error);
2987 		    }
2988 		    newvp = NFSTOV(np);
2989 		}
2990 #ifndef NFS_V2_ONLY
2991 		if (v3) {
2992 			nfsm_postop_attr(newvp, attrflag, 0);
2993 			if (!attrflag && *npp == NULL) {
2994 				m_freem(mrep);
2995 				vput(newvp);
2996 				return (ENOENT);
2997 			}
2998 		} else
2999 #endif
3000 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
3001 	}
3002 	nfsm_reqdone;
3003 	if (npp && *npp == NULL) {
3004 		if (error) {
3005 			if (newvp)
3006 				vput(newvp);
3007 		} else
3008 			*npp = np;
3009 	}
3010 	return (error);
3011 }
3012 
3013 #ifndef NFS_V2_ONLY
3014 /*
3015  * Nfs Version 3 commit rpc
3016  */
3017 int
3018 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l)
3019 {
3020 	char *cp;
3021 	u_int32_t *tl;
3022 	int32_t t1, t2;
3023 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
3024 	char *bpos, *dpos, *cp2;
3025 	int error = 0, wccflag = NFSV3_WCCRATTR;
3026 	struct mbuf *mreq, *mrep, *md, *mb;
3027 	struct nfsnode *np;
3028 
3029 	KASSERT(NFS_ISV3(vp));
3030 
3031 #ifdef NFS_DEBUG_COMMIT
3032 	printf("commit %lu - %lu\n", (unsigned long)offset,
3033 	    (unsigned long)(offset + cnt));
3034 #endif
3035 
3036 	mutex_enter(&nmp->nm_lock);
3037 	if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3038 		mutex_exit(&nmp->nm_lock);
3039 		return (0);
3040 	}
3041 	mutex_exit(&nmp->nm_lock);
3042 	nfsstats.rpccnt[NFSPROC_COMMIT]++;
3043 	np = VTONFS(vp);
3044 	nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3045 	nfsm_fhtom(np, 1);
3046 	nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3047 	txdr_hyper(offset, tl);
3048 	tl += 2;
3049 	*tl = txdr_unsigned(cnt);
3050 	nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3051 	nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3052 	if (!error) {
3053 		nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
3054 		mutex_enter(&nmp->nm_lock);
3055 		if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3056 		    memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3057 			memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3058 			error = NFSERR_STALEWRITEVERF;
3059 			nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3060 		}
3061 		mutex_exit(&nmp->nm_lock);
3062 	}
3063 	nfsm_reqdone;
3064 	return (error);
3065 }
3066 #endif
3067 
3068 /*
3069  * Kludge City..
3070  * - make nfs_bmap() essentially a no-op that does no translation
3071  * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3072  *   (Maybe I could use the process's page mapping, but I was concerned that
3073  *    Kernel Write might not be enabled and also figured copyout() would do
3074  *    a lot more work than memcpy() and also it currently happens in the
3075  *    context of the swapper process (2).
3076  */
3077 int
3078 nfs_bmap(void *v)
3079 {
3080 	struct vop_bmap_args /* {
3081 		struct vnode *a_vp;
3082 		daddr_t  a_bn;
3083 		struct vnode **a_vpp;
3084 		daddr_t *a_bnp;
3085 		int *a_runp;
3086 	} */ *ap = v;
3087 	struct vnode *vp = ap->a_vp;
3088 	int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3089 
3090 	if (ap->a_vpp != NULL)
3091 		*ap->a_vpp = vp;
3092 	if (ap->a_bnp != NULL)
3093 		*ap->a_bnp = ap->a_bn << bshift;
3094 	if (ap->a_runp != NULL)
3095 		*ap->a_runp = 1024 * 1024; /* XXX */
3096 	return (0);
3097 }
3098 
3099 /*
3100  * Strategy routine.
3101  * For async requests when nfsiod(s) are running, queue the request by
3102  * calling nfs_asyncio(), otherwise just all nfs_doio() to do the
3103  * request.
3104  */
3105 int
3106 nfs_strategy(void *v)
3107 {
3108 	struct vop_strategy_args *ap = v;
3109 	struct buf *bp = ap->a_bp;
3110 	int error = 0;
3111 
3112 	if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3113 		panic("nfs physio/async");
3114 
3115 	/*
3116 	 * If the op is asynchronous and an i/o daemon is waiting
3117 	 * queue the request, wake it up and wait for completion
3118 	 * otherwise just do it ourselves.
3119 	 */
3120 	if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3121 		error = nfs_doio(bp);
3122 	return (error);
3123 }
3124 
3125 /*
3126  * fsync vnode op. Just call nfs_flush() with commit == 1.
3127  */
3128 /* ARGSUSED */
3129 int
3130 nfs_fsync(void *v)
3131 {
3132 	struct vop_fsync_args /* {
3133 		struct vnodeop_desc *a_desc;
3134 		struct vnode * a_vp;
3135 		kauth_cred_t  a_cred;
3136 		int  a_flags;
3137 		off_t offlo;
3138 		off_t offhi;
3139 		struct lwp * a_l;
3140 	} */ *ap = v;
3141 
3142 	struct vnode *vp = ap->a_vp;
3143 
3144 	if (vp->v_type != VREG)
3145 		return 0;
3146 
3147 	return (nfs_flush(vp, ap->a_cred,
3148 	    (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3149 }
3150 
3151 /*
3152  * Flush all the data associated with a vnode.
3153  */
3154 int
3155 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3156     int commit)
3157 {
3158 	struct nfsnode *np = VTONFS(vp);
3159 	int error;
3160 	int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3161 	UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3162 
3163 	mutex_enter(&vp->v_interlock);
3164 	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3165 	if (np->n_flag & NWRITEERR) {
3166 		error = np->n_error;
3167 		np->n_flag &= ~NWRITEERR;
3168 	}
3169 	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3170 	return (error);
3171 }
3172 
3173 /*
3174  * Return POSIX pathconf information applicable to nfs.
3175  *
3176  * N.B. The NFS V2 protocol doesn't support this RPC.
3177  */
3178 /* ARGSUSED */
3179 int
3180 nfs_pathconf(void *v)
3181 {
3182 	struct vop_pathconf_args /* {
3183 		struct vnode *a_vp;
3184 		int a_name;
3185 		register_t *a_retval;
3186 	} */ *ap = v;
3187 	struct nfsv3_pathconf *pcp;
3188 	struct vnode *vp = ap->a_vp;
3189 	struct mbuf *mreq, *mrep, *md, *mb;
3190 	int32_t t1, t2;
3191 	u_int32_t *tl;
3192 	char *bpos, *dpos, *cp, *cp2;
3193 	int error = 0, attrflag;
3194 #ifndef NFS_V2_ONLY
3195 	struct nfsmount *nmp;
3196 	unsigned int l;
3197 	u_int64_t maxsize;
3198 #endif
3199 	const int v3 = NFS_ISV3(vp);
3200 	struct nfsnode *np = VTONFS(vp);
3201 
3202 	switch (ap->a_name) {
3203 		/* Names that can be resolved locally. */
3204 	case _PC_PIPE_BUF:
3205 		*ap->a_retval = PIPE_BUF;
3206 		break;
3207 	case _PC_SYNC_IO:
3208 		*ap->a_retval = 1;
3209 		break;
3210 	/* Names that cannot be resolved locally; do an RPC, if possible. */
3211 	case _PC_LINK_MAX:
3212 	case _PC_NAME_MAX:
3213 	case _PC_CHOWN_RESTRICTED:
3214 	case _PC_NO_TRUNC:
3215 		if (!v3) {
3216 			error = EINVAL;
3217 			break;
3218 		}
3219 		nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3220 		nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3221 		nfsm_fhtom(np, 1);
3222 		nfsm_request(np, NFSPROC_PATHCONF,
3223 		    curlwp, curlwp->l_cred);	/* XXX */
3224 		nfsm_postop_attr(vp, attrflag, 0);
3225 		if (!error) {
3226 			nfsm_dissect(pcp, struct nfsv3_pathconf *,
3227 			    NFSX_V3PATHCONF);
3228 			switch (ap->a_name) {
3229 			case _PC_LINK_MAX:
3230 				*ap->a_retval =
3231 				    fxdr_unsigned(register_t, pcp->pc_linkmax);
3232 				break;
3233 			case _PC_NAME_MAX:
3234 				*ap->a_retval =
3235 				    fxdr_unsigned(register_t, pcp->pc_namemax);
3236 				break;
3237 			case _PC_CHOWN_RESTRICTED:
3238 				*ap->a_retval =
3239 				    (pcp->pc_chownrestricted == nfs_true);
3240 				break;
3241 			case _PC_NO_TRUNC:
3242 				*ap->a_retval =
3243 				    (pcp->pc_notrunc == nfs_true);
3244 				break;
3245 			}
3246 		}
3247 		nfsm_reqdone;
3248 		break;
3249 	case _PC_FILESIZEBITS:
3250 #ifndef NFS_V2_ONLY
3251 		if (v3) {
3252 			nmp = VFSTONFS(vp->v_mount);
3253 			if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3254 				if ((error = nfs_fsinfo(nmp, vp,
3255 				    curlwp->l_cred, curlwp)) != 0) /* XXX */
3256 					break;
3257 			for (l = 0, maxsize = nmp->nm_maxfilesize;
3258 			    (maxsize >> l) > 0; l++)
3259 				;
3260 			*ap->a_retval = l + 1;
3261 		} else
3262 #endif
3263 		{
3264 			*ap->a_retval = 32;	/* NFS V2 limitation */
3265 		}
3266 		break;
3267 	default:
3268 		error = EINVAL;
3269 		break;
3270 	}
3271 
3272 	return (error);
3273 }
3274 
3275 /*
3276  * NFS advisory byte-level locks.
3277  */
3278 int
3279 nfs_advlock(void *v)
3280 {
3281 	struct vop_advlock_args /* {
3282 		struct vnode *a_vp;
3283 		void *a_id;
3284 		int  a_op;
3285 		struct flock *a_fl;
3286 		int  a_flags;
3287 	} */ *ap = v;
3288 	struct nfsnode *np = VTONFS(ap->a_vp);
3289 
3290 	return lf_advlock(ap, &np->n_lockf, np->n_size);
3291 }
3292 
3293 /*
3294  * Print out the contents of an nfsnode.
3295  */
3296 int
3297 nfs_print(void *v)
3298 {
3299 	struct vop_print_args /* {
3300 		struct vnode *a_vp;
3301 	} */ *ap = v;
3302 	struct vnode *vp = ap->a_vp;
3303 	struct nfsnode *np = VTONFS(vp);
3304 
3305 	printf("tag VT_NFS, fileid %lld fsid 0x%llx",
3306 	    (unsigned long long)np->n_vattr->va_fileid,
3307 	    (unsigned long long)np->n_vattr->va_fsid);
3308 	if (vp->v_type == VFIFO)
3309 		VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
3310 	printf("\n");
3311 	return (0);
3312 }
3313 
3314 /*
3315  * nfs unlock wrapper.
3316  */
3317 int
3318 nfs_unlock(void *v)
3319 {
3320 	struct vop_unlock_args /* {
3321 		struct vnode *a_vp;
3322 		int a_flags;
3323 	} */ *ap = v;
3324 	struct vnode *vp = ap->a_vp;
3325 
3326 	/*
3327 	 * VOP_UNLOCK can be called by nfs_loadattrcache
3328 	 * with v_data == 0.
3329 	 */
3330 	if (VTONFS(vp)) {
3331 		nfs_delayedtruncate(vp);
3332 	}
3333 
3334 	return genfs_unlock(v);
3335 }
3336 
3337 /*
3338  * nfs special file access vnode op.
3339  * Essentially just get vattr and then imitate iaccess() since the device is
3340  * local to the client.
3341  */
3342 int
3343 nfsspec_access(void *v)
3344 {
3345 	struct vop_access_args /* {
3346 		struct vnode *a_vp;
3347 		int  a_mode;
3348 		kauth_cred_t a_cred;
3349 		struct lwp *a_l;
3350 	} */ *ap = v;
3351 	struct vattr va;
3352 	struct vnode *vp = ap->a_vp;
3353 	int error;
3354 
3355 	error = VOP_GETATTR(vp, &va, ap->a_cred);
3356 	if (error)
3357 		return (error);
3358 
3359         /*
3360 	 * Disallow write attempts on filesystems mounted read-only;
3361 	 * unless the file is a socket, fifo, or a block or character
3362 	 * device resident on the filesystem.
3363 	 */
3364 	if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3365 		switch (vp->v_type) {
3366 		case VREG:
3367 		case VDIR:
3368 		case VLNK:
3369 			return (EROFS);
3370 		default:
3371 			break;
3372 		}
3373 	}
3374 
3375 	return (genfs_can_access(va.va_type, va.va_mode,
3376 	    va.va_uid, va.va_gid, ap->a_mode, ap->a_cred));
3377 }
3378 
3379 /*
3380  * Read wrapper for special devices.
3381  */
3382 int
3383 nfsspec_read(void *v)
3384 {
3385 	struct vop_read_args /* {
3386 		struct vnode *a_vp;
3387 		struct uio *a_uio;
3388 		int  a_ioflag;
3389 		kauth_cred_t a_cred;
3390 	} */ *ap = v;
3391 	struct nfsnode *np = VTONFS(ap->a_vp);
3392 
3393 	/*
3394 	 * Set access flag.
3395 	 */
3396 	np->n_flag |= NACC;
3397 	getnanotime(&np->n_atim);
3398 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3399 }
3400 
3401 /*
3402  * Write wrapper for special devices.
3403  */
3404 int
3405 nfsspec_write(void *v)
3406 {
3407 	struct vop_write_args /* {
3408 		struct vnode *a_vp;
3409 		struct uio *a_uio;
3410 		int  a_ioflag;
3411 		kauth_cred_t a_cred;
3412 	} */ *ap = v;
3413 	struct nfsnode *np = VTONFS(ap->a_vp);
3414 
3415 	/*
3416 	 * Set update flag.
3417 	 */
3418 	np->n_flag |= NUPD;
3419 	getnanotime(&np->n_mtim);
3420 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3421 }
3422 
3423 /*
3424  * Close wrapper for special devices.
3425  *
3426  * Update the times on the nfsnode then do device close.
3427  */
3428 int
3429 nfsspec_close(void *v)
3430 {
3431 	struct vop_close_args /* {
3432 		struct vnode *a_vp;
3433 		int  a_fflag;
3434 		kauth_cred_t a_cred;
3435 		struct lwp *a_l;
3436 	} */ *ap = v;
3437 	struct vnode *vp = ap->a_vp;
3438 	struct nfsnode *np = VTONFS(vp);
3439 	struct vattr vattr;
3440 
3441 	if (np->n_flag & (NACC | NUPD)) {
3442 		np->n_flag |= NCHG;
3443 		if (vp->v_usecount == 1 &&
3444 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3445 			vattr_null(&vattr);
3446 			if (np->n_flag & NACC)
3447 				vattr.va_atime = np->n_atim;
3448 			if (np->n_flag & NUPD)
3449 				vattr.va_mtime = np->n_mtim;
3450 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3451 		}
3452 	}
3453 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3454 }
3455 
3456 /*
3457  * Read wrapper for fifos.
3458  */
3459 int
3460 nfsfifo_read(void *v)
3461 {
3462 	struct vop_read_args /* {
3463 		struct vnode *a_vp;
3464 		struct uio *a_uio;
3465 		int  a_ioflag;
3466 		kauth_cred_t a_cred;
3467 	} */ *ap = v;
3468 	struct nfsnode *np = VTONFS(ap->a_vp);
3469 
3470 	/*
3471 	 * Set access flag.
3472 	 */
3473 	np->n_flag |= NACC;
3474 	getnanotime(&np->n_atim);
3475 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3476 }
3477 
3478 /*
3479  * Write wrapper for fifos.
3480  */
3481 int
3482 nfsfifo_write(void *v)
3483 {
3484 	struct vop_write_args /* {
3485 		struct vnode *a_vp;
3486 		struct uio *a_uio;
3487 		int  a_ioflag;
3488 		kauth_cred_t a_cred;
3489 	} */ *ap = v;
3490 	struct nfsnode *np = VTONFS(ap->a_vp);
3491 
3492 	/*
3493 	 * Set update flag.
3494 	 */
3495 	np->n_flag |= NUPD;
3496 	getnanotime(&np->n_mtim);
3497 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3498 }
3499 
3500 /*
3501  * Close wrapper for fifos.
3502  *
3503  * Update the times on the nfsnode then do fifo close.
3504  */
3505 int
3506 nfsfifo_close(void *v)
3507 {
3508 	struct vop_close_args /* {
3509 		struct vnode *a_vp;
3510 		int  a_fflag;
3511 		kauth_cred_t a_cred;
3512 		struct lwp *a_l;
3513 	} */ *ap = v;
3514 	struct vnode *vp = ap->a_vp;
3515 	struct nfsnode *np = VTONFS(vp);
3516 	struct vattr vattr;
3517 
3518 	if (np->n_flag & (NACC | NUPD)) {
3519 		struct timespec ts;
3520 
3521 		getnanotime(&ts);
3522 		if (np->n_flag & NACC)
3523 			np->n_atim = ts;
3524 		if (np->n_flag & NUPD)
3525 			np->n_mtim = ts;
3526 		np->n_flag |= NCHG;
3527 		if (vp->v_usecount == 1 &&
3528 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3529 			vattr_null(&vattr);
3530 			if (np->n_flag & NACC)
3531 				vattr.va_atime = np->n_atim;
3532 			if (np->n_flag & NUPD)
3533 				vattr.va_mtime = np->n_mtim;
3534 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3535 		}
3536 	}
3537 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3538 }
3539