xref: /netbsd-src/sys/nfs/nfs_vnops.c (revision 80d9064ac03cbb6a4174695f0d5b237c8766d3d0)
1 /*	$NetBSD: nfs_vnops.c,v 1.306 2014/07/25 08:20:53 dholland Exp $	*/
2 
3 /*
4  * Copyright (c) 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Rick Macklem at The University of Guelph.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)nfs_vnops.c	8.19 (Berkeley) 7/31/95
35  */
36 
37 /*
38  * vnode op calls for Sun NFS version 2 and 3
39  */
40 
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.306 2014/07/25 08:20:53 dholland Exp $");
43 
44 #ifdef _KERNEL_OPT
45 #include "opt_nfs.h"
46 #include "opt_uvmhist.h"
47 #endif
48 
49 #include <sys/param.h>
50 #include <sys/proc.h>
51 #include <sys/kernel.h>
52 #include <sys/systm.h>
53 #include <sys/resourcevar.h>
54 #include <sys/mount.h>
55 #include <sys/buf.h>
56 #include <sys/condvar.h>
57 #include <sys/disk.h>
58 #include <sys/malloc.h>
59 #include <sys/kmem.h>
60 #include <sys/mbuf.h>
61 #include <sys/mutex.h>
62 #include <sys/namei.h>
63 #include <sys/vnode.h>
64 #include <sys/dirent.h>
65 #include <sys/fcntl.h>
66 #include <sys/hash.h>
67 #include <sys/lockf.h>
68 #include <sys/stat.h>
69 #include <sys/unistd.h>
70 #include <sys/kauth.h>
71 #include <sys/cprng.h>
72 
73 #include <uvm/uvm_extern.h>
74 #include <uvm/uvm.h>
75 
76 #include <miscfs/fifofs/fifo.h>
77 #include <miscfs/genfs/genfs.h>
78 #include <miscfs/genfs/genfs_node.h>
79 #include <miscfs/specfs/specdev.h>
80 
81 #include <nfs/rpcv2.h>
82 #include <nfs/nfsproto.h>
83 #include <nfs/nfs.h>
84 #include <nfs/nfsnode.h>
85 #include <nfs/nfsmount.h>
86 #include <nfs/xdr_subs.h>
87 #include <nfs/nfsm_subs.h>
88 #include <nfs/nfs_var.h>
89 
90 #include <net/if.h>
91 #include <netinet/in.h>
92 #include <netinet/in_var.h>
93 
94 /*
95  * Global vfs data structures for nfs
96  */
97 int (**nfsv2_vnodeop_p)(void *);
98 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
99 	{ &vop_default_desc, vn_default_error },
100 	{ &vop_lookup_desc, nfs_lookup },		/* lookup */
101 	{ &vop_create_desc, nfs_create },		/* create */
102 	{ &vop_mknod_desc, nfs_mknod },			/* mknod */
103 	{ &vop_open_desc, nfs_open },			/* open */
104 	{ &vop_close_desc, nfs_close },			/* close */
105 	{ &vop_access_desc, nfs_access },		/* access */
106 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
107 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
108 	{ &vop_read_desc, nfs_read },			/* read */
109 	{ &vop_write_desc, nfs_write },			/* write */
110 	{ &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
111 	{ &vop_fdiscard_desc, genfs_eopnotsupp },	/* fdiscard */
112 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
113 	{ &vop_ioctl_desc, nfs_ioctl },			/* ioctl */
114 	{ &vop_poll_desc, nfs_poll },			/* poll */
115 	{ &vop_kqfilter_desc, nfs_kqfilter },		/* kqfilter */
116 	{ &vop_revoke_desc, nfs_revoke },		/* revoke */
117 	{ &vop_mmap_desc, nfs_mmap },			/* mmap */
118 	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
119 	{ &vop_seek_desc, nfs_seek },			/* seek */
120 	{ &vop_remove_desc, nfs_remove },		/* remove */
121 	{ &vop_link_desc, nfs_link },			/* link */
122 	{ &vop_rename_desc, nfs_rename },		/* rename */
123 	{ &vop_mkdir_desc, nfs_mkdir },			/* mkdir */
124 	{ &vop_rmdir_desc, nfs_rmdir },			/* rmdir */
125 	{ &vop_symlink_desc, nfs_symlink },		/* symlink */
126 	{ &vop_readdir_desc, nfs_readdir },		/* readdir */
127 	{ &vop_readlink_desc, nfs_readlink },		/* readlink */
128 	{ &vop_abortop_desc, nfs_abortop },		/* abortop */
129 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
130 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
131 	{ &vop_lock_desc, nfs_lock },			/* lock */
132 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
133 	{ &vop_bmap_desc, nfs_bmap },			/* bmap */
134 	{ &vop_strategy_desc, nfs_strategy },		/* strategy */
135 	{ &vop_print_desc, nfs_print },			/* print */
136 	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
137 	{ &vop_pathconf_desc, nfs_pathconf },		/* pathconf */
138 	{ &vop_advlock_desc, nfs_advlock },		/* advlock */
139 	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
140 	{ &vop_getpages_desc, nfs_getpages },		/* getpages */
141 	{ &vop_putpages_desc, genfs_putpages },		/* putpages */
142 	{ NULL, NULL }
143 };
144 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
145 	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };
146 
147 /*
148  * Special device vnode ops
149  */
150 int (**spec_nfsv2nodeop_p)(void *);
151 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
152 	{ &vop_default_desc, vn_default_error },
153 	{ &vop_lookup_desc, spec_lookup },		/* lookup */
154 	{ &vop_create_desc, spec_create },		/* create */
155 	{ &vop_mknod_desc, spec_mknod },		/* mknod */
156 	{ &vop_open_desc, spec_open },			/* open */
157 	{ &vop_close_desc, nfsspec_close },		/* close */
158 	{ &vop_access_desc, nfsspec_access },		/* access */
159 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
160 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
161 	{ &vop_read_desc, nfsspec_read },		/* read */
162 	{ &vop_write_desc, nfsspec_write },		/* write */
163 	{ &vop_fallocate_desc, spec_fallocate },	/* fallocate */
164 	{ &vop_fdiscard_desc, spec_fdiscard },		/* fdiscard */
165 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
166 	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
167 	{ &vop_poll_desc, spec_poll },			/* poll */
168 	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
169 	{ &vop_revoke_desc, spec_revoke },		/* revoke */
170 	{ &vop_mmap_desc, spec_mmap },			/* mmap */
171 	{ &vop_fsync_desc, spec_fsync },		/* fsync */
172 	{ &vop_seek_desc, spec_seek },			/* seek */
173 	{ &vop_remove_desc, spec_remove },		/* remove */
174 	{ &vop_link_desc, spec_link },			/* link */
175 	{ &vop_rename_desc, spec_rename },		/* rename */
176 	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
177 	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
178 	{ &vop_symlink_desc, spec_symlink },		/* symlink */
179 	{ &vop_readdir_desc, spec_readdir },		/* readdir */
180 	{ &vop_readlink_desc, spec_readlink },		/* readlink */
181 	{ &vop_abortop_desc, spec_abortop },		/* abortop */
182 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
183 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
184 	{ &vop_lock_desc, nfs_lock },			/* lock */
185 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
186 	{ &vop_bmap_desc, spec_bmap },			/* bmap */
187 	{ &vop_strategy_desc, spec_strategy },		/* strategy */
188 	{ &vop_print_desc, nfs_print },			/* print */
189 	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
190 	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
191 	{ &vop_advlock_desc, spec_advlock },		/* advlock */
192 	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
193 	{ &vop_getpages_desc, spec_getpages },		/* getpages */
194 	{ &vop_putpages_desc, spec_putpages },		/* putpages */
195 	{ NULL, NULL }
196 };
197 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
198 	{ &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };
199 
200 int (**fifo_nfsv2nodeop_p)(void *);
201 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
202 	{ &vop_default_desc, vn_default_error },
203 	{ &vop_lookup_desc, vn_fifo_bypass },		/* lookup */
204 	{ &vop_create_desc, vn_fifo_bypass },		/* create */
205 	{ &vop_mknod_desc, vn_fifo_bypass },		/* mknod */
206 	{ &vop_open_desc, vn_fifo_bypass },		/* open */
207 	{ &vop_close_desc, nfsfifo_close },		/* close */
208 	{ &vop_access_desc, nfsspec_access },		/* access */
209 	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
210 	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
211 	{ &vop_read_desc, nfsfifo_read },		/* read */
212 	{ &vop_write_desc, nfsfifo_write },		/* write */
213 	{ &vop_fallocate_desc, vn_fifo_bypass },	/* fallocate */
214 	{ &vop_fdiscard_desc, vn_fifo_bypass },		/* fdiscard */
215 	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
216 	{ &vop_ioctl_desc, vn_fifo_bypass },		/* ioctl */
217 	{ &vop_poll_desc, vn_fifo_bypass },		/* poll */
218 	{ &vop_kqfilter_desc, vn_fifo_bypass },		/* kqfilter */
219 	{ &vop_revoke_desc, vn_fifo_bypass },		/* revoke */
220 	{ &vop_mmap_desc, vn_fifo_bypass },		/* mmap */
221 	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
222 	{ &vop_seek_desc, vn_fifo_bypass },		/* seek */
223 	{ &vop_remove_desc, vn_fifo_bypass },		/* remove */
224 	{ &vop_link_desc, vn_fifo_bypass },		/* link */
225 	{ &vop_rename_desc, vn_fifo_bypass },		/* rename */
226 	{ &vop_mkdir_desc, vn_fifo_bypass },		/* mkdir */
227 	{ &vop_rmdir_desc, vn_fifo_bypass },		/* rmdir */
228 	{ &vop_symlink_desc, vn_fifo_bypass },		/* symlink */
229 	{ &vop_readdir_desc, vn_fifo_bypass },		/* readdir */
230 	{ &vop_readlink_desc, vn_fifo_bypass },		/* readlink */
231 	{ &vop_abortop_desc, vn_fifo_bypass },		/* abortop */
232 	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
233 	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
234 	{ &vop_lock_desc, nfs_lock },			/* lock */
235 	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
236 	{ &vop_bmap_desc, vn_fifo_bypass },		/* bmap */
237 	{ &vop_strategy_desc, genfs_badop },		/* strategy */
238 	{ &vop_print_desc, nfs_print },			/* print */
239 	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
240 	{ &vop_pathconf_desc, vn_fifo_bypass },		/* pathconf */
241 	{ &vop_advlock_desc, vn_fifo_bypass },		/* advlock */
242 	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
243 	{ &vop_putpages_desc, vn_fifo_bypass }, 	/* putpages */
244 	{ NULL, NULL }
245 };
246 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
247 	{ &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };
248 
249 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
250     size_t, kauth_cred_t, struct lwp *);
251 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);
252 
253 /*
254  * Global variables
255  */
256 extern u_int32_t nfs_true, nfs_false;
257 extern u_int32_t nfs_xdrneg1;
258 extern const nfstype nfsv3_type[9];
259 
260 int nfs_numasync = 0;
261 #define	DIRHDSIZ	_DIRENT_NAMEOFF(dp)
262 #define UIO_ADVANCE(uio, siz) \
263     (void)((uio)->uio_resid -= (siz), \
264     (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
265     (uio)->uio_iov->iov_len -= (siz))
266 
267 static void nfs_cache_enter(struct vnode *, struct vnode *,
268     struct componentname *);
269 
270 static void
271 nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
272     struct componentname *cnp)
273 {
274 	struct nfsnode *dnp = VTONFS(dvp);
275 
276 	if ((cnp->cn_flags & MAKEENTRY) == 0) {
277 		return;
278 	}
279 	if (vp != NULL) {
280 		struct nfsnode *np = VTONFS(vp);
281 
282 		np->n_ctime = np->n_vattr->va_ctime.tv_sec;
283 	}
284 
285 	if (!timespecisset(&dnp->n_nctime))
286 		dnp->n_nctime = dnp->n_vattr->va_mtime;
287 
288 	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);
289 }
290 
291 /*
292  * nfs null call from vfs.
293  */
294 int
295 nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
296 {
297 	char *bpos, *dpos;
298 	int error = 0;
299 	struct mbuf *mreq, *mrep, *md, *mb __unused;
300 	struct nfsnode *np = VTONFS(vp);
301 
302 	nfsm_reqhead(np, NFSPROC_NULL, 0);
303 	nfsm_request(np, NFSPROC_NULL, l, cred);
304 	nfsm_reqdone;
305 	return (error);
306 }
307 
308 /*
309  * nfs access vnode op.
310  * For nfs version 2, just return ok. File accesses may fail later.
311  * For nfs version 3, use the access rpc to check accessibility. If file modes
312  * are changed on the server, accesses might still fail later.
313  */
314 int
315 nfs_access(void *v)
316 {
317 	struct vop_access_args /* {
318 		struct vnode *a_vp;
319 		int  a_mode;
320 		kauth_cred_t a_cred;
321 	} */ *ap = v;
322 	struct vnode *vp = ap->a_vp;
323 #ifndef NFS_V2_ONLY
324 	u_int32_t *tl;
325 	char *cp;
326 	int32_t t1, t2;
327 	char *bpos, *dpos, *cp2;
328 	int error = 0, attrflag;
329 	struct mbuf *mreq, *mrep, *md, *mb;
330 	u_int32_t mode, rmode;
331 	const int v3 = NFS_ISV3(vp);
332 #endif
333 	int cachevalid;
334 	struct nfsnode *np = VTONFS(vp);
335 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
336 
337 	cachevalid = (np->n_accstamp != -1 &&
338 	    (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
339 	    np->n_accuid == kauth_cred_geteuid(ap->a_cred));
340 
341 	/*
342 	 * Check access cache first. If this request has been made for this
343 	 * uid shortly before, use the cached result.
344 	 */
345 	if (cachevalid) {
346 		if (!np->n_accerror) {
347 			if  ((np->n_accmode & ap->a_mode) == ap->a_mode)
348 				return np->n_accerror;
349 		} else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
350 			return np->n_accerror;
351 	}
352 
353 #ifndef NFS_V2_ONLY
354 	/*
355 	 * For nfs v3, do an access rpc, otherwise you are stuck emulating
356 	 * ufs_access() locally using the vattr. This may not be correct,
357 	 * since the server may apply other access criteria such as
358 	 * client uid-->server uid mapping that we do not know about, but
359 	 * this is better than just returning anything that is lying about
360 	 * in the cache.
361 	 */
362 	if (v3) {
363 		nfsstats.rpccnt[NFSPROC_ACCESS]++;
364 		nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
365 		nfsm_fhtom(np, v3);
366 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
367 		if (ap->a_mode & VREAD)
368 			mode = NFSV3ACCESS_READ;
369 		else
370 			mode = 0;
371 		if (vp->v_type != VDIR) {
372 			if (ap->a_mode & VWRITE)
373 				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
374 			if (ap->a_mode & VEXEC)
375 				mode |= NFSV3ACCESS_EXECUTE;
376 		} else {
377 			if (ap->a_mode & VWRITE)
378 				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
379 					 NFSV3ACCESS_DELETE);
380 			if (ap->a_mode & VEXEC)
381 				mode |= NFSV3ACCESS_LOOKUP;
382 		}
383 		*tl = txdr_unsigned(mode);
384 		nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
385 		nfsm_postop_attr(vp, attrflag, 0);
386 		if (!error) {
387 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
388 			rmode = fxdr_unsigned(u_int32_t, *tl);
389 			/*
390 			 * The NFS V3 spec does not clarify whether or not
391 			 * the returned access bits can be a superset of
392 			 * the ones requested, so...
393 			 */
394 			if ((rmode & mode) != mode)
395 				error = EACCES;
396 		}
397 		nfsm_reqdone;
398 	} else
399 #endif
400 		return (nfsspec_access(ap));
401 #ifndef NFS_V2_ONLY
402 	/*
403 	 * Disallow write attempts on filesystems mounted read-only;
404 	 * unless the file is a socket, fifo, or a block or character
405 	 * device resident on the filesystem.
406 	 */
407 	if (!error && (ap->a_mode & VWRITE) &&
408 	    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
409 		switch (vp->v_type) {
410 		case VREG:
411 		case VDIR:
412 		case VLNK:
413 			error = EROFS;
414 		default:
415 			break;
416 		}
417 	}
418 
419 	if (!error || error == EACCES) {
420 		/*
421 		 * If we got the same result as for a previous,
422 		 * different request, OR it in. Don't update
423 		 * the timestamp in that case.
424 		 */
425 		if (cachevalid && np->n_accstamp != -1 &&
426 		    error == np->n_accerror) {
427 			if (!error)
428 				np->n_accmode |= ap->a_mode;
429 			else if ((np->n_accmode & ap->a_mode) == ap->a_mode)
430 				np->n_accmode = ap->a_mode;
431 		} else {
432 			np->n_accstamp = time_uptime;
433 			np->n_accuid = kauth_cred_geteuid(ap->a_cred);
434 			np->n_accmode = ap->a_mode;
435 			np->n_accerror = error;
436 		}
437 	}
438 
439 	return (error);
440 #endif
441 }
442 
443 /*
444  * nfs open vnode op
445  * Check to see if the type is ok
446  * and that deletion is not in progress.
447  * For paged in text files, you will need to flush the page cache
448  * if consistency is lost.
449  */
450 /* ARGSUSED */
451 int
452 nfs_open(void *v)
453 {
454 	struct vop_open_args /* {
455 		struct vnode *a_vp;
456 		int  a_mode;
457 		kauth_cred_t a_cred;
458 	} */ *ap = v;
459 	struct vnode *vp = ap->a_vp;
460 	struct nfsnode *np = VTONFS(vp);
461 	int error;
462 
463 	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
464 		return (EACCES);
465 	}
466 
467 	if (ap->a_mode & FREAD) {
468 		if (np->n_rcred != NULL)
469 			kauth_cred_free(np->n_rcred);
470 		np->n_rcred = ap->a_cred;
471 		kauth_cred_hold(np->n_rcred);
472 	}
473 	if (ap->a_mode & FWRITE) {
474 		if (np->n_wcred != NULL)
475 			kauth_cred_free(np->n_wcred);
476 		np->n_wcred = ap->a_cred;
477 		kauth_cred_hold(np->n_wcred);
478 	}
479 
480 	error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
481 	if (error)
482 		return error;
483 
484 	NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */
485 
486 	return (0);
487 }
488 
489 /*
490  * nfs close vnode op
491  * What an NFS client should do upon close after writing is a debatable issue.
492  * Most NFS clients push delayed writes to the server upon close, basically for
493  * two reasons:
494  * 1 - So that any write errors may be reported back to the client process
495  *     doing the close system call. By far the two most likely errors are
496  *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
497  * 2 - To put a worst case upper bound on cache inconsistency between
498  *     multiple clients for the file.
499  * There is also a consistency problem for Version 2 of the protocol w.r.t.
500  * not being able to tell if other clients are writing a file concurrently,
501  * since there is no way of knowing if the changed modify time in the reply
502  * is only due to the write for this client.
503  * (NFS Version 3 provides weak cache consistency data in the reply that
504  *  should be sufficient to detect and handle this case.)
505  *
506  * The current code does the following:
507  * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
508  * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
509  *                     or commit them (this satisfies 1 and 2 except for the
510  *                     case where the server crashes after this close but
511  *                     before the commit RPC, which is felt to be "good
512  *                     enough". Changing the last argument to nfs_flush() to
513  *                     a 1 would force a commit operation, if it is felt a
514  *                     commit is necessary now.
515  */
516 /* ARGSUSED */
517 int
518 nfs_close(void *v)
519 {
520 	struct vop_close_args /* {
521 		struct vnodeop_desc *a_desc;
522 		struct vnode *a_vp;
523 		int  a_fflag;
524 		kauth_cred_t a_cred;
525 	} */ *ap = v;
526 	struct vnode *vp = ap->a_vp;
527 	struct nfsnode *np = VTONFS(vp);
528 	int error = 0;
529 	UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);
530 
531 	if (vp->v_type == VREG) {
532 	    if (np->n_flag & NMODIFIED) {
533 #ifndef NFS_V2_ONLY
534 		if (NFS_ISV3(vp)) {
535 		    error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
536 		    np->n_flag &= ~NMODIFIED;
537 		} else
538 #endif
539 		    error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
540 		NFS_INVALIDATE_ATTRCACHE(np);
541 	    }
542 	    if (np->n_flag & NWRITEERR) {
543 		np->n_flag &= ~NWRITEERR;
544 		error = np->n_error;
545 	    }
546 	}
547 	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
548 	return (error);
549 }
550 
551 /*
552  * nfs getattr call from vfs.
553  */
554 int
555 nfs_getattr(void *v)
556 {
557 	struct vop_getattr_args /* {
558 		struct vnode *a_vp;
559 		struct vattr *a_vap;
560 		kauth_cred_t a_cred;
561 	} */ *ap = v;
562 	struct vnode *vp = ap->a_vp;
563 	struct nfsnode *np = VTONFS(vp);
564 	char *cp;
565 	u_int32_t *tl;
566 	int32_t t1, t2;
567 	char *bpos, *dpos;
568 	int error = 0;
569 	struct mbuf *mreq, *mrep, *md, *mb;
570 	const int v3 = NFS_ISV3(vp);
571 
572 	/*
573 	 * Update local times for special files.
574 	 */
575 	if (np->n_flag & (NACC | NUPD))
576 		np->n_flag |= NCHG;
577 
578 	/*
579 	 * if we have delayed truncation, do it now.
580 	 */
581 	nfs_delayedtruncate(vp);
582 
583 	/*
584 	 * First look in the cache.
585 	 */
586 	if (nfs_getattrcache(vp, ap->a_vap) == 0)
587 		return (0);
588 	nfsstats.rpccnt[NFSPROC_GETATTR]++;
589 	nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
590 	nfsm_fhtom(np, v3);
591 	nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
592 	if (!error) {
593 		nfsm_loadattr(vp, ap->a_vap, 0);
594 		if (vp->v_type == VDIR &&
595 		    ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
596 			ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
597 	}
598 	nfsm_reqdone;
599 	return (error);
600 }
601 
602 /*
603  * nfs setattr call.
604  */
605 int
606 nfs_setattr(void *v)
607 {
608 	struct vop_setattr_args /* {
609 		struct vnodeop_desc *a_desc;
610 		struct vnode *a_vp;
611 		struct vattr *a_vap;
612 		kauth_cred_t a_cred;
613 	} */ *ap = v;
614 	struct vnode *vp = ap->a_vp;
615 	struct nfsnode *np = VTONFS(vp);
616 	struct vattr *vap = ap->a_vap;
617 	int error = 0;
618 	u_quad_t tsize = 0;
619 
620 	/*
621 	 * Setting of flags is not supported.
622 	 */
623 	if (vap->va_flags != VNOVAL)
624 		return (EOPNOTSUPP);
625 
626 	/*
627 	 * Disallow write attempts if the filesystem is mounted read-only.
628 	 */
629   	if ((vap->va_uid != (uid_t)VNOVAL ||
630 	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
631 	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
632 	    (vp->v_mount->mnt_flag & MNT_RDONLY))
633 		return (EROFS);
634 	if (vap->va_size != VNOVAL) {
635 		if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
636 			return EFBIG;
637 		}
638  		switch (vp->v_type) {
639  		case VDIR:
640  			return (EISDIR);
641  		case VCHR:
642  		case VBLK:
643  		case VSOCK:
644  		case VFIFO:
645 			if (vap->va_mtime.tv_sec == VNOVAL &&
646 			    vap->va_atime.tv_sec == VNOVAL &&
647 			    vap->va_mode == (mode_t)VNOVAL &&
648 			    vap->va_uid == (uid_t)VNOVAL &&
649 			    vap->va_gid == (gid_t)VNOVAL)
650 				return (0);
651  			vap->va_size = VNOVAL;
652  			break;
653  		default:
654 			/*
655 			 * Disallow write attempts if the filesystem is
656 			 * mounted read-only.
657 			 */
658 			if (vp->v_mount->mnt_flag & MNT_RDONLY)
659 				return (EROFS);
660 			genfs_node_wrlock(vp);
661  			uvm_vnp_setsize(vp, vap->va_size);
662  			tsize = np->n_size;
663 			np->n_size = vap->va_size;
664  			if (vap->va_size == 0)
665  				error = nfs_vinvalbuf(vp, 0,
666  				     ap->a_cred, curlwp, 1);
667 			else
668 				error = nfs_vinvalbuf(vp, V_SAVE,
669 				     ap->a_cred, curlwp, 1);
670 			if (error) {
671 				uvm_vnp_setsize(vp, tsize);
672 				genfs_node_unlock(vp);
673 				return (error);
674 			}
675  			np->n_vattr->va_size = vap->va_size;
676   		}
677   	} else {
678 		/*
679 		 * flush files before setattr because a later write of
680 		 * cached data might change timestamps or reset sugid bits
681 		 */
682 		if ((vap->va_mtime.tv_sec != VNOVAL ||
683 		     vap->va_atime.tv_sec != VNOVAL ||
684 		     vap->va_mode != VNOVAL) &&
685 		    vp->v_type == VREG &&
686   		    (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
687 		 			   curlwp, 1)) == EINTR)
688 			return (error);
689 	}
690 	error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
691 	if (vap->va_size != VNOVAL) {
692 		if (error) {
693 			np->n_size = np->n_vattr->va_size = tsize;
694 			uvm_vnp_setsize(vp, np->n_size);
695 		}
696 		genfs_node_unlock(vp);
697 	}
698 	VN_KNOTE(vp, NOTE_ATTRIB);
699 	return (error);
700 }
701 
702 /*
703  * Do an nfs setattr rpc.
704  */
705 int
706 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
707 {
708 	struct nfsv2_sattr *sp;
709 	char *cp;
710 	int32_t t1, t2;
711 	char *bpos, *dpos;
712 	u_int32_t *tl;
713 	int error = 0;
714 	struct mbuf *mreq, *mrep, *md, *mb;
715 	const int v3 = NFS_ISV3(vp);
716 	struct nfsnode *np = VTONFS(vp);
717 #ifndef NFS_V2_ONLY
718 	int wccflag = NFSV3_WCCRATTR;
719 	char *cp2;
720 #endif
721 
722 	nfsstats.rpccnt[NFSPROC_SETATTR]++;
723 	nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
724 	nfsm_fhtom(np, v3);
725 #ifndef NFS_V2_ONLY
726 	if (v3) {
727 		nfsm_v3attrbuild(vap, true);
728 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
729 		*tl = nfs_false;
730 	} else {
731 #endif
732 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
733 		if (vap->va_mode == (mode_t)VNOVAL)
734 			sp->sa_mode = nfs_xdrneg1;
735 		else
736 			sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
737 		if (vap->va_uid == (uid_t)VNOVAL)
738 			sp->sa_uid = nfs_xdrneg1;
739 		else
740 			sp->sa_uid = txdr_unsigned(vap->va_uid);
741 		if (vap->va_gid == (gid_t)VNOVAL)
742 			sp->sa_gid = nfs_xdrneg1;
743 		else
744 			sp->sa_gid = txdr_unsigned(vap->va_gid);
745 		sp->sa_size = txdr_unsigned(vap->va_size);
746 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
747 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
748 #ifndef NFS_V2_ONLY
749 	}
750 #endif
751 	nfsm_request(np, NFSPROC_SETATTR, l, cred);
752 #ifndef NFS_V2_ONLY
753 	if (v3) {
754 		nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
755 	} else
756 #endif
757 		nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
758 	nfsm_reqdone;
759 	return (error);
760 }
761 
762 /*
763  * nfs lookup call, one step at a time...
764  * First look in cache
765  * If not found, do the rpc.
766  */
767 int
768 nfs_lookup(void *v)
769 {
770 	struct vop_lookup_v2_args /* {
771 		struct vnodeop_desc *a_desc;
772 		struct vnode *a_dvp;
773 		struct vnode **a_vpp;
774 		struct componentname *a_cnp;
775 	} */ *ap = v;
776 	struct componentname *cnp = ap->a_cnp;
777 	struct vnode *dvp = ap->a_dvp;
778 	struct vnode **vpp = ap->a_vpp;
779 	int flags;
780 	struct vnode *newvp;
781 	u_int32_t *tl;
782 	char *cp;
783 	int32_t t1, t2;
784 	char *bpos, *dpos, *cp2;
785 	struct mbuf *mreq, *mrep, *md, *mb;
786 	long len;
787 	nfsfh_t *fhp;
788 	struct nfsnode *np;
789 	int cachefound;
790 	int error = 0, attrflag, fhsize;
791 	const int v3 = NFS_ISV3(dvp);
792 
793 	flags = cnp->cn_flags;
794 
795 	*vpp = NULLVP;
796 	newvp = NULLVP;
797 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
798 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
799 		return (EROFS);
800 	if (dvp->v_type != VDIR)
801 		return (ENOTDIR);
802 
803 	/*
804 	 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
805 	 */
806 	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
807 		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
808 		if (error)
809 			return error;
810 		if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
811 			return EISDIR;
812 		vref(dvp);
813 		*vpp = dvp;
814 		return 0;
815 	}
816 
817 	np = VTONFS(dvp);
818 
819 	/*
820 	 * Before performing an RPC, check the name cache to see if
821 	 * the directory/name pair we are looking for is known already.
822 	 * If the directory/name pair is found in the name cache,
823 	 * we have to ensure the directory has not changed from
824 	 * the time the cache entry has been created. If it has,
825 	 * the cache entry has to be ignored.
826 	 */
827 	cachefound = cache_lookup_raw(dvp, cnp->cn_nameptr, cnp->cn_namelen,
828 				      cnp->cn_flags, NULL, vpp);
829 	KASSERT(dvp != *vpp);
830 	KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
831 	if (cachefound) {
832 		struct vattr vattr;
833 
834 		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
835 		if (error != 0) {
836 			if (*vpp != NULLVP)
837 				vrele(*vpp);
838 			*vpp = NULLVP;
839 			return error;
840 		}
841 
842 		if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
843 		    || timespeccmp(&vattr.va_mtime,
844 		    &VTONFS(dvp)->n_nctime, !=)) {
845 			if (*vpp != NULLVP) {
846 				vrele(*vpp);
847 				*vpp = NULLVP;
848 			}
849 			cache_purge1(dvp, NULL, 0, PURGE_CHILDREN);
850 			timespecclear(&np->n_nctime);
851 			goto dorpc;
852 		}
853 
854 		if (*vpp == NULLVP) {
855 			/* namecache gave us a negative result */
856 			error = ENOENT;
857 			goto noentry;
858 		}
859 
860 		/*
861 		 * investigate the vnode returned by cache_lookup_raw.
862 		 * if it isn't appropriate, do an rpc.
863 		 */
864 		newvp = *vpp;
865 		if ((flags & ISDOTDOT) != 0) {
866 			VOP_UNLOCK(dvp);
867 		}
868 		error = vn_lock(newvp, LK_SHARED);
869 		if ((flags & ISDOTDOT) != 0) {
870 			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
871 		}
872 		if (error != 0) {
873 			/* newvp has been reclaimed. */
874 			vrele(newvp);
875 			*vpp = NULLVP;
876 			goto dorpc;
877 		}
878 		if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
879 		    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
880 			nfsstats.lookupcache_hits++;
881 			KASSERT(newvp->v_type != VNON);
882 			VOP_UNLOCK(newvp);
883 			return (0);
884 		}
885 		cache_purge1(newvp, NULL, 0, PURGE_PARENTS);
886 		vput(newvp);
887 		*vpp = NULLVP;
888 	}
889 dorpc:
890 #if 0
891 	/*
892 	 * because nfsv3 has the same CREATE semantics as ours,
893 	 * we don't have to perform LOOKUPs beforehand.
894 	 *
895 	 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
896 	 * XXX although we have no way to know if O_EXCL is requested or not.
897 	 */
898 
899 	if (v3 && cnp->cn_nameiop == CREATE &&
900 	    (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
901 	    (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
902 		return (EJUSTRETURN);
903 	}
904 #endif /* 0 */
905 
906 	error = 0;
907 	newvp = NULLVP;
908 	nfsstats.lookupcache_misses++;
909 	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
910 	len = cnp->cn_namelen;
911 	nfsm_reqhead(np, NFSPROC_LOOKUP,
912 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
913 	nfsm_fhtom(np, v3);
914 	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
915 	nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
916 	if (error) {
917 		nfsm_postop_attr(dvp, attrflag, 0);
918 		m_freem(mrep);
919 		goto nfsmout;
920 	}
921 	nfsm_getfh(fhp, fhsize, v3);
922 
923 	/*
924 	 * Handle RENAME case...
925 	 */
926 	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
927 		if (NFS_CMPFH(np, fhp, fhsize)) {
928 			m_freem(mrep);
929 			return (EISDIR);
930 		}
931 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
932 		if (error) {
933 			m_freem(mrep);
934 			return error;
935 		}
936 		newvp = NFSTOV(np);
937 #ifndef NFS_V2_ONLY
938 		if (v3) {
939 			nfsm_postop_attr(newvp, attrflag, 0);
940 			nfsm_postop_attr(dvp, attrflag, 0);
941 		} else
942 #endif
943 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
944 		*vpp = newvp;
945 		m_freem(mrep);
946 		goto validate;
947 	}
948 
949 	/*
950 	 * The postop attr handling is duplicated for each if case,
951 	 * because it should be done while dvp is locked (unlocking
952 	 * dvp is different for each case).
953 	 */
954 
955 	if (NFS_CMPFH(np, fhp, fhsize)) {
956 		/*
957 		 * as we handle "." lookup locally, this should be
958 		 * a broken server.
959 		 */
960 		vref(dvp);
961 		newvp = dvp;
962 #ifndef NFS_V2_ONLY
963 		if (v3) {
964 			nfsm_postop_attr(newvp, attrflag, 0);
965 			nfsm_postop_attr(dvp, attrflag, 0);
966 		} else
967 #endif
968 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
969 	} else if (flags & ISDOTDOT) {
970 		/*
971 		 * ".." lookup
972 		 */
973 		VOP_UNLOCK(dvp);
974 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
975 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
976 		if (error) {
977 			m_freem(mrep);
978 			return error;
979 		}
980 		newvp = NFSTOV(np);
981 
982 #ifndef NFS_V2_ONLY
983 		if (v3) {
984 			nfsm_postop_attr(newvp, attrflag, 0);
985 			nfsm_postop_attr(dvp, attrflag, 0);
986 		} else
987 #endif
988 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
989 	} else {
990 		/*
991 		 * Other lookups.
992 		 */
993 		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
994 		if (error) {
995 			m_freem(mrep);
996 			return error;
997 		}
998 		newvp = NFSTOV(np);
999 #ifndef NFS_V2_ONLY
1000 		if (v3) {
1001 			nfsm_postop_attr(newvp, attrflag, 0);
1002 			nfsm_postop_attr(dvp, attrflag, 0);
1003 		} else
1004 #endif
1005 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
1006 	}
1007 	if (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) {
1008 		nfs_cache_enter(dvp, newvp, cnp);
1009 	}
1010 	*vpp = newvp;
1011 	nfsm_reqdone;
1012 	if (error) {
1013 		/*
1014 		 * We get here only because of errors returned by
1015 		 * the RPC. Otherwise we'll have returned above
1016 		 * (the nfsm_* macros will jump to nfsm_reqdone
1017 		 * on error).
1018 		 */
1019 		if (error == ENOENT && cnp->cn_nameiop != CREATE) {
1020 			nfs_cache_enter(dvp, NULL, cnp);
1021 		}
1022 		if (newvp != NULLVP) {
1023 			if (newvp == dvp) {
1024 				vrele(newvp);
1025 			} else {
1026 				vput(newvp);
1027 			}
1028 		}
1029 noentry:
1030 		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
1031 		    (flags & ISLASTCN) && error == ENOENT) {
1032 			if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
1033 				error = EROFS;
1034 			} else {
1035 				error = EJUSTRETURN;
1036 			}
1037 		}
1038 		*vpp = NULL;
1039 		return error;
1040 	}
1041 
1042 validate:
1043 	/*
1044 	 * make sure we have valid type and size.
1045 	 */
1046 
1047 	newvp = *vpp;
1048 	if (newvp->v_type == VNON) {
1049 		struct vattr vattr; /* dummy */
1050 
1051 		KASSERT(VTONFS(newvp)->n_attrstamp == 0);
1052 		error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
1053 		if (error) {
1054 			vput(newvp);
1055 			*vpp = NULL;
1056 		}
1057 	}
1058 	if (error)
1059 		return error;
1060 	if (newvp != dvp)
1061 		VOP_UNLOCK(newvp);
1062 	return 0;
1063 }
1064 
1065 /*
1066  * nfs read call.
1067  * Just call nfs_bioread() to do the work.
1068  */
1069 int
1070 nfs_read(void *v)
1071 {
1072 	struct vop_read_args /* {
1073 		struct vnode *a_vp;
1074 		struct uio *a_uio;
1075 		int  a_ioflag;
1076 		kauth_cred_t a_cred;
1077 	} */ *ap = v;
1078 	struct vnode *vp = ap->a_vp;
1079 
1080 	if (vp->v_type != VREG)
1081 		return EISDIR;
1082 	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
1083 }
1084 
1085 /*
1086  * nfs readlink call
1087  */
1088 int
1089 nfs_readlink(void *v)
1090 {
1091 	struct vop_readlink_args /* {
1092 		struct vnode *a_vp;
1093 		struct uio *a_uio;
1094 		kauth_cred_t a_cred;
1095 	} */ *ap = v;
1096 	struct vnode *vp = ap->a_vp;
1097 	struct nfsnode *np = VTONFS(vp);
1098 
1099 	if (vp->v_type != VLNK)
1100 		return (EPERM);
1101 
1102 	if (np->n_rcred != NULL) {
1103 		kauth_cred_free(np->n_rcred);
1104 	}
1105 	np->n_rcred = ap->a_cred;
1106 	kauth_cred_hold(np->n_rcred);
1107 
1108 	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
1109 }
1110 
1111 /*
1112  * Do a readlink rpc.
1113  * Called by nfs_doio() from below the buffer cache.
1114  */
1115 int
1116 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
1117 {
1118 	u_int32_t *tl;
1119 	char *cp;
1120 	int32_t t1, t2;
1121 	char *bpos, *dpos, *cp2;
1122 	int error = 0;
1123 	uint32_t len;
1124 	struct mbuf *mreq, *mrep, *md, *mb;
1125 	const int v3 = NFS_ISV3(vp);
1126 	struct nfsnode *np = VTONFS(vp);
1127 #ifndef NFS_V2_ONLY
1128 	int attrflag;
1129 #endif
1130 
1131 	nfsstats.rpccnt[NFSPROC_READLINK]++;
1132 	nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
1133 	nfsm_fhtom(np, v3);
1134 	nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
1135 #ifndef NFS_V2_ONLY
1136 	if (v3)
1137 		nfsm_postop_attr(vp, attrflag, 0);
1138 #endif
1139 	if (!error) {
1140 #ifndef NFS_V2_ONLY
1141 		if (v3) {
1142 			nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
1143 			len = fxdr_unsigned(uint32_t, *tl);
1144 			if (len > NFS_MAXPATHLEN) {
1145 				/*
1146 				 * this pathname is too long for us.
1147 				 */
1148 				m_freem(mrep);
1149 				/* Solaris returns EINVAL. should we follow? */
1150 				error = ENAMETOOLONG;
1151 				goto nfsmout;
1152 			}
1153 		} else
1154 #endif
1155 		{
1156 			nfsm_strsiz(len, NFS_MAXPATHLEN);
1157 		}
1158 		nfsm_mtouio(uiop, len);
1159 	}
1160 	nfsm_reqdone;
1161 	return (error);
1162 }
1163 
1164 /*
1165  * nfs read rpc call
1166  * Ditto above
1167  */
1168 int
1169 nfs_readrpc(struct vnode *vp, struct uio *uiop)
1170 {
1171 	u_int32_t *tl;
1172 	char *cp;
1173 	int32_t t1, t2;
1174 	char *bpos, *dpos, *cp2;
1175 	struct mbuf *mreq, *mrep, *md, *mb;
1176 	struct nfsmount *nmp;
1177 	int error = 0, len, retlen, tsiz, eof __unused, byte_count;
1178 	const int v3 = NFS_ISV3(vp);
1179 	struct nfsnode *np = VTONFS(vp);
1180 #ifndef NFS_V2_ONLY
1181 	int attrflag;
1182 #endif
1183 
1184 #ifndef nolint
1185 	eof = 0;
1186 #endif
1187 	nmp = VFSTONFS(vp->v_mount);
1188 	tsiz = uiop->uio_resid;
1189 	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1190 		return (EFBIG);
1191 	iostat_busy(nmp->nm_stats);
1192 	byte_count = 0; /* count bytes actually transferred */
1193 	while (tsiz > 0) {
1194 		nfsstats.rpccnt[NFSPROC_READ]++;
1195 		len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
1196 		nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
1197 		nfsm_fhtom(np, v3);
1198 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
1199 #ifndef NFS_V2_ONLY
1200 		if (v3) {
1201 			txdr_hyper(uiop->uio_offset, tl);
1202 			*(tl + 2) = txdr_unsigned(len);
1203 		} else
1204 #endif
1205 		{
1206 			*tl++ = txdr_unsigned(uiop->uio_offset);
1207 			*tl++ = txdr_unsigned(len);
1208 			*tl = 0;
1209 		}
1210 		nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
1211 #ifndef NFS_V2_ONLY
1212 		if (v3) {
1213 			nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
1214 			if (error) {
1215 				m_freem(mrep);
1216 				goto nfsmout;
1217 			}
1218 			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1219 			eof = fxdr_unsigned(int, *(tl + 1));
1220 		} else
1221 #endif
1222 			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1223 		nfsm_strsiz(retlen, nmp->nm_rsize);
1224 		nfsm_mtouio(uiop, retlen);
1225 		m_freem(mrep);
1226 		tsiz -= retlen;
1227 		byte_count += retlen;
1228 #ifndef NFS_V2_ONLY
1229 		if (v3) {
1230 			if (eof || retlen == 0)
1231 				tsiz = 0;
1232 		} else
1233 #endif
1234 		if (retlen < len)
1235 			tsiz = 0;
1236 	}
1237 nfsmout:
1238 	iostat_unbusy(nmp->nm_stats, byte_count, 1);
1239 	return (error);
1240 }
1241 
1242 struct nfs_writerpc_context {
1243 	kmutex_t nwc_lock;
1244 	kcondvar_t nwc_cv;
1245 	int nwc_mbufcount;
1246 };
1247 
1248 /*
1249  * free mbuf used to refer protected pages while write rpc call.
1250  * called at splvm.
1251  */
1252 static void
1253 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
1254 {
1255 	struct nfs_writerpc_context *ctx = arg;
1256 
1257 	KASSERT(m != NULL);
1258 	KASSERT(ctx != NULL);
1259 	pool_cache_put(mb_cache, m);
1260 	mutex_enter(&ctx->nwc_lock);
1261 	if (--ctx->nwc_mbufcount == 0) {
1262 		cv_signal(&ctx->nwc_cv);
1263 	}
1264 	mutex_exit(&ctx->nwc_lock);
1265 }
1266 
1267 /*
1268  * nfs write call
1269  */
1270 int
1271 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
1272 {
1273 	u_int32_t *tl;
1274 	char *cp;
1275 	int32_t t1, t2;
1276 	char *bpos, *dpos;
1277 	struct mbuf *mreq, *mrep, *md, *mb;
1278 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1279 	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
1280 	const int v3 = NFS_ISV3(vp);
1281 	int committed = NFSV3WRITE_FILESYNC;
1282 	struct nfsnode *np = VTONFS(vp);
1283 	struct nfs_writerpc_context ctx;
1284 	int byte_count;
1285 	size_t origresid;
1286 #ifndef NFS_V2_ONLY
1287 	char *cp2;
1288 	int rlen, commit;
1289 #endif
1290 
1291 	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
1292 		panic("writerpc readonly vp %p", vp);
1293 	}
1294 
1295 #ifdef DIAGNOSTIC
1296 	if (uiop->uio_iovcnt != 1)
1297 		panic("nfs: writerpc iovcnt > 1");
1298 #endif
1299 	tsiz = uiop->uio_resid;
1300 	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
1301 		return EFBIG;
1302 
1303 	mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
1304 	cv_init(&ctx.nwc_cv, "nfsmblk");
1305 	ctx.nwc_mbufcount = 1;
1306 
1307 retry:
1308 	origresid = uiop->uio_resid;
1309 	KASSERT(origresid == uiop->uio_iov->iov_len);
1310 	iostat_busy(nmp->nm_stats);
1311 	byte_count = 0; /* count of bytes actually written */
1312 	while (tsiz > 0) {
1313 		uint32_t datalen; /* data bytes need to be allocated in mbuf */
1314 		uint32_t backup;
1315 		bool stalewriteverf = false;
1316 
1317 		nfsstats.rpccnt[NFSPROC_WRITE]++;
1318 		len = min(tsiz, nmp->nm_wsize);
1319 		datalen = pageprotected ? 0 : nfsm_rndup(len);
1320 		nfsm_reqhead(np, NFSPROC_WRITE,
1321 			NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
1322 		nfsm_fhtom(np, v3);
1323 #ifndef NFS_V2_ONLY
1324 		if (v3) {
1325 			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
1326 			txdr_hyper(uiop->uio_offset, tl);
1327 			tl += 2;
1328 			*tl++ = txdr_unsigned(len);
1329 			*tl++ = txdr_unsigned(*iomode);
1330 			*tl = txdr_unsigned(len);
1331 		} else
1332 #endif
1333 		{
1334 			u_int32_t x;
1335 
1336 			nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
1337 			/* Set both "begin" and "current" to non-garbage. */
1338 			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
1339 			*tl++ = x;      /* "begin offset" */
1340 			*tl++ = x;      /* "current offset" */
1341 			x = txdr_unsigned(len);
1342 			*tl++ = x;      /* total to this offset */
1343 			*tl = x;        /* size of this write */
1344 
1345 		}
1346 		if (pageprotected) {
1347 			/*
1348 			 * since we know pages can't be modified during i/o,
1349 			 * no need to copy them for us.
1350 			 */
1351 			struct mbuf *m;
1352 			struct iovec *iovp = uiop->uio_iov;
1353 
1354 			m = m_get(M_WAIT, MT_DATA);
1355 			MCLAIM(m, &nfs_mowner);
1356 			MEXTADD(m, iovp->iov_base, len, M_MBUF,
1357 			    nfs_writerpc_extfree, &ctx);
1358 			m->m_flags |= M_EXT_ROMAP;
1359 			m->m_len = len;
1360 			mb->m_next = m;
1361 			/*
1362 			 * no need to maintain mb and bpos here
1363 			 * because no one care them later.
1364 			 */
1365 #if 0
1366 			mb = m;
1367 			bpos = mtod(void *, mb) + mb->m_len;
1368 #endif
1369 			UIO_ADVANCE(uiop, len);
1370 			uiop->uio_offset += len;
1371 			mutex_enter(&ctx.nwc_lock);
1372 			ctx.nwc_mbufcount++;
1373 			mutex_exit(&ctx.nwc_lock);
1374 			nfs_zeropad(mb, 0, nfsm_padlen(len));
1375 		} else {
1376 			nfsm_uiotom(uiop, len);
1377 		}
1378 		nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
1379 #ifndef NFS_V2_ONLY
1380 		if (v3) {
1381 			wccflag = NFSV3_WCCCHK;
1382 			nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
1383 			if (!error) {
1384 				nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
1385 					+ NFSX_V3WRITEVERF);
1386 				rlen = fxdr_unsigned(int, *tl++);
1387 				if (rlen == 0) {
1388 					error = NFSERR_IO;
1389 					m_freem(mrep);
1390 					break;
1391 				} else if (rlen < len) {
1392 					backup = len - rlen;
1393 					UIO_ADVANCE(uiop, -backup);
1394 					uiop->uio_offset -= backup;
1395 					len = rlen;
1396 				}
1397 				commit = fxdr_unsigned(int, *tl++);
1398 
1399 				/*
1400 				 * Return the lowest committment level
1401 				 * obtained by any of the RPCs.
1402 				 */
1403 				if (committed == NFSV3WRITE_FILESYNC)
1404 					committed = commit;
1405 				else if (committed == NFSV3WRITE_DATASYNC &&
1406 					commit == NFSV3WRITE_UNSTABLE)
1407 					committed = commit;
1408 				mutex_enter(&nmp->nm_lock);
1409 				if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
1410 					memcpy(nmp->nm_writeverf, tl,
1411 					    NFSX_V3WRITEVERF);
1412 					nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
1413 				} else if ((nmp->nm_iflag &
1414 				    NFSMNT_STALEWRITEVERF) ||
1415 				    memcmp(tl, nmp->nm_writeverf,
1416 				    NFSX_V3WRITEVERF)) {
1417 					memcpy(nmp->nm_writeverf, tl,
1418 					    NFSX_V3WRITEVERF);
1419 					/*
1420 					 * note NFSMNT_STALEWRITEVERF
1421 					 * if we're the first thread to
1422 					 * notice it.
1423 					 */
1424 					if ((nmp->nm_iflag &
1425 					    NFSMNT_STALEWRITEVERF) == 0) {
1426 						stalewriteverf = true;
1427 						nmp->nm_iflag |=
1428 						    NFSMNT_STALEWRITEVERF;
1429 					}
1430 				}
1431 				mutex_exit(&nmp->nm_lock);
1432 			}
1433 		} else
1434 #endif
1435 			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
1436 		if (wccflag)
1437 			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
1438 		m_freem(mrep);
1439 		if (error)
1440 			break;
1441 		tsiz -= len;
1442 		byte_count += len;
1443 		if (stalewriteverf) {
1444 			*stalewriteverfp = true;
1445 			stalewriteverf = false;
1446 			if (committed == NFSV3WRITE_UNSTABLE &&
1447 			    len != origresid) {
1448 				/*
1449 				 * if our write requests weren't atomic but
1450 				 * unstable, datas in previous iterations
1451 				 * might have already been lost now.
1452 				 * then, we should resend them to nfsd.
1453 				 */
1454 				backup = origresid - tsiz;
1455 				UIO_ADVANCE(uiop, -backup);
1456 				uiop->uio_offset -= backup;
1457 				tsiz = origresid;
1458 				goto retry;
1459 			}
1460 		}
1461 	}
1462 nfsmout:
1463 	iostat_unbusy(nmp->nm_stats, byte_count, 0);
1464 	if (pageprotected) {
1465 		/*
1466 		 * wait until mbufs go away.
1467 		 * retransmitted mbufs can survive longer than rpc requests
1468 		 * themselves.
1469 		 */
1470 		mutex_enter(&ctx.nwc_lock);
1471 		ctx.nwc_mbufcount--;
1472 		while (ctx.nwc_mbufcount > 0) {
1473 			cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
1474 		}
1475 		mutex_exit(&ctx.nwc_lock);
1476 	}
1477 	mutex_destroy(&ctx.nwc_lock);
1478 	cv_destroy(&ctx.nwc_cv);
1479 	*iomode = committed;
1480 	if (error)
1481 		uiop->uio_resid = tsiz;
1482 	return (error);
1483 }
1484 
1485 /*
1486  * nfs mknod rpc
1487  * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
1488  * mode set to specify the file type and the size field for rdev.
1489  */
1490 int
1491 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
1492 {
1493 	struct nfsv2_sattr *sp;
1494 	u_int32_t *tl;
1495 	char *cp;
1496 	int32_t t1, t2;
1497 	struct vnode *newvp = (struct vnode *)0;
1498 	struct nfsnode *dnp, *np;
1499 	char *cp2;
1500 	char *bpos, *dpos;
1501 	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1502 	struct mbuf *mreq, *mrep, *md, *mb;
1503 	u_int32_t rdev;
1504 	const int v3 = NFS_ISV3(dvp);
1505 
1506 	if (vap->va_type == VCHR || vap->va_type == VBLK)
1507 		rdev = txdr_unsigned(vap->va_rdev);
1508 	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
1509 		rdev = nfs_xdrneg1;
1510 	else {
1511 		VOP_ABORTOP(dvp, cnp);
1512 		return (EOPNOTSUPP);
1513 	}
1514 	nfsstats.rpccnt[NFSPROC_MKNOD]++;
1515 	dnp = VTONFS(dvp);
1516 	nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
1517 		+ nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1518 	nfsm_fhtom(dnp, v3);
1519 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1520 #ifndef NFS_V2_ONLY
1521 	if (v3) {
1522 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1523 		*tl++ = vtonfsv3_type(vap->va_type);
1524 		nfsm_v3attrbuild(vap, false);
1525 		if (vap->va_type == VCHR || vap->va_type == VBLK) {
1526 			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
1527 			*tl++ = txdr_unsigned(major(vap->va_rdev));
1528 			*tl = txdr_unsigned(minor(vap->va_rdev));
1529 		}
1530 	} else
1531 #endif
1532 	{
1533 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1534 		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1535 		sp->sa_uid = nfs_xdrneg1;
1536 		sp->sa_gid = nfs_xdrneg1;
1537 		sp->sa_size = rdev;
1538 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1539 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1540 	}
1541 	nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
1542 	if (!error) {
1543 		nfsm_mtofh(dvp, newvp, v3, gotvp);
1544 		if (!gotvp) {
1545 			error = nfs_lookitup(dvp, cnp->cn_nameptr,
1546 			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1547 			if (!error)
1548 				newvp = NFSTOV(np);
1549 		}
1550 	}
1551 #ifndef NFS_V2_ONLY
1552 	if (v3)
1553 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1554 #endif
1555 	nfsm_reqdone;
1556 	if (error) {
1557 		if (newvp)
1558 			vput(newvp);
1559 	} else {
1560 		nfs_cache_enter(dvp, newvp, cnp);
1561 		*vpp = newvp;
1562 		VOP_UNLOCK(newvp);
1563 	}
1564 	VTONFS(dvp)->n_flag |= NMODIFIED;
1565 	if (!wccflag)
1566 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1567 	return (error);
1568 }
1569 
1570 /*
1571  * nfs mknod vop
1572  * just call nfs_mknodrpc() to do the work.
1573  */
1574 /* ARGSUSED */
1575 int
1576 nfs_mknod(void *v)
1577 {
1578 	struct vop_mknod_v3_args /* {
1579 		struct vnode *a_dvp;
1580 		struct vnode **a_vpp;
1581 		struct componentname *a_cnp;
1582 		struct vattr *a_vap;
1583 	} */ *ap = v;
1584 	struct vnode *dvp = ap->a_dvp;
1585 	struct componentname *cnp = ap->a_cnp;
1586 	int error;
1587 
1588 	error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
1589 	VN_KNOTE(dvp, NOTE_WRITE);
1590 	if (error == 0 || error == EEXIST)
1591 		cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1592 	return (error);
1593 }
1594 
1595 /*
1596  * nfs file create call
1597  */
1598 int
1599 nfs_create(void *v)
1600 {
1601 	struct vop_create_v3_args /* {
1602 		struct vnode *a_dvp;
1603 		struct vnode **a_vpp;
1604 		struct componentname *a_cnp;
1605 		struct vattr *a_vap;
1606 	} */ *ap = v;
1607 	struct vnode *dvp = ap->a_dvp;
1608 	struct vattr *vap = ap->a_vap;
1609 	struct componentname *cnp = ap->a_cnp;
1610 	struct nfsv2_sattr *sp;
1611 	u_int32_t *tl;
1612 	char *cp;
1613 	int32_t t1, t2;
1614 	struct nfsnode *dnp, *np = (struct nfsnode *)0;
1615 	struct vnode *newvp = (struct vnode *)0;
1616 	char *bpos, *dpos, *cp2;
1617 	int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
1618 	struct mbuf *mreq, *mrep, *md, *mb;
1619 	const int v3 = NFS_ISV3(dvp);
1620 	u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;
1621 
1622 	/*
1623 	 * Oops, not for me..
1624 	 */
1625 	if (vap->va_type == VSOCK)
1626 		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
1627 
1628 	KASSERT(vap->va_type == VREG);
1629 
1630 #ifdef VA_EXCLUSIVE
1631 	if (vap->va_vaflags & VA_EXCLUSIVE) {
1632 		excl_mode = NFSV3CREATE_EXCLUSIVE;
1633 	}
1634 #endif
1635 again:
1636 	error = 0;
1637 	nfsstats.rpccnt[NFSPROC_CREATE]++;
1638 	dnp = VTONFS(dvp);
1639 	nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
1640 		nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
1641 	nfsm_fhtom(dnp, v3);
1642 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
1643 #ifndef NFS_V2_ONLY
1644 	if (v3) {
1645 		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
1646 		if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1647 			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
1648 			nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
1649 			*tl++ = cprng_fast32();
1650 			*tl = cprng_fast32();
1651 		} else {
1652 			*tl = txdr_unsigned(excl_mode);
1653 			nfsm_v3attrbuild(vap, false);
1654 		}
1655 	} else
1656 #endif
1657 	{
1658 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
1659 		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
1660 		sp->sa_uid = nfs_xdrneg1;
1661 		sp->sa_gid = nfs_xdrneg1;
1662 		sp->sa_size = 0;
1663 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
1664 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
1665 	}
1666 	nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
1667 	if (!error) {
1668 		nfsm_mtofh(dvp, newvp, v3, gotvp);
1669 		if (!gotvp) {
1670 			error = nfs_lookitup(dvp, cnp->cn_nameptr,
1671 			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
1672 			if (!error)
1673 				newvp = NFSTOV(np);
1674 		}
1675 	}
1676 #ifndef NFS_V2_ONLY
1677 	if (v3)
1678 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1679 #endif
1680 	nfsm_reqdone;
1681 	if (error) {
1682 		/*
1683 		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
1684 		 */
1685 		if (v3 && error == ENOTSUP) {
1686 			if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
1687 				excl_mode = NFSV3CREATE_GUARDED;
1688 				goto again;
1689 			} else if (excl_mode == NFSV3CREATE_GUARDED) {
1690 				excl_mode = NFSV3CREATE_UNCHECKED;
1691 				goto again;
1692 			}
1693 		}
1694 	} else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
1695 		struct timespec ts;
1696 
1697 		getnanotime(&ts);
1698 
1699 		/*
1700 		 * make sure that we'll update timestamps as
1701 		 * most server implementations use them to store
1702 		 * the create verifier.
1703 		 *
1704 		 * XXX it's better to use TOSERVER always.
1705 		 */
1706 
1707 		if (vap->va_atime.tv_sec == VNOVAL)
1708 			vap->va_atime = ts;
1709 		if (vap->va_mtime.tv_sec == VNOVAL)
1710 			vap->va_mtime = ts;
1711 
1712 		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
1713 	}
1714 	if (error == 0) {
1715 		if (cnp->cn_flags & MAKEENTRY)
1716 			nfs_cache_enter(dvp, newvp, cnp);
1717 		else
1718 			cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1719 		*ap->a_vpp = newvp;
1720 		VOP_UNLOCK(newvp);
1721 	} else {
1722 		if (newvp)
1723 			vput(newvp);
1724 		if (error == EEXIST)
1725 			cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
1726 	}
1727 	VTONFS(dvp)->n_flag |= NMODIFIED;
1728 	if (!wccflag)
1729 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1730 	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
1731 	return (error);
1732 }
1733 
1734 /*
1735  * nfs file remove call
1736  * To try and make nfs semantics closer to ufs semantics, a file that has
1737  * other processes using the vnode is renamed instead of removed and then
1738  * removed later on the last close.
1739  * - If v_usecount > 1
1740  *	  If a rename is not already in the works
1741  *	     call nfs_sillyrename() to set it up
1742  *     else
1743  *	  do the remove rpc
1744  */
1745 int
1746 nfs_remove(void *v)
1747 {
1748 	struct vop_remove_args /* {
1749 		struct vnodeop_desc *a_desc;
1750 		struct vnode * a_dvp;
1751 		struct vnode * a_vp;
1752 		struct componentname * a_cnp;
1753 	} */ *ap = v;
1754 	struct vnode *vp = ap->a_vp;
1755 	struct vnode *dvp = ap->a_dvp;
1756 	struct componentname *cnp = ap->a_cnp;
1757 	struct nfsnode *np = VTONFS(vp);
1758 	int error = 0;
1759 	struct vattr vattr;
1760 
1761 #ifndef DIAGNOSTIC
1762 	if (vp->v_usecount < 1)
1763 		panic("nfs_remove: bad v_usecount");
1764 #endif
1765 	if (vp->v_type == VDIR)
1766 		error = EPERM;
1767 	else if (vp->v_usecount == 1 || (np->n_sillyrename &&
1768 	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
1769 	    vattr.va_nlink > 1)) {
1770 		/*
1771 		 * Purge the name cache so that the chance of a lookup for
1772 		 * the name succeeding while the remove is in progress is
1773 		 * minimized. Without node locking it can still happen, such
1774 		 * that an I/O op returns ESTALE, but since you get this if
1775 		 * another host removes the file..
1776 		 */
1777 		cache_purge(vp);
1778 		/*
1779 		 * throw away biocache buffers, mainly to avoid
1780 		 * unnecessary delayed writes later.
1781 		 */
1782 		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
1783 		/* Do the rpc */
1784 		if (error != EINTR)
1785 			error = nfs_removerpc(dvp, cnp->cn_nameptr,
1786 				cnp->cn_namelen, cnp->cn_cred, curlwp);
1787 	} else if (!np->n_sillyrename)
1788 		error = nfs_sillyrename(dvp, vp, cnp, false);
1789 	if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
1790 	    vattr.va_nlink == 1) {
1791 		np->n_flag |= NREMOVED;
1792 	}
1793 	NFS_INVALIDATE_ATTRCACHE(np);
1794 	VN_KNOTE(vp, NOTE_DELETE);
1795 	VN_KNOTE(dvp, NOTE_WRITE);
1796 	if (dvp == vp)
1797 		vrele(vp);
1798 	else
1799 		vput(vp);
1800 	vput(dvp);
1801 	return (error);
1802 }
1803 
1804 /*
1805  * nfs file remove rpc called from nfs_inactive
1806  */
1807 int
1808 nfs_removeit(struct sillyrename *sp)
1809 {
1810 
1811 	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
1812 		(struct lwp *)0));
1813 }
1814 
1815 /*
1816  * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
1817  */
1818 int
1819 nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
1820 {
1821 	u_int32_t *tl;
1822 	char *cp;
1823 #ifndef NFS_V2_ONLY
1824 	int32_t t1;
1825 	char *cp2;
1826 #endif
1827 	int32_t t2;
1828 	char *bpos, *dpos;
1829 	int error = 0, wccflag = NFSV3_WCCRATTR;
1830 	struct mbuf *mreq, *mrep, *md, *mb;
1831 	const int v3 = NFS_ISV3(dvp);
1832 	int rexmit = 0;
1833 	struct nfsnode *dnp = VTONFS(dvp);
1834 
1835 	nfsstats.rpccnt[NFSPROC_REMOVE]++;
1836 	nfsm_reqhead(dnp, NFSPROC_REMOVE,
1837 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
1838 	nfsm_fhtom(dnp, v3);
1839 	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
1840 	nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
1841 #ifndef NFS_V2_ONLY
1842 	if (v3)
1843 		nfsm_wcc_data(dvp, wccflag, 0, !error);
1844 #endif
1845 	nfsm_reqdone;
1846 	VTONFS(dvp)->n_flag |= NMODIFIED;
1847 	if (!wccflag)
1848 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
1849 	/*
1850 	 * Kludge City: If the first reply to the remove rpc is lost..
1851 	 *   the reply to the retransmitted request will be ENOENT
1852 	 *   since the file was in fact removed
1853 	 *   Therefore, we cheat and return success.
1854 	 */
1855 	if (rexmit && error == ENOENT)
1856 		error = 0;
1857 	return (error);
1858 }
1859 
1860 /*
1861  * nfs file rename call
1862  */
1863 int
1864 nfs_rename(void *v)
1865 {
1866 	struct vop_rename_args  /* {
1867 		struct vnode *a_fdvp;
1868 		struct vnode *a_fvp;
1869 		struct componentname *a_fcnp;
1870 		struct vnode *a_tdvp;
1871 		struct vnode *a_tvp;
1872 		struct componentname *a_tcnp;
1873 	} */ *ap = v;
1874 	struct vnode *fvp = ap->a_fvp;
1875 	struct vnode *tvp = ap->a_tvp;
1876 	struct vnode *fdvp = ap->a_fdvp;
1877 	struct vnode *tdvp = ap->a_tdvp;
1878 	struct componentname *tcnp = ap->a_tcnp;
1879 	struct componentname *fcnp = ap->a_fcnp;
1880 	int error;
1881 
1882 	/* Check for cross-device rename */
1883 	if ((fvp->v_mount != tdvp->v_mount) ||
1884 	    (tvp && (fvp->v_mount != tvp->v_mount))) {
1885 		error = EXDEV;
1886 		goto out;
1887 	}
1888 
1889 	/*
1890 	 * If the tvp exists and is in use, sillyrename it before doing the
1891 	 * rename of the new file over it.
1892 	 *
1893 	 * Have sillyrename use link instead of rename if possible,
1894 	 * so that we don't lose the file if the rename fails, and so
1895 	 * that there's no window when the "to" file doesn't exist.
1896 	 */
1897 	if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
1898 	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
1899 		VN_KNOTE(tvp, NOTE_DELETE);
1900 		vput(tvp);
1901 		tvp = NULL;
1902 	}
1903 
1904 	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1905 		tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
1906 		curlwp);
1907 
1908 	VN_KNOTE(fdvp, NOTE_WRITE);
1909 	VN_KNOTE(tdvp, NOTE_WRITE);
1910 	if (error == 0 || error == EEXIST) {
1911 		if (fvp->v_type == VDIR)
1912 			cache_purge(fvp);
1913 		else
1914 			cache_purge1(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
1915 				     0);
1916 		if (tvp != NULL && tvp->v_type == VDIR)
1917 			cache_purge(tvp);
1918 		else
1919 			cache_purge1(tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
1920 				     0);
1921 	}
1922 out:
1923 	if (tdvp == tvp)
1924 		vrele(tdvp);
1925 	else
1926 		vput(tdvp);
1927 	if (tvp)
1928 		vput(tvp);
1929 	vrele(fdvp);
1930 	vrele(fvp);
1931 	return (error);
1932 }
1933 
1934 /*
1935  * nfs file rename rpc called from nfs_remove() above
1936  */
1937 int
1938 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
1939 {
1940 	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
1941 		sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
1942 }
1943 
1944 /*
1945  * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1946  */
1947 int
1948 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l)
1949 {
1950 	u_int32_t *tl;
1951 	char *cp;
1952 #ifndef NFS_V2_ONLY
1953 	int32_t t1;
1954 	char *cp2;
1955 #endif
1956 	int32_t t2;
1957 	char *bpos, *dpos;
1958 	int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
1959 	struct mbuf *mreq, *mrep, *md, *mb;
1960 	const int v3 = NFS_ISV3(fdvp);
1961 	int rexmit = 0;
1962 	struct nfsnode *fdnp = VTONFS(fdvp);
1963 
1964 	nfsstats.rpccnt[NFSPROC_RENAME]++;
1965 	nfsm_reqhead(fdnp, NFSPROC_RENAME,
1966 		(NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
1967 		nfsm_rndup(tnamelen));
1968 	nfsm_fhtom(fdnp, v3);
1969 	nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
1970 	nfsm_fhtom(VTONFS(tdvp), v3);
1971 	nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
1972 	nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
1973 #ifndef NFS_V2_ONLY
1974 	if (v3) {
1975 		nfsm_wcc_data(fdvp, fwccflag, 0, !error);
1976 		nfsm_wcc_data(tdvp, twccflag, 0, !error);
1977 	}
1978 #endif
1979 	nfsm_reqdone;
1980 	VTONFS(fdvp)->n_flag |= NMODIFIED;
1981 	VTONFS(tdvp)->n_flag |= NMODIFIED;
1982 	if (!fwccflag)
1983 		NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
1984 	if (!twccflag)
1985 		NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
1986 	/*
1987 	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
1988 	 */
1989 	if (rexmit && error == ENOENT)
1990 		error = 0;
1991 	return (error);
1992 }
1993 
1994 /*
1995  * NFS link RPC, called from nfs_link.
1996  * Assumes dvp and vp locked, and leaves them that way.
1997  */
1998 
1999 static int
2000 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name,
2001     size_t namelen, kauth_cred_t cred, struct lwp *l)
2002 {
2003 	u_int32_t *tl;
2004 	char *cp;
2005 #ifndef NFS_V2_ONLY
2006 	int32_t t1;
2007 	char *cp2;
2008 #endif
2009 	int32_t t2;
2010 	char *bpos, *dpos;
2011 	int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
2012 	struct mbuf *mreq, *mrep, *md, *mb;
2013 	const int v3 = NFS_ISV3(dvp);
2014 	int rexmit = 0;
2015 	struct nfsnode *np = VTONFS(vp);
2016 
2017 	nfsstats.rpccnt[NFSPROC_LINK]++;
2018 	nfsm_reqhead(np, NFSPROC_LINK,
2019 	    NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen));
2020 	nfsm_fhtom(np, v3);
2021 	nfsm_fhtom(VTONFS(dvp), v3);
2022 	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
2023 	nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit);
2024 #ifndef NFS_V2_ONLY
2025 	if (v3) {
2026 		nfsm_postop_attr(vp, attrflag, 0);
2027 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2028 	}
2029 #endif
2030 	nfsm_reqdone;
2031 
2032 	VTONFS(dvp)->n_flag |= NMODIFIED;
2033 	if (!attrflag)
2034 		NFS_INVALIDATE_ATTRCACHE(VTONFS(vp));
2035 	if (!wccflag)
2036 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2037 
2038 	/*
2039 	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2040 	 */
2041 	if (rexmit && error == EEXIST)
2042 		error = 0;
2043 
2044 	return error;
2045 }
2046 
2047 /*
2048  * nfs hard link create call
2049  */
2050 int
2051 nfs_link(void *v)
2052 {
2053 	struct vop_link_args /* {
2054 		struct vnode *a_dvp;
2055 		struct vnode *a_vp;
2056 		struct componentname *a_cnp;
2057 	} */ *ap = v;
2058 	struct vnode *vp = ap->a_vp;
2059 	struct vnode *dvp = ap->a_dvp;
2060 	struct componentname *cnp = ap->a_cnp;
2061 	int error = 0;
2062 
2063 	error = vn_lock(vp, LK_EXCLUSIVE);
2064 	if (error != 0) {
2065 		VOP_ABORTOP(dvp, cnp);
2066 		vput(dvp);
2067 		return error;
2068 	}
2069 
2070 	/*
2071 	 * Push all writes to the server, so that the attribute cache
2072 	 * doesn't get "out of sync" with the server.
2073 	 * XXX There should be a better way!
2074 	 */
2075 	VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0);
2076 
2077 	error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2078 	    cnp->cn_cred, curlwp);
2079 
2080 	if (error == 0) {
2081 		cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2082 	}
2083 	VOP_UNLOCK(vp);
2084 	VN_KNOTE(vp, NOTE_LINK);
2085 	VN_KNOTE(dvp, NOTE_WRITE);
2086 	vput(dvp);
2087 	return (error);
2088 }
2089 
2090 /*
2091  * nfs symbolic link create call
2092  */
2093 int
2094 nfs_symlink(void *v)
2095 {
2096 	struct vop_symlink_v3_args /* {
2097 		struct vnode *a_dvp;
2098 		struct vnode **a_vpp;
2099 		struct componentname *a_cnp;
2100 		struct vattr *a_vap;
2101 		char *a_target;
2102 	} */ *ap = v;
2103 	struct vnode *dvp = ap->a_dvp;
2104 	struct vattr *vap = ap->a_vap;
2105 	struct componentname *cnp = ap->a_cnp;
2106 	struct nfsv2_sattr *sp;
2107 	u_int32_t *tl;
2108 	char *cp;
2109 	int32_t t1, t2;
2110 	char *bpos, *dpos, *cp2;
2111 	int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2112 	struct mbuf *mreq, *mrep, *md, *mb;
2113 	struct vnode *newvp = (struct vnode *)0;
2114 	const int v3 = NFS_ISV3(dvp);
2115 	int rexmit = 0;
2116 	struct nfsnode *dnp = VTONFS(dvp);
2117 
2118 	*ap->a_vpp = NULL;
2119 	nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2120 	slen = strlen(ap->a_target);
2121 	nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2122 	    nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2123 	nfsm_fhtom(dnp, v3);
2124 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2125 #ifndef NFS_V2_ONlY
2126 	if (v3)
2127 		nfsm_v3attrbuild(vap, false);
2128 #endif
2129 	nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2130 #ifndef NFS_V2_ONlY
2131 	if (!v3) {
2132 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2133 		sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2134 		sp->sa_uid = nfs_xdrneg1;
2135 		sp->sa_gid = nfs_xdrneg1;
2136 		sp->sa_size = nfs_xdrneg1;
2137 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2138 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2139 	}
2140 #endif
2141 	nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred,
2142 	    &rexmit);
2143 #ifndef NFS_V2_ONlY
2144 	if (v3) {
2145 		if (!error)
2146 			nfsm_mtofh(dvp, newvp, v3, gotvp);
2147 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2148 	}
2149 #endif
2150 	nfsm_reqdone;
2151 	/*
2152 	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
2153 	 */
2154 	if (rexmit && error == EEXIST)
2155 		error = 0;
2156 	if (error == 0 || error == EEXIST)
2157 		cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2158 	if (error == 0 && newvp == NULL) {
2159 		struct nfsnode *np = NULL;
2160 
2161 		error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
2162 		    cnp->cn_cred, curlwp, &np);
2163 		if (error == 0)
2164 			newvp = NFSTOV(np);
2165 	}
2166 	if (error) {
2167 		if (newvp != NULL)
2168 			vput(newvp);
2169 	} else {
2170 		*ap->a_vpp = newvp;
2171 		VOP_UNLOCK(newvp);
2172 	}
2173 	VTONFS(dvp)->n_flag |= NMODIFIED;
2174 	if (!wccflag)
2175 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2176 	VN_KNOTE(dvp, NOTE_WRITE);
2177 	return (error);
2178 }
2179 
2180 /*
2181  * nfs make dir call
2182  */
2183 int
2184 nfs_mkdir(void *v)
2185 {
2186 	struct vop_mkdir_v3_args /* {
2187 		struct vnode *a_dvp;
2188 		struct vnode **a_vpp;
2189 		struct componentname *a_cnp;
2190 		struct vattr *a_vap;
2191 	} */ *ap = v;
2192 	struct vnode *dvp = ap->a_dvp;
2193 	struct vattr *vap = ap->a_vap;
2194 	struct componentname *cnp = ap->a_cnp;
2195 	struct nfsv2_sattr *sp;
2196 	u_int32_t *tl;
2197 	char *cp;
2198 	int32_t t1, t2;
2199 	int len;
2200 	struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0;
2201 	struct vnode *newvp = (struct vnode *)0;
2202 	char *bpos, *dpos, *cp2;
2203 	int error = 0, wccflag = NFSV3_WCCRATTR;
2204 	int gotvp = 0;
2205 	int rexmit = 0;
2206 	struct mbuf *mreq, *mrep, *md, *mb;
2207 	const int v3 = NFS_ISV3(dvp);
2208 
2209 	len = cnp->cn_namelen;
2210 	nfsstats.rpccnt[NFSPROC_MKDIR]++;
2211 	nfsm_reqhead(dnp, NFSPROC_MKDIR,
2212 	  NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
2213 	nfsm_fhtom(dnp, v3);
2214 	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
2215 #ifndef NFS_V2_ONLY
2216 	if (v3) {
2217 		nfsm_v3attrbuild(vap, false);
2218 	} else
2219 #endif
2220 	{
2221 		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2222 		sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
2223 		sp->sa_uid = nfs_xdrneg1;
2224 		sp->sa_gid = nfs_xdrneg1;
2225 		sp->sa_size = nfs_xdrneg1;
2226 		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2227 		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2228 	}
2229 	nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit);
2230 	if (!error)
2231 		nfsm_mtofh(dvp, newvp, v3, gotvp);
2232 	if (v3)
2233 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2234 	nfsm_reqdone;
2235 	VTONFS(dvp)->n_flag |= NMODIFIED;
2236 	if (!wccflag)
2237 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2238 	/*
2239 	 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
2240 	 * if we can succeed in looking up the directory.
2241 	 */
2242 	if ((rexmit && error == EEXIST) || (!error && !gotvp)) {
2243 		if (newvp) {
2244 			vput(newvp);
2245 			newvp = (struct vnode *)0;
2246 		}
2247 		error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
2248 			curlwp, &np);
2249 		if (!error) {
2250 			newvp = NFSTOV(np);
2251 			if (newvp->v_type != VDIR || newvp == dvp)
2252 				error = EEXIST;
2253 		}
2254 	}
2255 	if (error) {
2256 		if (newvp) {
2257 			if (dvp != newvp)
2258 				vput(newvp);
2259 			else
2260 				vrele(newvp);
2261 		}
2262 	} else {
2263 		VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2264 		nfs_cache_enter(dvp, newvp, cnp);
2265 		*ap->a_vpp = newvp;
2266 		VOP_UNLOCK(newvp);
2267 	}
2268 	return (error);
2269 }
2270 
2271 /*
2272  * nfs remove directory call
2273  */
2274 int
2275 nfs_rmdir(void *v)
2276 {
2277 	struct vop_rmdir_args /* {
2278 		struct vnode *a_dvp;
2279 		struct vnode *a_vp;
2280 		struct componentname *a_cnp;
2281 	} */ *ap = v;
2282 	struct vnode *vp = ap->a_vp;
2283 	struct vnode *dvp = ap->a_dvp;
2284 	struct componentname *cnp = ap->a_cnp;
2285 	u_int32_t *tl;
2286 	char *cp;
2287 #ifndef NFS_V2_ONLY
2288 	int32_t t1;
2289 	char *cp2;
2290 #endif
2291 	int32_t t2;
2292 	char *bpos, *dpos;
2293 	int error = 0, wccflag = NFSV3_WCCRATTR;
2294 	int rexmit = 0;
2295 	struct mbuf *mreq, *mrep, *md, *mb;
2296 	const int v3 = NFS_ISV3(dvp);
2297 	struct nfsnode *dnp;
2298 
2299 	if (dvp == vp) {
2300 		vrele(dvp);
2301 		vput(dvp);
2302 		return (EINVAL);
2303 	}
2304 	nfsstats.rpccnt[NFSPROC_RMDIR]++;
2305 	dnp = VTONFS(dvp);
2306 	nfsm_reqhead(dnp, NFSPROC_RMDIR,
2307 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
2308 	nfsm_fhtom(dnp, v3);
2309 	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2310 	nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit);
2311 #ifndef NFS_V2_ONLY
2312 	if (v3)
2313 		nfsm_wcc_data(dvp, wccflag, 0, !error);
2314 #endif
2315 	nfsm_reqdone;
2316 	VTONFS(dvp)->n_flag |= NMODIFIED;
2317 	if (!wccflag)
2318 		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
2319 	VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
2320 	VN_KNOTE(vp, NOTE_DELETE);
2321 	cache_purge(vp);
2322 	vput(vp);
2323 	vput(dvp);
2324 	/*
2325 	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
2326 	 */
2327 	if (rexmit && error == ENOENT)
2328 		error = 0;
2329 	return (error);
2330 }
2331 
2332 /*
2333  * nfs readdir call
2334  */
2335 int
2336 nfs_readdir(void *v)
2337 {
2338 	struct vop_readdir_args /* {
2339 		struct vnode *a_vp;
2340 		struct uio *a_uio;
2341 		kauth_cred_t a_cred;
2342 		int *a_eofflag;
2343 		off_t **a_cookies;
2344 		int *a_ncookies;
2345 	} */ *ap = v;
2346 	struct vnode *vp = ap->a_vp;
2347 	struct uio *uio = ap->a_uio;
2348 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2349 	char *base = uio->uio_iov->iov_base;
2350 	int tresid, error;
2351 	size_t count, lost;
2352 	struct dirent *dp;
2353 	off_t *cookies = NULL;
2354 	int ncookies = 0, nc;
2355 
2356 	if (vp->v_type != VDIR)
2357 		return (EPERM);
2358 
2359 	lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1);
2360 	count = uio->uio_resid - lost;
2361 	if (count <= 0)
2362 		return (EINVAL);
2363 
2364 	/*
2365 	 * Call nfs_bioread() to do the real work.
2366 	 */
2367 	tresid = uio->uio_resid = count;
2368 	error = nfs_bioread(vp, uio, 0, ap->a_cred,
2369 		    ap->a_cookies ? NFSBIO_CACHECOOKIES : 0);
2370 
2371 	if (!error && ap->a_cookies) {
2372 		ncookies = count / 16;
2373 		cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
2374 		*ap->a_cookies = cookies;
2375 	}
2376 
2377 	if (!error && uio->uio_resid == tresid) {
2378 		uio->uio_resid += lost;
2379 		nfsstats.direofcache_misses++;
2380 		if (ap->a_cookies)
2381 			*ap->a_ncookies = 0;
2382 		*ap->a_eofflag = 1;
2383 		return (0);
2384 	}
2385 
2386 	if (!error && ap->a_cookies) {
2387 		/*
2388 		 * Only the NFS server and emulations use cookies, and they
2389 		 * load the directory block into system space, so we can
2390 		 * just look at it directly.
2391 		 */
2392 		if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) ||
2393 		    uio->uio_iovcnt != 1)
2394 			panic("nfs_readdir: lost in space");
2395 		for (nc = 0; ncookies-- &&
2396 		     base < (char *)uio->uio_iov->iov_base; nc++){
2397 			dp = (struct dirent *) base;
2398 			if (dp->d_reclen == 0)
2399 				break;
2400 			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
2401 				*(cookies++) = (off_t)NFS_GETCOOKIE32(dp);
2402 			else
2403 				*(cookies++) = NFS_GETCOOKIE(dp);
2404 			base += dp->d_reclen;
2405 		}
2406 		uio->uio_resid +=
2407 		    ((char *)uio->uio_iov->iov_base - base);
2408 		uio->uio_iov->iov_len +=
2409 		    ((char *)uio->uio_iov->iov_base - base);
2410 		uio->uio_iov->iov_base = base;
2411 		*ap->a_ncookies = nc;
2412 	}
2413 
2414 	uio->uio_resid += lost;
2415 	*ap->a_eofflag = 0;
2416 	return (error);
2417 }
2418 
2419 /*
2420  * Readdir rpc call.
2421  * Called from below the buffer cache by nfs_doio().
2422  */
2423 int
2424 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2425 {
2426 	int len, left;
2427 	struct dirent *dp = NULL;
2428 	u_int32_t *tl;
2429 	char *cp;
2430 	int32_t t1, t2;
2431 	char *bpos, *dpos, *cp2;
2432 	struct mbuf *mreq, *mrep, *md, *mb;
2433 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2434 	struct nfsnode *dnp = VTONFS(vp);
2435 	u_quad_t fileno;
2436 	int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1;
2437 #ifndef NFS_V2_ONLY
2438 	int attrflag;
2439 #endif
2440 	int nrpcs = 0, reclen;
2441 	const int v3 = NFS_ISV3(vp);
2442 
2443 #ifdef DIAGNOSTIC
2444 	/*
2445 	 * Should be called from buffer cache, so only amount of
2446 	 * NFS_DIRBLKSIZ will be requested.
2447 	 */
2448 	if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2449 		panic("nfs readdirrpc bad uio");
2450 #endif
2451 
2452 	/*
2453 	 * Loop around doing readdir rpc's of size nm_readdirsize
2454 	 * truncated to a multiple of NFS_DIRFRAGSIZ.
2455 	 * The stopping criteria is EOF or buffer full.
2456 	 */
2457 	while (more_dirs && bigenough) {
2458 		/*
2459 		 * Heuristic: don't bother to do another RPC to further
2460 		 * fill up this block if there is not much room left. (< 50%
2461 		 * of the readdir RPC size). This wastes some buffer space
2462 		 * but can save up to 50% in RPC calls.
2463 		 */
2464 		if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2465 			bigenough = 0;
2466 			break;
2467 		}
2468 		nfsstats.rpccnt[NFSPROC_READDIR]++;
2469 		nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) +
2470 			NFSX_READDIR(v3));
2471 		nfsm_fhtom(dnp, v3);
2472 #ifndef NFS_V2_ONLY
2473 		if (v3) {
2474 			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
2475 			if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2476 				txdr_swapcookie3(uiop->uio_offset, tl);
2477 			} else {
2478 				txdr_cookie3(uiop->uio_offset, tl);
2479 			}
2480 			tl += 2;
2481 			*tl++ = dnp->n_cookieverf.nfsuquad[0];
2482 			*tl++ = dnp->n_cookieverf.nfsuquad[1];
2483 		} else
2484 #endif
2485 		{
2486 			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
2487 			*tl++ = txdr_unsigned(uiop->uio_offset);
2488 		}
2489 		*tl = txdr_unsigned(nmp->nm_readdirsize);
2490 		nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred);
2491 		nrpcs++;
2492 #ifndef NFS_V2_ONLY
2493 		if (v3) {
2494 			nfsm_postop_attr(vp, attrflag, 0);
2495 			if (!error) {
2496 				nfsm_dissect(tl, u_int32_t *,
2497 				    2 * NFSX_UNSIGNED);
2498 				dnp->n_cookieverf.nfsuquad[0] = *tl++;
2499 				dnp->n_cookieverf.nfsuquad[1] = *tl;
2500 			} else {
2501 				m_freem(mrep);
2502 				goto nfsmout;
2503 			}
2504 		}
2505 #endif
2506 		nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2507 		more_dirs = fxdr_unsigned(int, *tl);
2508 
2509 		/* loop thru the dir entries, doctoring them to 4bsd form */
2510 		while (more_dirs && bigenough) {
2511 #ifndef NFS_V2_ONLY
2512 			if (v3) {
2513 				nfsm_dissect(tl, u_int32_t *,
2514 				    3 * NFSX_UNSIGNED);
2515 				fileno = fxdr_hyper(tl);
2516 				len = fxdr_unsigned(int, *(tl + 2));
2517 			} else
2518 #endif
2519 			{
2520 				nfsm_dissect(tl, u_int32_t *,
2521 				    2 * NFSX_UNSIGNED);
2522 				fileno = fxdr_unsigned(u_quad_t, *tl++);
2523 				len = fxdr_unsigned(int, *tl);
2524 			}
2525 			if (len <= 0 || len > NFS_MAXNAMLEN) {
2526 				error = EBADRPC;
2527 				m_freem(mrep);
2528 				goto nfsmout;
2529 			}
2530 			/* for cookie stashing */
2531 			reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2532 			left = NFS_DIRFRAGSIZ - blksiz;
2533 			if (reclen > left) {
2534 				memset(uiop->uio_iov->iov_base, 0, left);
2535 				dp->d_reclen += left;
2536 				UIO_ADVANCE(uiop, left);
2537 				blksiz = 0;
2538 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2539 			}
2540 			if (reclen > uiop->uio_resid)
2541 				bigenough = 0;
2542 			if (bigenough) {
2543 				int tlen;
2544 
2545 				dp = (struct dirent *)uiop->uio_iov->iov_base;
2546 				dp->d_fileno = fileno;
2547 				dp->d_namlen = len;
2548 				dp->d_reclen = reclen;
2549 				dp->d_type = DT_UNKNOWN;
2550 				blksiz += reclen;
2551 				if (blksiz == NFS_DIRFRAGSIZ)
2552 					blksiz = 0;
2553 				UIO_ADVANCE(uiop, DIRHDSIZ);
2554 				nfsm_mtouio(uiop, len);
2555 				tlen = reclen - (DIRHDSIZ + len);
2556 				(void)memset(uiop->uio_iov->iov_base, 0, tlen);
2557 				UIO_ADVANCE(uiop, tlen);
2558 			} else
2559 				nfsm_adv(nfsm_rndup(len));
2560 #ifndef NFS_V2_ONLY
2561 			if (v3) {
2562 				nfsm_dissect(tl, u_int32_t *,
2563 				    3 * NFSX_UNSIGNED);
2564 			} else
2565 #endif
2566 			{
2567 				nfsm_dissect(tl, u_int32_t *,
2568 				    2 * NFSX_UNSIGNED);
2569 			}
2570 			if (bigenough) {
2571 #ifndef NFS_V2_ONLY
2572 				if (v3) {
2573 					if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2574 						uiop->uio_offset =
2575 						    fxdr_swapcookie3(tl);
2576 					else
2577 						uiop->uio_offset =
2578 						    fxdr_cookie3(tl);
2579 				}
2580 				else
2581 #endif
2582 				{
2583 					uiop->uio_offset =
2584 					    fxdr_unsigned(off_t, *tl);
2585 				}
2586 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2587 			}
2588 			if (v3)
2589 				tl += 2;
2590 			else
2591 				tl++;
2592 			more_dirs = fxdr_unsigned(int, *tl);
2593 		}
2594 		/*
2595 		 * If at end of rpc data, get the eof boolean
2596 		 */
2597 		if (!more_dirs) {
2598 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2599 			more_dirs = (fxdr_unsigned(int, *tl) == 0);
2600 
2601 			/*
2602 			 * kludge: if we got no entries, treat it as EOF.
2603 			 * some server sometimes send a reply without any
2604 			 * entries or EOF.
2605 			 * although it might mean the server has very long name,
2606 			 * we can't handle such entries anyway.
2607 			 */
2608 
2609 			if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2610 				more_dirs = 0;
2611 		}
2612 		m_freem(mrep);
2613 	}
2614 	/*
2615 	 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ
2616 	 * by increasing d_reclen for the last record.
2617 	 */
2618 	if (blksiz > 0) {
2619 		left = NFS_DIRFRAGSIZ - blksiz;
2620 		memset(uiop->uio_iov->iov_base, 0, left);
2621 		dp->d_reclen += left;
2622 		NFS_STASHCOOKIE(dp, uiop->uio_offset);
2623 		UIO_ADVANCE(uiop, left);
2624 	}
2625 
2626 	/*
2627 	 * We are now either at the end of the directory or have filled the
2628 	 * block.
2629 	 */
2630 	if (bigenough) {
2631 		dnp->n_direofoffset = uiop->uio_offset;
2632 		dnp->n_flag |= NEOFVALID;
2633 	}
2634 nfsmout:
2635 	return (error);
2636 }
2637 
2638 #ifndef NFS_V2_ONLY
2639 /*
2640  * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2641  */
2642 int
2643 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2644 {
2645 	int len, left;
2646 	struct dirent *dp = NULL;
2647 	u_int32_t *tl;
2648 	char *cp;
2649 	int32_t t1, t2;
2650 	struct vnode *newvp;
2651 	char *bpos, *dpos, *cp2;
2652 	struct mbuf *mreq, *mrep, *md, *mb;
2653 	struct nameidata nami, *ndp = &nami;
2654 	struct componentname *cnp = &ndp->ni_cnd;
2655 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2656 	struct nfsnode *dnp = VTONFS(vp), *np;
2657 	nfsfh_t *fhp;
2658 	u_quad_t fileno;
2659 	int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2660 	int attrflag, fhsize, nrpcs = 0, reclen;
2661 	struct nfs_fattr fattr, *fp;
2662 
2663 #ifdef DIAGNOSTIC
2664 	if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2665 		panic("nfs readdirplusrpc bad uio");
2666 #endif
2667 	ndp->ni_dvp = vp;
2668 	newvp = NULLVP;
2669 
2670 	/*
2671 	 * Loop around doing readdir rpc's of size nm_readdirsize
2672 	 * truncated to a multiple of NFS_DIRFRAGSIZ.
2673 	 * The stopping criteria is EOF or buffer full.
2674 	 */
2675 	while (more_dirs && bigenough) {
2676 		if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) {
2677 			bigenough = 0;
2678 			break;
2679 		}
2680 		nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
2681 		nfsm_reqhead(dnp, NFSPROC_READDIRPLUS,
2682 			NFSX_FH(1) + 6 * NFSX_UNSIGNED);
2683 		nfsm_fhtom(dnp, 1);
2684  		nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
2685 		if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) {
2686 			txdr_swapcookie3(uiop->uio_offset, tl);
2687 		} else {
2688 			txdr_cookie3(uiop->uio_offset, tl);
2689 		}
2690 		tl += 2;
2691 		*tl++ = dnp->n_cookieverf.nfsuquad[0];
2692 		*tl++ = dnp->n_cookieverf.nfsuquad[1];
2693 		*tl++ = txdr_unsigned(nmp->nm_readdirsize);
2694 		*tl = txdr_unsigned(nmp->nm_rsize);
2695 		nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred);
2696 		nfsm_postop_attr(vp, attrflag, 0);
2697 		if (error) {
2698 			m_freem(mrep);
2699 			goto nfsmout;
2700 		}
2701 		nrpcs++;
2702 		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2703 		dnp->n_cookieverf.nfsuquad[0] = *tl++;
2704 		dnp->n_cookieverf.nfsuquad[1] = *tl++;
2705 		more_dirs = fxdr_unsigned(int, *tl);
2706 
2707 		/* loop thru the dir entries, doctoring them to 4bsd form */
2708 		while (more_dirs && bigenough) {
2709 			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2710 			fileno = fxdr_hyper(tl);
2711 			len = fxdr_unsigned(int, *(tl + 2));
2712 			if (len <= 0 || len > NFS_MAXNAMLEN) {
2713 				error = EBADRPC;
2714 				m_freem(mrep);
2715 				goto nfsmout;
2716 			}
2717 			/* for cookie stashing */
2718 			reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t);
2719 			left = NFS_DIRFRAGSIZ - blksiz;
2720 			if (reclen > left) {
2721 				/*
2722 				 * DIRFRAGSIZ is aligned, no need to align
2723 				 * again here.
2724 				 */
2725 				memset(uiop->uio_iov->iov_base, 0, left);
2726 				dp->d_reclen += left;
2727 				UIO_ADVANCE(uiop, left);
2728 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2729 				blksiz = 0;
2730 			}
2731 			if (reclen > uiop->uio_resid)
2732 				bigenough = 0;
2733 			if (bigenough) {
2734 				int tlen;
2735 
2736 				dp = (struct dirent *)uiop->uio_iov->iov_base;
2737 				dp->d_fileno = fileno;
2738 				dp->d_namlen = len;
2739 				dp->d_reclen = reclen;
2740 				dp->d_type = DT_UNKNOWN;
2741 				blksiz += reclen;
2742 				if (blksiz == NFS_DIRFRAGSIZ)
2743 					blksiz = 0;
2744 				UIO_ADVANCE(uiop, DIRHDSIZ);
2745 				nfsm_mtouio(uiop, len);
2746 				tlen = reclen - (DIRHDSIZ + len);
2747 				(void)memset(uiop->uio_iov->iov_base, 0, tlen);
2748 				UIO_ADVANCE(uiop, tlen);
2749 				cnp->cn_nameptr = dp->d_name;
2750 				cnp->cn_namelen = dp->d_namlen;
2751 			} else
2752 				nfsm_adv(nfsm_rndup(len));
2753 			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
2754 			if (bigenough) {
2755 				if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE)
2756 					uiop->uio_offset =
2757 						fxdr_swapcookie3(tl);
2758 				else
2759 					uiop->uio_offset =
2760 						fxdr_cookie3(tl);
2761 				NFS_STASHCOOKIE(dp, uiop->uio_offset);
2762 			}
2763 			tl += 2;
2764 
2765 			/*
2766 			 * Since the attributes are before the file handle
2767 			 * (sigh), we must skip over the attributes and then
2768 			 * come back and get them.
2769 			 */
2770 			attrflag = fxdr_unsigned(int, *tl);
2771 			if (attrflag) {
2772 			    nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR);
2773 			    memcpy(&fattr, fp, NFSX_V3FATTR);
2774 			    nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2775 			    doit = fxdr_unsigned(int, *tl);
2776 			    if (doit) {
2777 				nfsm_getfh(fhp, fhsize, 1);
2778 				if (NFS_CMPFH(dnp, fhp, fhsize)) {
2779 				    vref(vp);
2780 				    newvp = vp;
2781 				    np = dnp;
2782 				} else {
2783 				    error = nfs_nget1(vp->v_mount, fhp,
2784 					fhsize, &np, LK_NOWAIT);
2785 				    if (!error)
2786 					newvp = NFSTOV(np);
2787 				}
2788 				if (!error) {
2789 				    nfs_loadattrcache(&newvp, &fattr, 0, 0);
2790 				    if (bigenough) {
2791 					dp->d_type =
2792 					   IFTODT(VTTOIF(np->n_vattr->va_type));
2793 					if (cnp->cn_namelen <= NCHNAMLEN) {
2794 					    ndp->ni_vp = newvp;
2795 					    nfs_cache_enter(ndp->ni_dvp,
2796 						ndp->ni_vp, cnp);
2797 					}
2798 				    }
2799 				}
2800 				error = 0;
2801 			   }
2802 			} else {
2803 			    /* Just skip over the file handle */
2804 			    nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2805 			    i = fxdr_unsigned(int, *tl);
2806 			    nfsm_adv(nfsm_rndup(i));
2807 			}
2808 			if (newvp != NULLVP) {
2809 			    if (newvp == vp)
2810 				vrele(newvp);
2811 			    else
2812 				vput(newvp);
2813 			    newvp = NULLVP;
2814 			}
2815 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2816 			more_dirs = fxdr_unsigned(int, *tl);
2817 		}
2818 		/*
2819 		 * If at end of rpc data, get the eof boolean
2820 		 */
2821 		if (!more_dirs) {
2822 			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
2823 			more_dirs = (fxdr_unsigned(int, *tl) == 0);
2824 
2825 			/*
2826 			 * kludge: see a comment in nfs_readdirrpc.
2827 			 */
2828 
2829 			if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2830 				more_dirs = 0;
2831 		}
2832 		m_freem(mrep);
2833 	}
2834 	/*
2835 	 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ
2836 	 * by increasing d_reclen for the last record.
2837 	 */
2838 	if (blksiz > 0) {
2839 		left = NFS_DIRFRAGSIZ - blksiz;
2840 		memset(uiop->uio_iov->iov_base, 0, left);
2841 		dp->d_reclen += left;
2842 		NFS_STASHCOOKIE(dp, uiop->uio_offset);
2843 		UIO_ADVANCE(uiop, left);
2844 	}
2845 
2846 	/*
2847 	 * We are now either at the end of the directory or have filled the
2848 	 * block.
2849 	 */
2850 	if (bigenough) {
2851 		dnp->n_direofoffset = uiop->uio_offset;
2852 		dnp->n_flag |= NEOFVALID;
2853 	}
2854 nfsmout:
2855 	if (newvp != NULLVP) {
2856 		if(newvp == vp)
2857 		    vrele(newvp);
2858 		else
2859 		    vput(newvp);
2860 	}
2861 	return (error);
2862 }
2863 #endif
2864 
2865 /*
2866  * Silly rename. To make the NFS filesystem that is stateless look a little
2867  * more like the "ufs" a remove of an active vnode is translated to a rename
2868  * to a funny looking filename that is removed by nfs_inactive on the
2869  * nfsnode. There is the potential for another process on a different client
2870  * to create the same funny name between the nfs_lookitup() fails and the
2871  * nfs_rename() completes, but...
2872  */
2873 int
2874 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink)
2875 {
2876 	struct sillyrename *sp;
2877 	struct nfsnode *np;
2878 	int error;
2879 	pid_t pid;
2880 
2881 	cache_purge(dvp);
2882 	np = VTONFS(vp);
2883 #ifndef DIAGNOSTIC
2884 	if (vp->v_type == VDIR)
2885 		panic("nfs: sillyrename dir");
2886 #endif
2887 	sp = kmem_alloc(sizeof(*sp), KM_SLEEP);
2888 	sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2889 	sp->s_dvp = dvp;
2890 	vref(dvp);
2891 
2892 	/* Fudge together a funny name */
2893 	pid = curlwp->l_proc->p_pid;
2894 	memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
2895 	sp->s_namlen = 12;
2896 	sp->s_name[8] = hexdigits[pid & 0xf];
2897 	sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
2898 	sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
2899 	sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
2900 
2901 	/* Try lookitups until we get one that isn't there */
2902 	while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2903 		curlwp, (struct nfsnode **)0) == 0) {
2904 		sp->s_name[4]++;
2905 		if (sp->s_name[4] > 'z') {
2906 			error = EINVAL;
2907 			goto bad;
2908 		}
2909 	}
2910 	if (dolink) {
2911 		error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
2912 		    sp->s_cred, curlwp);
2913 		/*
2914 		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
2915 		 */
2916 		if (error == ENOTSUP) {
2917 			error = nfs_renameit(dvp, cnp, sp);
2918 		}
2919 	} else {
2920 		error = nfs_renameit(dvp, cnp, sp);
2921 	}
2922 	if (error)
2923 		goto bad;
2924 	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2925 		curlwp, &np);
2926 	np->n_sillyrename = sp;
2927 	return (0);
2928 bad:
2929 	vrele(sp->s_dvp);
2930 	kauth_cred_free(sp->s_cred);
2931 	kmem_free(sp, sizeof(*sp));
2932 	return (error);
2933 }
2934 
2935 /*
2936  * Look up a file name and optionally either update the file handle or
2937  * allocate an nfsnode, depending on the value of npp.
2938  * npp == NULL	--> just do the lookup
2939  * *npp == NULL --> allocate a new nfsnode and make sure attributes are
2940  *			handled too
2941  * *npp != NULL --> update the file handle in the vnode
2942  */
2943 int
2944 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp)
2945 {
2946 	u_int32_t *tl;
2947 	char *cp;
2948 	int32_t t1, t2;
2949 	struct vnode *newvp = (struct vnode *)0;
2950 	struct nfsnode *np, *dnp = VTONFS(dvp);
2951 	char *bpos, *dpos, *cp2;
2952 	int error = 0, ofhlen, fhlen;
2953 #ifndef NFS_V2_ONLY
2954 	int attrflag;
2955 #endif
2956 	struct mbuf *mreq, *mrep, *md, *mb;
2957 	nfsfh_t *ofhp, *nfhp;
2958 	const int v3 = NFS_ISV3(dvp);
2959 
2960 	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
2961 	nfsm_reqhead(dnp, NFSPROC_LOOKUP,
2962 		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
2963 	nfsm_fhtom(dnp, v3);
2964 	nfsm_strtom(name, len, NFS_MAXNAMLEN);
2965 	nfsm_request(dnp, NFSPROC_LOOKUP, l, cred);
2966 	if (npp && !error) {
2967 		nfsm_getfh(nfhp, fhlen, v3);
2968 		if (*npp) {
2969 		    np = *npp;
2970 		    newvp = NFSTOV(np);
2971 		    ofhlen = np->n_fhsize;
2972 		    ofhp = kmem_alloc(ofhlen, KM_SLEEP);
2973 		    memcpy(ofhp, np->n_fhp, ofhlen);
2974 		    error = vcache_rekey_enter(newvp->v_mount, newvp,
2975 			ofhp, ofhlen, nfhp, fhlen);
2976 		    if (error) {
2977 			kmem_free(ofhp, ofhlen);
2978 			m_freem(mrep);
2979 			return error;
2980 		    }
2981 		    if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
2982 			kmem_free(np->n_fhp, np->n_fhsize);
2983 			np->n_fhp = &np->n_fh;
2984 		    }
2985 #if NFS_SMALLFH < NFSX_V3FHMAX
2986 		    else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
2987 			np->n_fhp = kmem_alloc(fhlen, KM_SLEEP);
2988 #endif
2989 		    memcpy(np->n_fhp, nfhp, fhlen);
2990 		    np->n_fhsize = fhlen;
2991 		    vcache_rekey_exit(newvp->v_mount, newvp,
2992 			ofhp, ofhlen, np->n_fhp, fhlen);
2993 		    kmem_free(ofhp, ofhlen);
2994 		} else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
2995 		    vref(dvp);
2996 		    newvp = dvp;
2997 		    np = dnp;
2998 		} else {
2999 		    error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
3000 		    if (error) {
3001 			m_freem(mrep);
3002 			return (error);
3003 		    }
3004 		    newvp = NFSTOV(np);
3005 		}
3006 #ifndef NFS_V2_ONLY
3007 		if (v3) {
3008 			nfsm_postop_attr(newvp, attrflag, 0);
3009 			if (!attrflag && *npp == NULL) {
3010 				m_freem(mrep);
3011 				vput(newvp);
3012 				return (ENOENT);
3013 			}
3014 		} else
3015 #endif
3016 			nfsm_loadattr(newvp, (struct vattr *)0, 0);
3017 	}
3018 	nfsm_reqdone;
3019 	if (npp && *npp == NULL) {
3020 		if (error) {
3021 			if (newvp)
3022 				vput(newvp);
3023 		} else
3024 			*npp = np;
3025 	}
3026 	return (error);
3027 }
3028 
3029 #ifndef NFS_V2_ONLY
3030 /*
3031  * Nfs Version 3 commit rpc
3032  */
3033 int
3034 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l)
3035 {
3036 	char *cp;
3037 	u_int32_t *tl;
3038 	int32_t t1, t2;
3039 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
3040 	char *bpos, *dpos, *cp2;
3041 	int error = 0, wccflag = NFSV3_WCCRATTR;
3042 	struct mbuf *mreq, *mrep, *md, *mb;
3043 	struct nfsnode *np;
3044 
3045 	KASSERT(NFS_ISV3(vp));
3046 
3047 #ifdef NFS_DEBUG_COMMIT
3048 	printf("commit %lu - %lu\n", (unsigned long)offset,
3049 	    (unsigned long)(offset + cnt));
3050 #endif
3051 
3052 	mutex_enter(&nmp->nm_lock);
3053 	if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) {
3054 		mutex_exit(&nmp->nm_lock);
3055 		return (0);
3056 	}
3057 	mutex_exit(&nmp->nm_lock);
3058 	nfsstats.rpccnt[NFSPROC_COMMIT]++;
3059 	np = VTONFS(vp);
3060 	nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1));
3061 	nfsm_fhtom(np, 1);
3062 	nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
3063 	txdr_hyper(offset, tl);
3064 	tl += 2;
3065 	*tl = txdr_unsigned(cnt);
3066 	nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3067 	nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3068 	if (!error) {
3069 		nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
3070 		mutex_enter(&nmp->nm_lock);
3071 		if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3072 		    memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3073 			memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3074 			error = NFSERR_STALEWRITEVERF;
3075 			nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3076 		}
3077 		mutex_exit(&nmp->nm_lock);
3078 	}
3079 	nfsm_reqdone;
3080 	return (error);
3081 }
3082 #endif
3083 
3084 /*
3085  * Kludge City..
3086  * - make nfs_bmap() essentially a no-op that does no translation
3087  * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3088  *   (Maybe I could use the process's page mapping, but I was concerned that
3089  *    Kernel Write might not be enabled and also figured copyout() would do
3090  *    a lot more work than memcpy() and also it currently happens in the
3091  *    context of the swapper process (2).
3092  */
3093 int
3094 nfs_bmap(void *v)
3095 {
3096 	struct vop_bmap_args /* {
3097 		struct vnode *a_vp;
3098 		daddr_t  a_bn;
3099 		struct vnode **a_vpp;
3100 		daddr_t *a_bnp;
3101 		int *a_runp;
3102 	} */ *ap = v;
3103 	struct vnode *vp = ap->a_vp;
3104 	int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3105 
3106 	if (ap->a_vpp != NULL)
3107 		*ap->a_vpp = vp;
3108 	if (ap->a_bnp != NULL)
3109 		*ap->a_bnp = ap->a_bn << bshift;
3110 	if (ap->a_runp != NULL)
3111 		*ap->a_runp = 1024 * 1024; /* XXX */
3112 	return (0);
3113 }
3114 
3115 /*
3116  * Strategy routine.
3117  * For async requests when nfsiod(s) are running, queue the request by
3118  * calling nfs_asyncio(), otherwise just all nfs_doio() to do the
3119  * request.
3120  */
3121 int
3122 nfs_strategy(void *v)
3123 {
3124 	struct vop_strategy_args *ap = v;
3125 	struct buf *bp = ap->a_bp;
3126 	int error = 0;
3127 
3128 	if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3129 		panic("nfs physio/async");
3130 
3131 	/*
3132 	 * If the op is asynchronous and an i/o daemon is waiting
3133 	 * queue the request, wake it up and wait for completion
3134 	 * otherwise just do it ourselves.
3135 	 */
3136 	if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3137 		error = nfs_doio(bp);
3138 	return (error);
3139 }
3140 
3141 /*
3142  * fsync vnode op. Just call nfs_flush() with commit == 1.
3143  */
3144 /* ARGSUSED */
3145 int
3146 nfs_fsync(void *v)
3147 {
3148 	struct vop_fsync_args /* {
3149 		struct vnodeop_desc *a_desc;
3150 		struct vnode * a_vp;
3151 		kauth_cred_t  a_cred;
3152 		int  a_flags;
3153 		off_t offlo;
3154 		off_t offhi;
3155 		struct lwp * a_l;
3156 	} */ *ap = v;
3157 
3158 	struct vnode *vp = ap->a_vp;
3159 
3160 	if (vp->v_type != VREG)
3161 		return 0;
3162 
3163 	return (nfs_flush(vp, ap->a_cred,
3164 	    (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3165 }
3166 
3167 /*
3168  * Flush all the data associated with a vnode.
3169  */
3170 int
3171 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3172     int commit)
3173 {
3174 	struct nfsnode *np = VTONFS(vp);
3175 	int error;
3176 	int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3177 	UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3178 
3179 	mutex_enter(vp->v_interlock);
3180 	error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3181 	if (np->n_flag & NWRITEERR) {
3182 		error = np->n_error;
3183 		np->n_flag &= ~NWRITEERR;
3184 	}
3185 	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3186 	return (error);
3187 }
3188 
3189 /*
3190  * Return POSIX pathconf information applicable to nfs.
3191  *
3192  * N.B. The NFS V2 protocol doesn't support this RPC.
3193  */
3194 /* ARGSUSED */
3195 int
3196 nfs_pathconf(void *v)
3197 {
3198 	struct vop_pathconf_args /* {
3199 		struct vnode *a_vp;
3200 		int a_name;
3201 		register_t *a_retval;
3202 	} */ *ap = v;
3203 	struct nfsv3_pathconf *pcp;
3204 	struct vnode *vp = ap->a_vp;
3205 	struct mbuf *mreq, *mrep, *md, *mb;
3206 	int32_t t1, t2;
3207 	u_int32_t *tl;
3208 	char *bpos, *dpos, *cp, *cp2;
3209 	int error = 0, attrflag;
3210 #ifndef NFS_V2_ONLY
3211 	struct nfsmount *nmp;
3212 	unsigned int l;
3213 	u_int64_t maxsize;
3214 #endif
3215 	const int v3 = NFS_ISV3(vp);
3216 	struct nfsnode *np = VTONFS(vp);
3217 
3218 	switch (ap->a_name) {
3219 		/* Names that can be resolved locally. */
3220 	case _PC_PIPE_BUF:
3221 		*ap->a_retval = PIPE_BUF;
3222 		break;
3223 	case _PC_SYNC_IO:
3224 		*ap->a_retval = 1;
3225 		break;
3226 	/* Names that cannot be resolved locally; do an RPC, if possible. */
3227 	case _PC_LINK_MAX:
3228 	case _PC_NAME_MAX:
3229 	case _PC_CHOWN_RESTRICTED:
3230 	case _PC_NO_TRUNC:
3231 		if (!v3) {
3232 			error = EINVAL;
3233 			break;
3234 		}
3235 		nfsstats.rpccnt[NFSPROC_PATHCONF]++;
3236 		nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1));
3237 		nfsm_fhtom(np, 1);
3238 		nfsm_request(np, NFSPROC_PATHCONF,
3239 		    curlwp, curlwp->l_cred);	/* XXX */
3240 		nfsm_postop_attr(vp, attrflag, 0);
3241 		if (!error) {
3242 			nfsm_dissect(pcp, struct nfsv3_pathconf *,
3243 			    NFSX_V3PATHCONF);
3244 			switch (ap->a_name) {
3245 			case _PC_LINK_MAX:
3246 				*ap->a_retval =
3247 				    fxdr_unsigned(register_t, pcp->pc_linkmax);
3248 				break;
3249 			case _PC_NAME_MAX:
3250 				*ap->a_retval =
3251 				    fxdr_unsigned(register_t, pcp->pc_namemax);
3252 				break;
3253 			case _PC_CHOWN_RESTRICTED:
3254 				*ap->a_retval =
3255 				    (pcp->pc_chownrestricted == nfs_true);
3256 				break;
3257 			case _PC_NO_TRUNC:
3258 				*ap->a_retval =
3259 				    (pcp->pc_notrunc == nfs_true);
3260 				break;
3261 			}
3262 		}
3263 		nfsm_reqdone;
3264 		break;
3265 	case _PC_FILESIZEBITS:
3266 #ifndef NFS_V2_ONLY
3267 		if (v3) {
3268 			nmp = VFSTONFS(vp->v_mount);
3269 			if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0)
3270 				if ((error = nfs_fsinfo(nmp, vp,
3271 				    curlwp->l_cred, curlwp)) != 0) /* XXX */
3272 					break;
3273 			for (l = 0, maxsize = nmp->nm_maxfilesize;
3274 			    (maxsize >> l) > 0; l++)
3275 				;
3276 			*ap->a_retval = l + 1;
3277 		} else
3278 #endif
3279 		{
3280 			*ap->a_retval = 32;	/* NFS V2 limitation */
3281 		}
3282 		break;
3283 	default:
3284 		error = EINVAL;
3285 		break;
3286 	}
3287 
3288 	return (error);
3289 }
3290 
3291 /*
3292  * NFS advisory byte-level locks.
3293  */
3294 int
3295 nfs_advlock(void *v)
3296 {
3297 	struct vop_advlock_args /* {
3298 		struct vnode *a_vp;
3299 		void *a_id;
3300 		int  a_op;
3301 		struct flock *a_fl;
3302 		int  a_flags;
3303 	} */ *ap = v;
3304 	struct nfsnode *np = VTONFS(ap->a_vp);
3305 
3306 	return lf_advlock(ap, &np->n_lockf, np->n_size);
3307 }
3308 
3309 /*
3310  * Print out the contents of an nfsnode.
3311  */
3312 int
3313 nfs_print(void *v)
3314 {
3315 	struct vop_print_args /* {
3316 		struct vnode *a_vp;
3317 	} */ *ap = v;
3318 	struct vnode *vp = ap->a_vp;
3319 	struct nfsnode *np = VTONFS(vp);
3320 
3321 	printf("tag VT_NFS, fileid %lld fsid 0x%llx",
3322 	    (unsigned long long)np->n_vattr->va_fileid,
3323 	    (unsigned long long)np->n_vattr->va_fsid);
3324 	if (vp->v_type == VFIFO)
3325 		VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
3326 	printf("\n");
3327 	return (0);
3328 }
3329 
3330 /*
3331  * nfs unlock wrapper.
3332  */
3333 int
3334 nfs_unlock(void *v)
3335 {
3336 	struct vop_unlock_args /* {
3337 		struct vnode *a_vp;
3338 		int a_flags;
3339 	} */ *ap = v;
3340 	struct vnode *vp = ap->a_vp;
3341 
3342 	/*
3343 	 * VOP_UNLOCK can be called by nfs_loadattrcache
3344 	 * with v_data == 0.
3345 	 */
3346 	if (VTONFS(vp)) {
3347 		nfs_delayedtruncate(vp);
3348 	}
3349 
3350 	return genfs_unlock(v);
3351 }
3352 
3353 /*
3354  * nfs special file access vnode op.
3355  * Essentially just get vattr and then imitate iaccess() since the device is
3356  * local to the client.
3357  */
3358 int
3359 nfsspec_access(void *v)
3360 {
3361 	struct vop_access_args /* {
3362 		struct vnode *a_vp;
3363 		int  a_mode;
3364 		kauth_cred_t a_cred;
3365 		struct lwp *a_l;
3366 	} */ *ap = v;
3367 	struct vattr va;
3368 	struct vnode *vp = ap->a_vp;
3369 	int error;
3370 
3371 	error = VOP_GETATTR(vp, &va, ap->a_cred);
3372 	if (error)
3373 		return (error);
3374 
3375         /*
3376 	 * Disallow write attempts on filesystems mounted read-only;
3377 	 * unless the file is a socket, fifo, or a block or character
3378 	 * device resident on the filesystem.
3379 	 */
3380 	if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
3381 		switch (vp->v_type) {
3382 		case VREG:
3383 		case VDIR:
3384 		case VLNK:
3385 			return (EROFS);
3386 		default:
3387 			break;
3388 		}
3389 	}
3390 
3391 	return kauth_authorize_vnode(ap->a_cred, KAUTH_ACCESS_ACTION(ap->a_mode,
3392 	    va.va_type, va.va_mode), vp, NULL, genfs_can_access(va.va_type,
3393 	    va.va_mode, va.va_uid, va.va_gid, ap->a_mode, ap->a_cred));
3394 }
3395 
3396 /*
3397  * Read wrapper for special devices.
3398  */
3399 int
3400 nfsspec_read(void *v)
3401 {
3402 	struct vop_read_args /* {
3403 		struct vnode *a_vp;
3404 		struct uio *a_uio;
3405 		int  a_ioflag;
3406 		kauth_cred_t a_cred;
3407 	} */ *ap = v;
3408 	struct nfsnode *np = VTONFS(ap->a_vp);
3409 
3410 	/*
3411 	 * Set access flag.
3412 	 */
3413 	np->n_flag |= NACC;
3414 	getnanotime(&np->n_atim);
3415 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap));
3416 }
3417 
3418 /*
3419  * Write wrapper for special devices.
3420  */
3421 int
3422 nfsspec_write(void *v)
3423 {
3424 	struct vop_write_args /* {
3425 		struct vnode *a_vp;
3426 		struct uio *a_uio;
3427 		int  a_ioflag;
3428 		kauth_cred_t a_cred;
3429 	} */ *ap = v;
3430 	struct nfsnode *np = VTONFS(ap->a_vp);
3431 
3432 	/*
3433 	 * Set update flag.
3434 	 */
3435 	np->n_flag |= NUPD;
3436 	getnanotime(&np->n_mtim);
3437 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap));
3438 }
3439 
3440 /*
3441  * Close wrapper for special devices.
3442  *
3443  * Update the times on the nfsnode then do device close.
3444  */
3445 int
3446 nfsspec_close(void *v)
3447 {
3448 	struct vop_close_args /* {
3449 		struct vnode *a_vp;
3450 		int  a_fflag;
3451 		kauth_cred_t a_cred;
3452 		struct lwp *a_l;
3453 	} */ *ap = v;
3454 	struct vnode *vp = ap->a_vp;
3455 	struct nfsnode *np = VTONFS(vp);
3456 	struct vattr vattr;
3457 
3458 	if (np->n_flag & (NACC | NUPD)) {
3459 		np->n_flag |= NCHG;
3460 		if (vp->v_usecount == 1 &&
3461 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3462 			vattr_null(&vattr);
3463 			if (np->n_flag & NACC)
3464 				vattr.va_atime = np->n_atim;
3465 			if (np->n_flag & NUPD)
3466 				vattr.va_mtime = np->n_mtim;
3467 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3468 		}
3469 	}
3470 	return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap));
3471 }
3472 
3473 /*
3474  * Read wrapper for fifos.
3475  */
3476 int
3477 nfsfifo_read(void *v)
3478 {
3479 	struct vop_read_args /* {
3480 		struct vnode *a_vp;
3481 		struct uio *a_uio;
3482 		int  a_ioflag;
3483 		kauth_cred_t a_cred;
3484 	} */ *ap = v;
3485 	struct nfsnode *np = VTONFS(ap->a_vp);
3486 
3487 	/*
3488 	 * Set access flag.
3489 	 */
3490 	np->n_flag |= NACC;
3491 	getnanotime(&np->n_atim);
3492 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap));
3493 }
3494 
3495 /*
3496  * Write wrapper for fifos.
3497  */
3498 int
3499 nfsfifo_write(void *v)
3500 {
3501 	struct vop_write_args /* {
3502 		struct vnode *a_vp;
3503 		struct uio *a_uio;
3504 		int  a_ioflag;
3505 		kauth_cred_t a_cred;
3506 	} */ *ap = v;
3507 	struct nfsnode *np = VTONFS(ap->a_vp);
3508 
3509 	/*
3510 	 * Set update flag.
3511 	 */
3512 	np->n_flag |= NUPD;
3513 	getnanotime(&np->n_mtim);
3514 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap));
3515 }
3516 
3517 /*
3518  * Close wrapper for fifos.
3519  *
3520  * Update the times on the nfsnode then do fifo close.
3521  */
3522 int
3523 nfsfifo_close(void *v)
3524 {
3525 	struct vop_close_args /* {
3526 		struct vnode *a_vp;
3527 		int  a_fflag;
3528 		kauth_cred_t a_cred;
3529 		struct lwp *a_l;
3530 	} */ *ap = v;
3531 	struct vnode *vp = ap->a_vp;
3532 	struct nfsnode *np = VTONFS(vp);
3533 	struct vattr vattr;
3534 
3535 	if (np->n_flag & (NACC | NUPD)) {
3536 		struct timespec ts;
3537 
3538 		getnanotime(&ts);
3539 		if (np->n_flag & NACC)
3540 			np->n_atim = ts;
3541 		if (np->n_flag & NUPD)
3542 			np->n_mtim = ts;
3543 		np->n_flag |= NCHG;
3544 		if (vp->v_usecount == 1 &&
3545 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3546 			vattr_null(&vattr);
3547 			if (np->n_flag & NACC)
3548 				vattr.va_atime = np->n_atim;
3549 			if (np->n_flag & NUPD)
3550 				vattr.va_mtime = np->n_mtim;
3551 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred);
3552 		}
3553 	}
3554 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3555 }
3556