1 /* $NetBSD: nfs_vnops.c,v 1.244 2006/10/14 09:18:57 yamt Exp $ */ 2 3 /* 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Rick Macklem at The University of Guelph. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95 35 */ 36 37 /* 38 * vnode op calls for Sun NFS version 2 and 3 39 */ 40 41 #include <sys/cdefs.h> 42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.244 2006/10/14 09:18:57 yamt Exp $"); 43 44 #include "opt_inet.h" 45 #include "opt_nfs.h" 46 #include "opt_uvmhist.h" 47 48 #include <sys/param.h> 49 #include <sys/proc.h> 50 #include <sys/kernel.h> 51 #include <sys/systm.h> 52 #include <sys/resourcevar.h> 53 #include <sys/proc.h> 54 #include <sys/mount.h> 55 #include <sys/buf.h> 56 #include <sys/disk.h> 57 #include <sys/malloc.h> 58 #include <sys/mbuf.h> 59 #include <sys/namei.h> 60 #include <sys/vnode.h> 61 #include <sys/dirent.h> 62 #include <sys/fcntl.h> 63 #include <sys/hash.h> 64 #include <sys/lockf.h> 65 #include <sys/stat.h> 66 #include <sys/unistd.h> 67 #include <sys/kauth.h> 68 69 #include <uvm/uvm_extern.h> 70 #include <uvm/uvm.h> 71 72 #include <miscfs/fifofs/fifo.h> 73 #include <miscfs/genfs/genfs.h> 74 #include <miscfs/genfs/genfs_node.h> 75 #include <miscfs/specfs/specdev.h> 76 77 #include <nfs/rpcv2.h> 78 #include <nfs/nfsproto.h> 79 #include <nfs/nfs.h> 80 #include <nfs/nfsnode.h> 81 #include <nfs/nfsmount.h> 82 #include <nfs/xdr_subs.h> 83 #include <nfs/nfsm_subs.h> 84 #include <nfs/nqnfs.h> 85 #include <nfs/nfs_var.h> 86 87 #include <net/if.h> 88 #include <netinet/in.h> 89 #include <netinet/in_var.h> 90 91 /* 92 * Global vfs data structures for nfs 93 */ 94 int (**nfsv2_vnodeop_p) __P((void *)); 95 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = { 96 { &vop_default_desc, vn_default_error }, 97 { &vop_lookup_desc, nfs_lookup }, /* lookup */ 98 { &vop_create_desc, nfs_create }, /* create */ 99 { &vop_mknod_desc, nfs_mknod }, /* mknod */ 100 { &vop_open_desc, nfs_open }, /* open */ 101 { &vop_close_desc, nfs_close }, /* close */ 102 { &vop_access_desc, nfs_access }, /* access */ 103 { &vop_getattr_desc, nfs_getattr }, /* getattr */ 104 { &vop_setattr_desc, nfs_setattr }, /* setattr */ 105 { &vop_read_desc, nfs_read }, /* read */ 106 { &vop_write_desc, nfs_write }, /* write */ 107 { &vop_lease_desc, nfs_lease_check }, /* lease */ 108 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 109 { &vop_ioctl_desc, nfs_ioctl }, /* ioctl */ 110 { &vop_poll_desc, nfs_poll }, /* poll */ 111 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */ 112 { &vop_revoke_desc, nfs_revoke }, /* revoke */ 113 { &vop_mmap_desc, nfs_mmap }, /* mmap */ 114 { &vop_fsync_desc, nfs_fsync }, /* fsync */ 115 { &vop_seek_desc, nfs_seek }, /* seek */ 116 { &vop_remove_desc, nfs_remove }, /* remove */ 117 { &vop_link_desc, nfs_link }, /* link */ 118 { &vop_rename_desc, nfs_rename }, /* rename */ 119 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */ 120 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */ 121 { &vop_symlink_desc, nfs_symlink }, /* symlink */ 122 { &vop_readdir_desc, nfs_readdir }, /* readdir */ 123 { &vop_readlink_desc, nfs_readlink }, /* readlink */ 124 { &vop_abortop_desc, nfs_abortop }, /* abortop */ 125 { &vop_inactive_desc, nfs_inactive }, /* inactive */ 126 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */ 127 { &vop_lock_desc, nfs_lock }, /* lock */ 128 { &vop_unlock_desc, nfs_unlock }, /* unlock */ 129 { &vop_bmap_desc, nfs_bmap }, /* bmap */ 130 { &vop_strategy_desc, nfs_strategy }, /* strategy */ 131 { &vop_print_desc, nfs_print }, /* print */ 132 { &vop_islocked_desc, nfs_islocked }, /* islocked */ 133 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */ 134 { &vop_advlock_desc, nfs_advlock }, /* advlock */ 135 { &vop_bwrite_desc, genfs_badop }, /* bwrite */ 136 { &vop_getpages_desc, nfs_getpages }, /* getpages */ 137 { &vop_putpages_desc, genfs_putpages }, /* putpages */ 138 { NULL, NULL } 139 }; 140 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc = 141 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries }; 142 143 /* 144 * Special device vnode ops 145 */ 146 int (**spec_nfsv2nodeop_p) __P((void *)); 147 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = { 148 { &vop_default_desc, vn_default_error }, 149 { &vop_lookup_desc, spec_lookup }, /* lookup */ 150 { &vop_create_desc, spec_create }, /* create */ 151 { &vop_mknod_desc, spec_mknod }, /* mknod */ 152 { &vop_open_desc, spec_open }, /* open */ 153 { &vop_close_desc, nfsspec_close }, /* close */ 154 { &vop_access_desc, nfsspec_access }, /* access */ 155 { &vop_getattr_desc, nfs_getattr }, /* getattr */ 156 { &vop_setattr_desc, nfs_setattr }, /* setattr */ 157 { &vop_read_desc, nfsspec_read }, /* read */ 158 { &vop_write_desc, nfsspec_write }, /* write */ 159 { &vop_lease_desc, spec_lease_check }, /* lease */ 160 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 161 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */ 162 { &vop_poll_desc, spec_poll }, /* poll */ 163 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */ 164 { &vop_revoke_desc, spec_revoke }, /* revoke */ 165 { &vop_mmap_desc, spec_mmap }, /* mmap */ 166 { &vop_fsync_desc, spec_fsync }, /* fsync */ 167 { &vop_seek_desc, spec_seek }, /* seek */ 168 { &vop_remove_desc, spec_remove }, /* remove */ 169 { &vop_link_desc, spec_link }, /* link */ 170 { &vop_rename_desc, spec_rename }, /* rename */ 171 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */ 172 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */ 173 { &vop_symlink_desc, spec_symlink }, /* symlink */ 174 { &vop_readdir_desc, spec_readdir }, /* readdir */ 175 { &vop_readlink_desc, spec_readlink }, /* readlink */ 176 { &vop_abortop_desc, spec_abortop }, /* abortop */ 177 { &vop_inactive_desc, nfs_inactive }, /* inactive */ 178 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */ 179 { &vop_lock_desc, nfs_lock }, /* lock */ 180 { &vop_unlock_desc, nfs_unlock }, /* unlock */ 181 { &vop_bmap_desc, spec_bmap }, /* bmap */ 182 { &vop_strategy_desc, spec_strategy }, /* strategy */ 183 { &vop_print_desc, nfs_print }, /* print */ 184 { &vop_islocked_desc, nfs_islocked }, /* islocked */ 185 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */ 186 { &vop_advlock_desc, spec_advlock }, /* advlock */ 187 { &vop_bwrite_desc, spec_bwrite }, /* bwrite */ 188 { &vop_getpages_desc, spec_getpages }, /* getpages */ 189 { &vop_putpages_desc, spec_putpages }, /* putpages */ 190 { NULL, NULL } 191 }; 192 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc = 193 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries }; 194 195 int (**fifo_nfsv2nodeop_p) __P((void *)); 196 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = { 197 { &vop_default_desc, vn_default_error }, 198 { &vop_lookup_desc, fifo_lookup }, /* lookup */ 199 { &vop_create_desc, fifo_create }, /* create */ 200 { &vop_mknod_desc, fifo_mknod }, /* mknod */ 201 { &vop_open_desc, fifo_open }, /* open */ 202 { &vop_close_desc, nfsfifo_close }, /* close */ 203 { &vop_access_desc, nfsspec_access }, /* access */ 204 { &vop_getattr_desc, nfs_getattr }, /* getattr */ 205 { &vop_setattr_desc, nfs_setattr }, /* setattr */ 206 { &vop_read_desc, nfsfifo_read }, /* read */ 207 { &vop_write_desc, nfsfifo_write }, /* write */ 208 { &vop_lease_desc, fifo_lease_check }, /* lease */ 209 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 210 { &vop_ioctl_desc, fifo_ioctl }, /* ioctl */ 211 { &vop_poll_desc, fifo_poll }, /* poll */ 212 { &vop_kqfilter_desc, fifo_kqfilter }, /* kqfilter */ 213 { &vop_revoke_desc, fifo_revoke }, /* revoke */ 214 { &vop_mmap_desc, fifo_mmap }, /* mmap */ 215 { &vop_fsync_desc, nfs_fsync }, /* fsync */ 216 { &vop_seek_desc, fifo_seek }, /* seek */ 217 { &vop_remove_desc, fifo_remove }, /* remove */ 218 { &vop_link_desc, fifo_link }, /* link */ 219 { &vop_rename_desc, fifo_rename }, /* rename */ 220 { &vop_mkdir_desc, fifo_mkdir }, /* mkdir */ 221 { &vop_rmdir_desc, fifo_rmdir }, /* rmdir */ 222 { &vop_symlink_desc, fifo_symlink }, /* symlink */ 223 { &vop_readdir_desc, fifo_readdir }, /* readdir */ 224 { &vop_readlink_desc, fifo_readlink }, /* readlink */ 225 { &vop_abortop_desc, fifo_abortop }, /* abortop */ 226 { &vop_inactive_desc, nfs_inactive }, /* inactive */ 227 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */ 228 { &vop_lock_desc, nfs_lock }, /* lock */ 229 { &vop_unlock_desc, nfs_unlock }, /* unlock */ 230 { &vop_bmap_desc, fifo_bmap }, /* bmap */ 231 { &vop_strategy_desc, genfs_badop }, /* strategy */ 232 { &vop_print_desc, nfs_print }, /* print */ 233 { &vop_islocked_desc, nfs_islocked }, /* islocked */ 234 { &vop_pathconf_desc, fifo_pathconf }, /* pathconf */ 235 { &vop_advlock_desc, fifo_advlock }, /* advlock */ 236 { &vop_bwrite_desc, genfs_badop }, /* bwrite */ 237 { &vop_putpages_desc, fifo_putpages }, /* putpages */ 238 { NULL, NULL } 239 }; 240 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc = 241 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries }; 242 243 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *, 244 size_t, kauth_cred_t, struct lwp *); 245 static void nfs_writerpc_extfree(struct mbuf *, caddr_t, size_t, void *); 246 247 /* 248 * Global variables 249 */ 250 extern u_int32_t nfs_true, nfs_false; 251 extern u_int32_t nfs_xdrneg1; 252 extern const nfstype nfsv3_type[9]; 253 254 int nfs_numasync = 0; 255 #define DIRHDSIZ _DIRENT_NAMEOFF(dp) 256 #define UIO_ADVANCE(uio, siz) \ 257 (void)((uio)->uio_resid -= (siz), \ 258 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \ 259 (uio)->uio_iov->iov_len -= (siz)) 260 261 static void nfs_cache_enter(struct vnode *, struct vnode *, 262 struct componentname *); 263 264 static void 265 nfs_cache_enter(struct vnode *dvp, struct vnode *vp, 266 struct componentname *cnp) 267 { 268 struct nfsnode *dnp = VTONFS(dvp); 269 270 if (vp != NULL) { 271 struct nfsnode *np = VTONFS(vp); 272 273 np->n_ctime = np->n_vattr->va_ctime.tv_sec; 274 } 275 276 if (!timespecisset(&dnp->n_nctime)) 277 dnp->n_nctime = dnp->n_vattr->va_mtime; 278 279 cache_enter(dvp, vp, cnp); 280 } 281 282 /* 283 * nfs null call from vfs. 284 */ 285 int 286 nfs_null(vp, cred, l) 287 struct vnode *vp; 288 kauth_cred_t cred; 289 struct lwp *l; 290 { 291 caddr_t bpos, dpos; 292 int error = 0; 293 struct mbuf *mreq, *mrep, *md, *mb; 294 struct nfsnode *np = VTONFS(vp); 295 296 nfsm_reqhead(np, NFSPROC_NULL, 0); 297 nfsm_request(np, NFSPROC_NULL, l, cred); 298 nfsm_reqdone; 299 return (error); 300 } 301 302 /* 303 * nfs access vnode op. 304 * For nfs version 2, just return ok. File accesses may fail later. 305 * For nfs version 3, use the access rpc to check accessibility. If file modes 306 * are changed on the server, accesses might still fail later. 307 */ 308 int 309 nfs_access(v) 310 void *v; 311 { 312 struct vop_access_args /* { 313 struct vnode *a_vp; 314 int a_mode; 315 kauth_cred_t a_cred; 316 struct lwp *a_l; 317 } */ *ap = v; 318 struct vnode *vp = ap->a_vp; 319 #ifndef NFS_V2_ONLY 320 u_int32_t *tl; 321 caddr_t cp; 322 int32_t t1, t2; 323 caddr_t bpos, dpos, cp2; 324 int error = 0, attrflag; 325 struct mbuf *mreq, *mrep, *md, *mb; 326 u_int32_t mode, rmode; 327 const int v3 = NFS_ISV3(vp); 328 #endif 329 int cachevalid; 330 struct nfsnode *np = VTONFS(vp); 331 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 332 333 cachevalid = (np->n_accstamp != -1 && 334 (time_uptime - np->n_accstamp) < NFS_ATTRTIMEO(nmp, np) && 335 np->n_accuid == kauth_cred_geteuid(ap->a_cred)); 336 337 /* 338 * Check access cache first. If this request has been made for this 339 * uid shortly before, use the cached result. 340 */ 341 if (cachevalid) { 342 if (!np->n_accerror) { 343 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 344 return np->n_accerror; 345 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode) 346 return np->n_accerror; 347 } 348 349 #ifndef NFS_V2_ONLY 350 /* 351 * For nfs v3, do an access rpc, otherwise you are stuck emulating 352 * ufs_access() locally using the vattr. This may not be correct, 353 * since the server may apply other access criteria such as 354 * client uid-->server uid mapping that we do not know about, but 355 * this is better than just returning anything that is lying about 356 * in the cache. 357 */ 358 if (v3) { 359 nfsstats.rpccnt[NFSPROC_ACCESS]++; 360 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED); 361 nfsm_fhtom(np, v3); 362 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 363 if (ap->a_mode & VREAD) 364 mode = NFSV3ACCESS_READ; 365 else 366 mode = 0; 367 if (vp->v_type != VDIR) { 368 if (ap->a_mode & VWRITE) 369 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 370 if (ap->a_mode & VEXEC) 371 mode |= NFSV3ACCESS_EXECUTE; 372 } else { 373 if (ap->a_mode & VWRITE) 374 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 375 NFSV3ACCESS_DELETE); 376 if (ap->a_mode & VEXEC) 377 mode |= NFSV3ACCESS_LOOKUP; 378 } 379 *tl = txdr_unsigned(mode); 380 nfsm_request(np, NFSPROC_ACCESS, ap->a_l, ap->a_cred); 381 nfsm_postop_attr(vp, attrflag, 0); 382 if (!error) { 383 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 384 rmode = fxdr_unsigned(u_int32_t, *tl); 385 /* 386 * The NFS V3 spec does not clarify whether or not 387 * the returned access bits can be a superset of 388 * the ones requested, so... 389 */ 390 if ((rmode & mode) != mode) 391 error = EACCES; 392 } 393 nfsm_reqdone; 394 } else 395 #endif 396 return (nfsspec_access(ap)); 397 #ifndef NFS_V2_ONLY 398 /* 399 * Disallow write attempts on filesystems mounted read-only; 400 * unless the file is a socket, fifo, or a block or character 401 * device resident on the filesystem. 402 */ 403 if (!error && (ap->a_mode & VWRITE) && 404 (vp->v_mount->mnt_flag & MNT_RDONLY)) { 405 switch (vp->v_type) { 406 case VREG: 407 case VDIR: 408 case VLNK: 409 error = EROFS; 410 default: 411 break; 412 } 413 } 414 415 if (!error || error == EACCES) { 416 /* 417 * If we got the same result as for a previous, 418 * different request, OR it in. Don't update 419 * the timestamp in that case. 420 */ 421 if (cachevalid && np->n_accstamp != -1 && 422 error == np->n_accerror) { 423 if (!error) 424 np->n_accmode |= ap->a_mode; 425 else if ((np->n_accmode & ap->a_mode) == ap->a_mode) 426 np->n_accmode = ap->a_mode; 427 } else { 428 np->n_accstamp = time_uptime; 429 np->n_accuid = kauth_cred_geteuid(ap->a_cred); 430 np->n_accmode = ap->a_mode; 431 np->n_accerror = error; 432 } 433 } 434 435 return (error); 436 #endif 437 } 438 439 /* 440 * nfs open vnode op 441 * Check to see if the type is ok 442 * and that deletion is not in progress. 443 * For paged in text files, you will need to flush the page cache 444 * if consistency is lost. 445 */ 446 /* ARGSUSED */ 447 int 448 nfs_open(v) 449 void *v; 450 { 451 struct vop_open_args /* { 452 struct vnode *a_vp; 453 int a_mode; 454 kauth_cred_t a_cred; 455 struct lwp *a_l; 456 } */ *ap = v; 457 struct vnode *vp = ap->a_vp; 458 struct nfsnode *np = VTONFS(vp); 459 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 460 int error; 461 462 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 463 return (EACCES); 464 } 465 466 if (ap->a_mode & FREAD) { 467 if (np->n_rcred != NULL) 468 kauth_cred_free(np->n_rcred); 469 np->n_rcred = ap->a_cred; 470 kauth_cred_hold(np->n_rcred); 471 } 472 if (ap->a_mode & FWRITE) { 473 if (np->n_wcred != NULL) 474 kauth_cred_free(np->n_wcred); 475 np->n_wcred = ap->a_cred; 476 kauth_cred_hold(np->n_wcred); 477 } 478 479 #ifndef NFS_V2_ONLY 480 /* 481 * Get a valid lease. If cached data is stale, flush it. 482 */ 483 if (nmp->nm_flag & NFSMNT_NQNFS) { 484 if (NQNFS_CKINVALID(vp, np, ND_READ)) { 485 do { 486 error = nqnfs_getlease(vp, ND_READ, ap->a_cred, 487 ap->a_l); 488 } while (error == NQNFS_EXPIRED); 489 if (error) 490 return (error); 491 if (np->n_lrev != np->n_brev || 492 (np->n_flag & NQNFSNONCACHE)) { 493 if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 494 ap->a_l, 1)) == EINTR) 495 return (error); 496 np->n_brev = np->n_lrev; 497 } 498 } 499 } else 500 #endif 501 { 502 error = nfs_flushstalebuf(vp, ap->a_cred, ap->a_l, 0); 503 if (error) 504 return error; 505 } 506 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) 507 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */ 508 return (0); 509 } 510 511 /* 512 * nfs close vnode op 513 * What an NFS client should do upon close after writing is a debatable issue. 514 * Most NFS clients push delayed writes to the server upon close, basically for 515 * two reasons: 516 * 1 - So that any write errors may be reported back to the client process 517 * doing the close system call. By far the two most likely errors are 518 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 519 * 2 - To put a worst case upper bound on cache inconsistency between 520 * multiple clients for the file. 521 * There is also a consistency problem for Version 2 of the protocol w.r.t. 522 * not being able to tell if other clients are writing a file concurrently, 523 * since there is no way of knowing if the changed modify time in the reply 524 * is only due to the write for this client. 525 * (NFS Version 3 provides weak cache consistency data in the reply that 526 * should be sufficient to detect and handle this case.) 527 * 528 * The current code does the following: 529 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 530 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 531 * or commit them (this satisfies 1 and 2 except for the 532 * case where the server crashes after this close but 533 * before the commit RPC, which is felt to be "good 534 * enough". Changing the last argument to nfs_flush() to 535 * a 1 would force a commit operation, if it is felt a 536 * commit is necessary now. 537 * for NQNFS - do nothing now, since 2 is dealt with via leases and 538 * 1 should be dealt with via an fsync() system call for 539 * cases where write errors are important. 540 */ 541 /* ARGSUSED */ 542 int 543 nfs_close(v) 544 void *v; 545 { 546 struct vop_close_args /* { 547 struct vnodeop_desc *a_desc; 548 struct vnode *a_vp; 549 int a_fflag; 550 kauth_cred_t a_cred; 551 struct lwp *a_l; 552 } */ *ap = v; 553 struct vnode *vp = ap->a_vp; 554 struct nfsnode *np = VTONFS(vp); 555 int error = 0; 556 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist); 557 558 if (vp->v_type == VREG) { 559 if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 && 560 (np->n_flag & NMODIFIED)) { 561 #ifndef NFS_V2_ONLY 562 if (NFS_ISV3(vp)) { 563 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_l, 0); 564 np->n_flag &= ~NMODIFIED; 565 } else 566 #endif 567 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_l, 1); 568 NFS_INVALIDATE_ATTRCACHE(np); 569 } 570 if (np->n_flag & NWRITEERR) { 571 np->n_flag &= ~NWRITEERR; 572 error = np->n_error; 573 } 574 } 575 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0); 576 return (error); 577 } 578 579 /* 580 * nfs getattr call from vfs. 581 */ 582 int 583 nfs_getattr(v) 584 void *v; 585 { 586 struct vop_getattr_args /* { 587 struct vnode *a_vp; 588 struct vattr *a_vap; 589 kauth_cred_t a_cred; 590 struct lwp *a_l; 591 } */ *ap = v; 592 struct vnode *vp = ap->a_vp; 593 struct nfsnode *np = VTONFS(vp); 594 caddr_t cp; 595 u_int32_t *tl; 596 int32_t t1, t2; 597 caddr_t bpos, dpos; 598 int error = 0; 599 struct mbuf *mreq, *mrep, *md, *mb; 600 const int v3 = NFS_ISV3(vp); 601 602 /* 603 * Update local times for special files. 604 */ 605 if (np->n_flag & (NACC | NUPD)) 606 np->n_flag |= NCHG; 607 608 /* 609 * if we have delayed truncation, do it now. 610 */ 611 nfs_delayedtruncate(vp); 612 613 /* 614 * First look in the cache. 615 */ 616 if (nfs_getattrcache(vp, ap->a_vap) == 0) 617 return (0); 618 nfsstats.rpccnt[NFSPROC_GETATTR]++; 619 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3)); 620 nfsm_fhtom(np, v3); 621 nfsm_request(np, NFSPROC_GETATTR, ap->a_l, ap->a_cred); 622 if (!error) { 623 nfsm_loadattr(vp, ap->a_vap, 0); 624 if (vp->v_type == VDIR && 625 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ) 626 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ; 627 } 628 nfsm_reqdone; 629 return (error); 630 } 631 632 /* 633 * nfs setattr call. 634 */ 635 int 636 nfs_setattr(v) 637 void *v; 638 { 639 struct vop_setattr_args /* { 640 struct vnodeop_desc *a_desc; 641 struct vnode *a_vp; 642 struct vattr *a_vap; 643 kauth_cred_t a_cred; 644 struct lwp *a_l; 645 } */ *ap = v; 646 struct vnode *vp = ap->a_vp; 647 struct nfsnode *np = VTONFS(vp); 648 struct vattr *vap = ap->a_vap; 649 int error = 0; 650 u_quad_t tsize = 0; 651 652 /* 653 * Setting of flags is not supported. 654 */ 655 if (vap->va_flags != VNOVAL) 656 return (EOPNOTSUPP); 657 658 /* 659 * Disallow write attempts if the filesystem is mounted read-only. 660 */ 661 if ((vap->va_uid != (uid_t)VNOVAL || 662 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 663 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 664 (vp->v_mount->mnt_flag & MNT_RDONLY)) 665 return (EROFS); 666 if (vap->va_size != VNOVAL) { 667 switch (vp->v_type) { 668 case VDIR: 669 return (EISDIR); 670 case VCHR: 671 case VBLK: 672 case VSOCK: 673 case VFIFO: 674 if (vap->va_mtime.tv_sec == VNOVAL && 675 vap->va_atime.tv_sec == VNOVAL && 676 vap->va_mode == (mode_t)VNOVAL && 677 vap->va_uid == (uid_t)VNOVAL && 678 vap->va_gid == (gid_t)VNOVAL) 679 return (0); 680 vap->va_size = VNOVAL; 681 break; 682 default: 683 /* 684 * Disallow write attempts if the filesystem is 685 * mounted read-only. 686 */ 687 if (vp->v_mount->mnt_flag & MNT_RDONLY) 688 return (EROFS); 689 genfs_node_wrlock(vp); 690 uvm_vnp_setsize(vp, vap->va_size); 691 tsize = np->n_size; 692 np->n_size = vap->va_size; 693 if (vap->va_size == 0) 694 error = nfs_vinvalbuf(vp, 0, 695 ap->a_cred, ap->a_l, 1); 696 else 697 error = nfs_vinvalbuf(vp, V_SAVE, 698 ap->a_cred, ap->a_l, 1); 699 if (error) { 700 uvm_vnp_setsize(vp, tsize); 701 genfs_node_unlock(vp); 702 return (error); 703 } 704 np->n_vattr->va_size = vap->va_size; 705 } 706 } else { 707 /* 708 * flush files before setattr because a later write of 709 * cached data might change timestamps or reset sugid bits 710 */ 711 if ((vap->va_mtime.tv_sec != VNOVAL || 712 vap->va_atime.tv_sec != VNOVAL || 713 vap->va_mode != VNOVAL) && 714 vp->v_type == VREG && 715 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 716 ap->a_l, 1)) == EINTR) 717 return (error); 718 } 719 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_l); 720 if (vap->va_size != VNOVAL) { 721 if (error) { 722 np->n_size = np->n_vattr->va_size = tsize; 723 uvm_vnp_setsize(vp, np->n_size); 724 } 725 genfs_node_unlock(vp); 726 } 727 VN_KNOTE(vp, NOTE_ATTRIB); 728 return (error); 729 } 730 731 /* 732 * Do an nfs setattr rpc. 733 */ 734 int 735 nfs_setattrrpc(vp, vap, cred, l) 736 struct vnode *vp; 737 struct vattr *vap; 738 kauth_cred_t cred; 739 struct lwp *l; 740 { 741 struct nfsv2_sattr *sp; 742 caddr_t cp; 743 int32_t t1, t2; 744 caddr_t bpos, dpos; 745 u_int32_t *tl; 746 int error = 0; 747 struct mbuf *mreq, *mrep, *md, *mb; 748 const int v3 = NFS_ISV3(vp); 749 struct nfsnode *np = VTONFS(vp); 750 #ifndef NFS_V2_ONLY 751 int wccflag = NFSV3_WCCRATTR; 752 caddr_t cp2; 753 #endif 754 755 nfsstats.rpccnt[NFSPROC_SETATTR]++; 756 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3)); 757 nfsm_fhtom(np, v3); 758 #ifndef NFS_V2_ONLY 759 if (v3) { 760 nfsm_v3attrbuild(vap, TRUE); 761 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 762 *tl = nfs_false; 763 } else { 764 #endif 765 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 766 if (vap->va_mode == (mode_t)VNOVAL) 767 sp->sa_mode = nfs_xdrneg1; 768 else 769 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 770 if (vap->va_uid == (uid_t)VNOVAL) 771 sp->sa_uid = nfs_xdrneg1; 772 else 773 sp->sa_uid = txdr_unsigned(vap->va_uid); 774 if (vap->va_gid == (gid_t)VNOVAL) 775 sp->sa_gid = nfs_xdrneg1; 776 else 777 sp->sa_gid = txdr_unsigned(vap->va_gid); 778 sp->sa_size = txdr_unsigned(vap->va_size); 779 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 780 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 781 #ifndef NFS_V2_ONLY 782 } 783 #endif 784 nfsm_request(np, NFSPROC_SETATTR, l, cred); 785 #ifndef NFS_V2_ONLY 786 if (v3) { 787 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, FALSE); 788 } else 789 #endif 790 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC); 791 nfsm_reqdone; 792 return (error); 793 } 794 795 /* 796 * nfs lookup call, one step at a time... 797 * First look in cache 798 * If not found, unlock the directory nfsnode and do the rpc 799 * 800 * This code is full of lock/unlock statements and checks, because 801 * we continue after cache_lookup has finished (we need to check 802 * with the attr cache and do an rpc if it has timed out). This means 803 * that the locking effects of cache_lookup have to be taken into 804 * account. 805 */ 806 int 807 nfs_lookup(v) 808 void *v; 809 { 810 struct vop_lookup_args /* { 811 struct vnodeop_desc *a_desc; 812 struct vnode *a_dvp; 813 struct vnode **a_vpp; 814 struct componentname *a_cnp; 815 } */ *ap = v; 816 struct componentname *cnp = ap->a_cnp; 817 struct vnode *dvp = ap->a_dvp; 818 struct vnode **vpp = ap->a_vpp; 819 int flags; 820 struct vnode *newvp; 821 u_int32_t *tl; 822 caddr_t cp; 823 int32_t t1, t2; 824 caddr_t bpos, dpos, cp2; 825 struct mbuf *mreq, *mrep, *md, *mb; 826 long len; 827 nfsfh_t *fhp; 828 struct nfsnode *np; 829 int lockparent, wantparent, error = 0, attrflag, fhsize; 830 const int v3 = NFS_ISV3(dvp); 831 832 cnp->cn_flags &= ~PDIRUNLOCK; 833 flags = cnp->cn_flags; 834 835 *vpp = NULLVP; 836 newvp = NULLVP; 837 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 838 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 839 return (EROFS); 840 if (dvp->v_type != VDIR) 841 return (ENOTDIR); 842 843 /* 844 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves. 845 */ 846 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') { 847 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp); 848 if (error) 849 return error; 850 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) 851 return EISDIR; 852 VREF(dvp); 853 *vpp = dvp; 854 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 855 cnp->cn_flags |= SAVENAME; 856 return 0; 857 } 858 859 lockparent = flags & LOCKPARENT; 860 wantparent = flags & (LOCKPARENT|WANTPARENT); 861 np = VTONFS(dvp); 862 863 /* 864 * Before tediously performing a linear scan of the directory, 865 * check the name cache to see if the directory/name pair 866 * we are looking for is known already. 867 * If the directory/name pair is found in the name cache, 868 * we have to ensure the directory has not changed from 869 * the time the cache entry has been created. If it has, 870 * the cache entry has to be ignored. 871 */ 872 error = cache_lookup_raw(dvp, vpp, cnp); 873 KASSERT(dvp != *vpp); 874 if (error >= 0) { 875 struct vattr vattr; 876 int err2; 877 878 if (error && error != ENOENT) { 879 *vpp = NULLVP; 880 return error; 881 } 882 883 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_lwp); 884 if (err2 != 0) { 885 if (error == 0) 886 vrele(*vpp); 887 *vpp = NULLVP; 888 return err2; 889 } 890 891 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred, 892 cnp->cn_lwp) || timespeccmp(&vattr.va_mtime, 893 &VTONFS(dvp)->n_nctime, !=)) { 894 if (error == 0) { 895 vrele(*vpp); 896 *vpp = NULLVP; 897 } 898 cache_purge1(dvp, NULL, PURGE_CHILDREN); 899 timespecclear(&np->n_nctime); 900 goto dorpc; 901 } 902 903 if (error == ENOENT) { 904 goto noentry; 905 } 906 907 newvp = *vpp; 908 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp) 909 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) { 910 nfsstats.lookupcache_hits++; 911 if ((flags & ISDOTDOT) != 0 || 912 (~flags & (LOCKPARENT|ISLASTCN)) != 0) { 913 VOP_UNLOCK(dvp, 0); 914 cnp->cn_flags |= PDIRUNLOCK; 915 } 916 error = vn_lock(newvp, LK_EXCLUSIVE); 917 if (error) { 918 /* newvp has been revoked. */ 919 vrele(newvp); 920 *vpp = NULL; 921 return error; 922 } 923 if ((~flags & (LOCKPARENT|ISLASTCN)) == 0 924 && (cnp->cn_flags & PDIRUNLOCK)) { 925 KASSERT(flags & ISDOTDOT); 926 error = vn_lock(dvp, LK_EXCLUSIVE); 927 if (error) { 928 vput(newvp); 929 *vpp = NULL; 930 return error; 931 } 932 cnp->cn_flags &= ~PDIRUNLOCK; 933 } 934 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 935 cnp->cn_flags |= SAVENAME; 936 KASSERT(newvp->v_type != VNON); 937 return (0); 938 } 939 cache_purge1(newvp, NULL, PURGE_PARENTS); 940 vrele(newvp); 941 *vpp = NULLVP; 942 } 943 dorpc: 944 #if 0 945 /* 946 * because nfsv3 has the same CREATE semantics as ours, 947 * we don't have to perform LOOKUPs beforehand. 948 * 949 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL. 950 * XXX although we have no way to know if O_EXCL is requested or not. 951 */ 952 953 if (v3 && cnp->cn_nameiop == CREATE && 954 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN && 955 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 956 KASSERT(lockparent); 957 cnp->cn_flags |= SAVENAME; 958 return (EJUSTRETURN); 959 } 960 #endif /* 0 */ 961 962 error = 0; 963 newvp = NULLVP; 964 nfsstats.lookupcache_misses++; 965 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 966 len = cnp->cn_namelen; 967 nfsm_reqhead(np, NFSPROC_LOOKUP, 968 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 969 nfsm_fhtom(np, v3); 970 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 971 nfsm_request(np, NFSPROC_LOOKUP, cnp->cn_lwp, cnp->cn_cred); 972 if (error) { 973 nfsm_postop_attr(dvp, attrflag, 0); 974 m_freem(mrep); 975 goto nfsmout; 976 } 977 nfsm_getfh(fhp, fhsize, v3); 978 979 /* 980 * Handle RENAME case... 981 */ 982 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) { 983 if (NFS_CMPFH(np, fhp, fhsize)) { 984 m_freem(mrep); 985 return (EISDIR); 986 } 987 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 988 if (error) { 989 m_freem(mrep); 990 return error; 991 } 992 newvp = NFSTOV(np); 993 #ifndef NFS_V2_ONLY 994 if (v3) { 995 nfsm_postop_attr(newvp, attrflag, 0); 996 nfsm_postop_attr(dvp, attrflag, 0); 997 } else 998 #endif 999 nfsm_loadattr(newvp, (struct vattr *)0, 0); 1000 *vpp = newvp; 1001 m_freem(mrep); 1002 cnp->cn_flags |= SAVENAME; 1003 if (!lockparent) { 1004 VOP_UNLOCK(dvp, 0); 1005 cnp->cn_flags |= PDIRUNLOCK; 1006 } 1007 goto validate; 1008 } 1009 1010 /* 1011 * The postop attr handling is duplicated for each if case, 1012 * because it should be done while dvp is locked (unlocking 1013 * dvp is different for each case). 1014 */ 1015 1016 if (NFS_CMPFH(np, fhp, fhsize)) { 1017 /* 1018 * "." lookup 1019 */ 1020 VREF(dvp); 1021 newvp = dvp; 1022 #ifndef NFS_V2_ONLY 1023 if (v3) { 1024 nfsm_postop_attr(newvp, attrflag, 0); 1025 nfsm_postop_attr(dvp, attrflag, 0); 1026 } else 1027 #endif 1028 nfsm_loadattr(newvp, (struct vattr *)0, 0); 1029 } else if (flags & ISDOTDOT) { 1030 /* 1031 * ".." lookup 1032 */ 1033 VOP_UNLOCK(dvp, 0); 1034 cnp->cn_flags |= PDIRUNLOCK; 1035 1036 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 1037 if (error) { 1038 if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY) == 0) 1039 cnp->cn_flags &= ~PDIRUNLOCK; 1040 m_freem(mrep); 1041 return error; 1042 } 1043 newvp = NFSTOV(np); 1044 1045 #ifndef NFS_V2_ONLY 1046 if (v3) { 1047 nfsm_postop_attr(newvp, attrflag, 0); 1048 nfsm_postop_attr(dvp, attrflag, 0); 1049 } else 1050 #endif 1051 nfsm_loadattr(newvp, (struct vattr *)0, 0); 1052 1053 if (lockparent && (flags & ISLASTCN)) { 1054 if ((error = vn_lock(dvp, LK_EXCLUSIVE))) { 1055 m_freem(mrep); 1056 vput(newvp); 1057 return error; 1058 } 1059 cnp->cn_flags &= ~PDIRUNLOCK; 1060 } 1061 } else { 1062 /* 1063 * Other lookups. 1064 */ 1065 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 1066 if (error) { 1067 m_freem(mrep); 1068 return error; 1069 } 1070 newvp = NFSTOV(np); 1071 #ifndef NFS_V2_ONLY 1072 if (v3) { 1073 nfsm_postop_attr(newvp, attrflag, 0); 1074 nfsm_postop_attr(dvp, attrflag, 0); 1075 } else 1076 #endif 1077 nfsm_loadattr(newvp, (struct vattr *)0, 0); 1078 if (!lockparent || !(flags & ISLASTCN)) { 1079 VOP_UNLOCK(dvp, 0); 1080 cnp->cn_flags |= PDIRUNLOCK; 1081 } 1082 } 1083 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 1084 cnp->cn_flags |= SAVENAME; 1085 if ((cnp->cn_flags & MAKEENTRY) && 1086 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) { 1087 nfs_cache_enter(dvp, newvp, cnp); 1088 } 1089 *vpp = newvp; 1090 nfsm_reqdone; 1091 if (error) { 1092 /* 1093 * We get here only because of errors returned by 1094 * the RPC. Otherwise we'll have returned above 1095 * (the nfsm_* macros will jump to nfsm_reqdone 1096 * on error). 1097 */ 1098 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) && 1099 cnp->cn_nameiop != CREATE) { 1100 nfs_cache_enter(dvp, NULL, cnp); 1101 } 1102 if (newvp != NULLVP) { 1103 vrele(newvp); 1104 if (newvp != dvp) 1105 VOP_UNLOCK(newvp, 0); 1106 } 1107 noentry: 1108 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 1109 (flags & ISLASTCN) && error == ENOENT) { 1110 if (dvp->v_mount->mnt_flag & MNT_RDONLY) { 1111 error = EROFS; 1112 } else { 1113 error = EJUSTRETURN; 1114 cnp->cn_flags |= SAVENAME; 1115 } 1116 } 1117 *vpp = NULL; 1118 return error; 1119 } 1120 1121 validate: 1122 /* 1123 * make sure we have valid type and size. 1124 */ 1125 1126 newvp = *vpp; 1127 if (newvp->v_type == VNON) { 1128 struct vattr vattr; /* dummy */ 1129 1130 KASSERT(VTONFS(newvp)->n_attrstamp == 0); 1131 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_lwp); 1132 if (error) { 1133 vput(newvp); 1134 *vpp = NULL; 1135 } 1136 } 1137 1138 return error; 1139 } 1140 1141 /* 1142 * nfs read call. 1143 * Just call nfs_bioread() to do the work. 1144 */ 1145 int 1146 nfs_read(v) 1147 void *v; 1148 { 1149 struct vop_read_args /* { 1150 struct vnode *a_vp; 1151 struct uio *a_uio; 1152 int a_ioflag; 1153 kauth_cred_t a_cred; 1154 } */ *ap = v; 1155 struct vnode *vp = ap->a_vp; 1156 1157 if (vp->v_type != VREG) 1158 return EISDIR; 1159 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0)); 1160 } 1161 1162 /* 1163 * nfs readlink call 1164 */ 1165 int 1166 nfs_readlink(v) 1167 void *v; 1168 { 1169 struct vop_readlink_args /* { 1170 struct vnode *a_vp; 1171 struct uio *a_uio; 1172 kauth_cred_t a_cred; 1173 } */ *ap = v; 1174 struct vnode *vp = ap->a_vp; 1175 struct nfsnode *np = VTONFS(vp); 1176 1177 if (vp->v_type != VLNK) 1178 return (EPERM); 1179 1180 if (np->n_rcred != NULL) { 1181 kauth_cred_free(np->n_rcred); 1182 } 1183 np->n_rcred = ap->a_cred; 1184 kauth_cred_hold(np->n_rcred); 1185 1186 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0)); 1187 } 1188 1189 /* 1190 * Do a readlink rpc. 1191 * Called by nfs_doio() from below the buffer cache. 1192 */ 1193 int 1194 nfs_readlinkrpc(vp, uiop, cred) 1195 struct vnode *vp; 1196 struct uio *uiop; 1197 kauth_cred_t cred; 1198 { 1199 u_int32_t *tl; 1200 caddr_t cp; 1201 int32_t t1, t2; 1202 caddr_t bpos, dpos, cp2; 1203 int error = 0; 1204 uint32_t len; 1205 struct mbuf *mreq, *mrep, *md, *mb; 1206 const int v3 = NFS_ISV3(vp); 1207 struct nfsnode *np = VTONFS(vp); 1208 #ifndef NFS_V2_ONLY 1209 int attrflag; 1210 #endif 1211 1212 nfsstats.rpccnt[NFSPROC_READLINK]++; 1213 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3)); 1214 nfsm_fhtom(np, v3); 1215 nfsm_request(np, NFSPROC_READLINK, curlwp, cred); 1216 #ifndef NFS_V2_ONLY 1217 if (v3) 1218 nfsm_postop_attr(vp, attrflag, 0); 1219 #endif 1220 if (!error) { 1221 #ifndef NFS_V2_ONLY 1222 if (v3) { 1223 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED); 1224 len = fxdr_unsigned(uint32_t, *tl); 1225 if (len > MAXPATHLEN) { 1226 /* 1227 * this pathname is too long for us. 1228 */ 1229 m_freem(mrep); 1230 /* Solaris returns EINVAL. should we follow? */ 1231 error = ENAMETOOLONG; 1232 goto nfsmout; 1233 } 1234 } else 1235 #endif 1236 { 1237 nfsm_strsiz(len, NFS_MAXPATHLEN); 1238 } 1239 nfsm_mtouio(uiop, len); 1240 } 1241 nfsm_reqdone; 1242 return (error); 1243 } 1244 1245 /* 1246 * nfs read rpc call 1247 * Ditto above 1248 */ 1249 int 1250 nfs_readrpc(vp, uiop) 1251 struct vnode *vp; 1252 struct uio *uiop; 1253 { 1254 u_int32_t *tl; 1255 caddr_t cp; 1256 int32_t t1, t2; 1257 caddr_t bpos, dpos, cp2; 1258 struct mbuf *mreq, *mrep, *md, *mb; 1259 struct nfsmount *nmp; 1260 int error = 0, len, retlen, tsiz, eof, byte_count; 1261 const int v3 = NFS_ISV3(vp); 1262 struct nfsnode *np = VTONFS(vp); 1263 #ifndef NFS_V2_ONLY 1264 int attrflag; 1265 #endif 1266 1267 #ifndef nolint 1268 eof = 0; 1269 #endif 1270 nmp = VFSTONFS(vp->v_mount); 1271 tsiz = uiop->uio_resid; 1272 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) 1273 return (EFBIG); 1274 iostat_busy(nmp->nm_stats); 1275 byte_count = 0; /* count bytes actually transferred */ 1276 while (tsiz > 0) { 1277 nfsstats.rpccnt[NFSPROC_READ]++; 1278 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz; 1279 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3); 1280 nfsm_fhtom(np, v3); 1281 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3); 1282 #ifndef NFS_V2_ONLY 1283 if (v3) { 1284 txdr_hyper(uiop->uio_offset, tl); 1285 *(tl + 2) = txdr_unsigned(len); 1286 } else 1287 #endif 1288 { 1289 *tl++ = txdr_unsigned(uiop->uio_offset); 1290 *tl++ = txdr_unsigned(len); 1291 *tl = 0; 1292 } 1293 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred); 1294 #ifndef NFS_V2_ONLY 1295 if (v3) { 1296 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC); 1297 if (error) { 1298 m_freem(mrep); 1299 goto nfsmout; 1300 } 1301 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1302 eof = fxdr_unsigned(int, *(tl + 1)); 1303 } else 1304 #endif 1305 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC); 1306 nfsm_strsiz(retlen, nmp->nm_rsize); 1307 nfsm_mtouio(uiop, retlen); 1308 m_freem(mrep); 1309 tsiz -= retlen; 1310 byte_count += retlen; 1311 #ifndef NFS_V2_ONLY 1312 if (v3) { 1313 if (eof || retlen == 0) 1314 tsiz = 0; 1315 } else 1316 #endif 1317 if (retlen < len) 1318 tsiz = 0; 1319 } 1320 nfsmout: 1321 iostat_unbusy(nmp->nm_stats, byte_count, 1); 1322 return (error); 1323 } 1324 1325 struct nfs_writerpc_context { 1326 struct simplelock nwc_slock; 1327 volatile int nwc_mbufcount; 1328 }; 1329 1330 /* 1331 * free mbuf used to refer protected pages while write rpc call. 1332 * called at splvm. 1333 */ 1334 static void 1335 nfs_writerpc_extfree(struct mbuf *m, caddr_t tbuf __unused, 1336 size_t size __unused, void *arg) 1337 { 1338 struct nfs_writerpc_context *ctx = arg; 1339 1340 KASSERT(m != NULL); 1341 KASSERT(ctx != NULL); 1342 pool_cache_put(&mbpool_cache, m); 1343 simple_lock(&ctx->nwc_slock); 1344 if (--ctx->nwc_mbufcount == 0) { 1345 wakeup(ctx); 1346 } 1347 simple_unlock(&ctx->nwc_slock); 1348 } 1349 1350 /* 1351 * nfs write call 1352 */ 1353 int 1354 nfs_writerpc(vp, uiop, iomode, pageprotected, stalewriteverfp) 1355 struct vnode *vp; 1356 struct uio *uiop; 1357 int *iomode; 1358 boolean_t pageprotected; 1359 boolean_t *stalewriteverfp; 1360 { 1361 u_int32_t *tl; 1362 caddr_t cp; 1363 int32_t t1, t2; 1364 caddr_t bpos, dpos; 1365 struct mbuf *mreq, *mrep, *md, *mb; 1366 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1367 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR; 1368 const int v3 = NFS_ISV3(vp); 1369 int committed = NFSV3WRITE_FILESYNC; 1370 struct nfsnode *np = VTONFS(vp); 1371 struct nfs_writerpc_context ctx; 1372 int s, byte_count; 1373 struct lwp *l = NULL; 1374 size_t origresid; 1375 #ifndef NFS_V2_ONLY 1376 caddr_t cp2; 1377 int rlen, commit; 1378 #endif 1379 1380 simple_lock_init(&ctx.nwc_slock); 1381 ctx.nwc_mbufcount = 1; 1382 1383 if (vp->v_mount->mnt_flag & MNT_RDONLY) { 1384 panic("writerpc readonly vp %p", vp); 1385 } 1386 1387 #ifdef DIAGNOSTIC 1388 if (uiop->uio_iovcnt != 1) 1389 panic("nfs: writerpc iovcnt > 1"); 1390 #endif 1391 tsiz = uiop->uio_resid; 1392 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) 1393 return (EFBIG); 1394 if (pageprotected) { 1395 l = curlwp; 1396 PHOLD(l); 1397 } 1398 retry: 1399 origresid = uiop->uio_resid; 1400 KASSERT(origresid == uiop->uio_iov->iov_len); 1401 iostat_busy(nmp->nm_stats); 1402 byte_count = 0; /* count of bytes actually written */ 1403 while (tsiz > 0) { 1404 uint32_t datalen; /* data bytes need to be allocated in mbuf */ 1405 uint32_t backup; 1406 boolean_t stalewriteverf = FALSE; 1407 1408 nfsstats.rpccnt[NFSPROC_WRITE]++; 1409 len = min(tsiz, nmp->nm_wsize); 1410 datalen = pageprotected ? 0 : nfsm_rndup(len); 1411 nfsm_reqhead(np, NFSPROC_WRITE, 1412 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen); 1413 nfsm_fhtom(np, v3); 1414 #ifndef NFS_V2_ONLY 1415 if (v3) { 1416 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED); 1417 txdr_hyper(uiop->uio_offset, tl); 1418 tl += 2; 1419 *tl++ = txdr_unsigned(len); 1420 *tl++ = txdr_unsigned(*iomode); 1421 *tl = txdr_unsigned(len); 1422 } else 1423 #endif 1424 { 1425 u_int32_t x; 1426 1427 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED); 1428 /* Set both "begin" and "current" to non-garbage. */ 1429 x = txdr_unsigned((u_int32_t)uiop->uio_offset); 1430 *tl++ = x; /* "begin offset" */ 1431 *tl++ = x; /* "current offset" */ 1432 x = txdr_unsigned(len); 1433 *tl++ = x; /* total to this offset */ 1434 *tl = x; /* size of this write */ 1435 1436 } 1437 if (pageprotected) { 1438 /* 1439 * since we know pages can't be modified during i/o, 1440 * no need to copy them for us. 1441 */ 1442 struct mbuf *m; 1443 struct iovec *iovp = uiop->uio_iov; 1444 1445 m = m_get(M_WAIT, MT_DATA); 1446 MCLAIM(m, &nfs_mowner); 1447 MEXTADD(m, iovp->iov_base, len, M_MBUF, 1448 nfs_writerpc_extfree, &ctx); 1449 m->m_flags |= M_EXT_ROMAP; 1450 m->m_len = len; 1451 mb->m_next = m; 1452 /* 1453 * no need to maintain mb and bpos here 1454 * because no one care them later. 1455 */ 1456 #if 0 1457 mb = m; 1458 bpos = mtod(caddr_t, mb) + mb->m_len; 1459 #endif 1460 UIO_ADVANCE(uiop, len); 1461 uiop->uio_offset += len; 1462 s = splvm(); 1463 simple_lock(&ctx.nwc_slock); 1464 ctx.nwc_mbufcount++; 1465 simple_unlock(&ctx.nwc_slock); 1466 splx(s); 1467 nfs_zeropad(mb, 0, nfsm_padlen(len)); 1468 } else { 1469 nfsm_uiotom(uiop, len); 1470 } 1471 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred); 1472 #ifndef NFS_V2_ONLY 1473 if (v3) { 1474 wccflag = NFSV3_WCCCHK; 1475 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error); 1476 if (!error) { 1477 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED 1478 + NFSX_V3WRITEVERF); 1479 rlen = fxdr_unsigned(int, *tl++); 1480 if (rlen == 0) { 1481 error = NFSERR_IO; 1482 m_freem(mrep); 1483 break; 1484 } else if (rlen < len) { 1485 backup = len - rlen; 1486 UIO_ADVANCE(uiop, -backup); 1487 uiop->uio_offset -= backup; 1488 len = rlen; 1489 } 1490 commit = fxdr_unsigned(int, *tl++); 1491 1492 /* 1493 * Return the lowest committment level 1494 * obtained by any of the RPCs. 1495 */ 1496 if (committed == NFSV3WRITE_FILESYNC) 1497 committed = commit; 1498 else if (committed == NFSV3WRITE_DATASYNC && 1499 commit == NFSV3WRITE_UNSTABLE) 1500 committed = commit; 1501 simple_lock(&nmp->nm_slock); 1502 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){ 1503 memcpy(nmp->nm_writeverf, tl, 1504 NFSX_V3WRITEVERF); 1505 nmp->nm_iflag |= NFSMNT_HASWRITEVERF; 1506 } else if ((nmp->nm_iflag & 1507 NFSMNT_STALEWRITEVERF) || 1508 memcmp(tl, nmp->nm_writeverf, 1509 NFSX_V3WRITEVERF)) { 1510 memcpy(nmp->nm_writeverf, tl, 1511 NFSX_V3WRITEVERF); 1512 /* 1513 * note NFSMNT_STALEWRITEVERF 1514 * if we're the first thread to 1515 * notice it. 1516 */ 1517 if ((nmp->nm_iflag & 1518 NFSMNT_STALEWRITEVERF) == 0) { 1519 stalewriteverf = TRUE; 1520 nmp->nm_iflag |= 1521 NFSMNT_STALEWRITEVERF; 1522 } 1523 } 1524 simple_unlock(&nmp->nm_slock); 1525 } 1526 } else 1527 #endif 1528 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC); 1529 if (wccflag) 1530 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime; 1531 m_freem(mrep); 1532 if (error) 1533 break; 1534 tsiz -= len; 1535 byte_count += len; 1536 if (stalewriteverf) { 1537 *stalewriteverfp = TRUE; 1538 stalewriteverf = FALSE; 1539 if (committed == NFSV3WRITE_UNSTABLE && 1540 len != origresid) { 1541 /* 1542 * if our write requests weren't atomic but 1543 * unstable, datas in previous iterations 1544 * might have already been lost now. 1545 * then, we should resend them to nfsd. 1546 */ 1547 backup = origresid - tsiz; 1548 UIO_ADVANCE(uiop, -backup); 1549 uiop->uio_offset -= backup; 1550 tsiz = origresid; 1551 goto retry; 1552 } 1553 } 1554 } 1555 nfsmout: 1556 iostat_unbusy(nmp->nm_stats, byte_count, 0); 1557 if (pageprotected) { 1558 /* 1559 * wait until mbufs go away. 1560 * retransmitted mbufs can survive longer than rpc requests 1561 * themselves. 1562 */ 1563 s = splvm(); 1564 simple_lock(&ctx.nwc_slock); 1565 ctx.nwc_mbufcount--; 1566 while (ctx.nwc_mbufcount > 0) { 1567 ltsleep(&ctx, PRIBIO, "nfsmblk", 0, &ctx.nwc_slock); 1568 } 1569 simple_unlock(&ctx.nwc_slock); 1570 splx(s); 1571 PRELE(l); 1572 } 1573 *iomode = committed; 1574 if (error) 1575 uiop->uio_resid = tsiz; 1576 return (error); 1577 } 1578 1579 /* 1580 * nfs mknod rpc 1581 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1582 * mode set to specify the file type and the size field for rdev. 1583 */ 1584 int 1585 nfs_mknodrpc(dvp, vpp, cnp, vap) 1586 struct vnode *dvp; 1587 struct vnode **vpp; 1588 struct componentname *cnp; 1589 struct vattr *vap; 1590 { 1591 struct nfsv2_sattr *sp; 1592 u_int32_t *tl; 1593 caddr_t cp; 1594 int32_t t1, t2; 1595 struct vnode *newvp = (struct vnode *)0; 1596 struct nfsnode *dnp, *np; 1597 char *cp2; 1598 caddr_t bpos, dpos; 1599 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1600 struct mbuf *mreq, *mrep, *md, *mb; 1601 u_int32_t rdev; 1602 const int v3 = NFS_ISV3(dvp); 1603 1604 if (vap->va_type == VCHR || vap->va_type == VBLK) 1605 rdev = txdr_unsigned(vap->va_rdev); 1606 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1607 rdev = nfs_xdrneg1; 1608 else { 1609 VOP_ABORTOP(dvp, cnp); 1610 vput(dvp); 1611 return (EOPNOTSUPP); 1612 } 1613 nfsstats.rpccnt[NFSPROC_MKNOD]++; 1614 dnp = VTONFS(dvp); 1615 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED + 1616 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3)); 1617 nfsm_fhtom(dnp, v3); 1618 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1619 #ifndef NFS_V2_ONLY 1620 if (v3) { 1621 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 1622 *tl++ = vtonfsv3_type(vap->va_type); 1623 nfsm_v3attrbuild(vap, FALSE); 1624 if (vap->va_type == VCHR || vap->va_type == VBLK) { 1625 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1626 *tl++ = txdr_unsigned(major(vap->va_rdev)); 1627 *tl = txdr_unsigned(minor(vap->va_rdev)); 1628 } 1629 } else 1630 #endif 1631 { 1632 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 1633 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1634 sp->sa_uid = nfs_xdrneg1; 1635 sp->sa_gid = nfs_xdrneg1; 1636 sp->sa_size = rdev; 1637 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1638 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1639 } 1640 nfsm_request(dnp, NFSPROC_MKNOD, cnp->cn_lwp, cnp->cn_cred); 1641 if (!error) { 1642 nfsm_mtofh(dvp, newvp, v3, gotvp); 1643 if (!gotvp) { 1644 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1645 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np); 1646 if (!error) 1647 newvp = NFSTOV(np); 1648 } 1649 } 1650 #ifndef NFS_V2_ONLY 1651 if (v3) 1652 nfsm_wcc_data(dvp, wccflag, 0, !error); 1653 #endif 1654 nfsm_reqdone; 1655 if (error) { 1656 if (newvp) 1657 vput(newvp); 1658 } else { 1659 if (cnp->cn_flags & MAKEENTRY) 1660 nfs_cache_enter(dvp, newvp, cnp); 1661 *vpp = newvp; 1662 } 1663 PNBUF_PUT(cnp->cn_pnbuf); 1664 VTONFS(dvp)->n_flag |= NMODIFIED; 1665 if (!wccflag) 1666 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1667 vput(dvp); 1668 return (error); 1669 } 1670 1671 /* 1672 * nfs mknod vop 1673 * just call nfs_mknodrpc() to do the work. 1674 */ 1675 /* ARGSUSED */ 1676 int 1677 nfs_mknod(v) 1678 void *v; 1679 { 1680 struct vop_mknod_args /* { 1681 struct vnode *a_dvp; 1682 struct vnode **a_vpp; 1683 struct componentname *a_cnp; 1684 struct vattr *a_vap; 1685 } */ *ap = v; 1686 struct vnode *dvp = ap->a_dvp; 1687 struct componentname *cnp = ap->a_cnp; 1688 int error; 1689 1690 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap); 1691 VN_KNOTE(dvp, NOTE_WRITE); 1692 if (error == 0 || error == EEXIST) 1693 cache_purge1(dvp, cnp, 0); 1694 return (error); 1695 } 1696 1697 #ifndef NFS_V2_ONLY 1698 static u_long create_verf; 1699 #endif 1700 /* 1701 * nfs file create call 1702 */ 1703 int 1704 nfs_create(v) 1705 void *v; 1706 { 1707 struct vop_create_args /* { 1708 struct vnode *a_dvp; 1709 struct vnode **a_vpp; 1710 struct componentname *a_cnp; 1711 struct vattr *a_vap; 1712 } */ *ap = v; 1713 struct vnode *dvp = ap->a_dvp; 1714 struct vattr *vap = ap->a_vap; 1715 struct componentname *cnp = ap->a_cnp; 1716 struct nfsv2_sattr *sp; 1717 u_int32_t *tl; 1718 caddr_t cp; 1719 int32_t t1, t2; 1720 struct nfsnode *dnp, *np = (struct nfsnode *)0; 1721 struct vnode *newvp = (struct vnode *)0; 1722 caddr_t bpos, dpos, cp2; 1723 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0; 1724 struct mbuf *mreq, *mrep, *md, *mb; 1725 const int v3 = NFS_ISV3(dvp); 1726 1727 /* 1728 * Oops, not for me.. 1729 */ 1730 if (vap->va_type == VSOCK) 1731 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1732 1733 KASSERT(vap->va_type == VREG); 1734 1735 #ifdef VA_EXCLUSIVE 1736 if (vap->va_vaflags & VA_EXCLUSIVE) 1737 fmode |= O_EXCL; 1738 #endif 1739 again: 1740 error = 0; 1741 nfsstats.rpccnt[NFSPROC_CREATE]++; 1742 dnp = VTONFS(dvp); 1743 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED + 1744 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3)); 1745 nfsm_fhtom(dnp, v3); 1746 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1747 #ifndef NFS_V2_ONLY 1748 if (v3) { 1749 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 1750 if (fmode & O_EXCL) { 1751 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE); 1752 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF); 1753 #ifdef INET 1754 if (TAILQ_FIRST(&in_ifaddrhead)) 1755 *tl++ = TAILQ_FIRST(&in_ifaddrhead)-> 1756 ia_addr.sin_addr.s_addr; 1757 else 1758 *tl++ = create_verf; 1759 #else 1760 *tl++ = create_verf; 1761 #endif 1762 *tl = ++create_verf; 1763 } else { 1764 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED); 1765 nfsm_v3attrbuild(vap, FALSE); 1766 } 1767 } else 1768 #endif 1769 { 1770 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 1771 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1772 sp->sa_uid = nfs_xdrneg1; 1773 sp->sa_gid = nfs_xdrneg1; 1774 sp->sa_size = 0; 1775 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1776 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1777 } 1778 nfsm_request(dnp, NFSPROC_CREATE, cnp->cn_lwp, cnp->cn_cred); 1779 if (!error) { 1780 nfsm_mtofh(dvp, newvp, v3, gotvp); 1781 if (!gotvp) { 1782 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1783 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp, &np); 1784 if (!error) 1785 newvp = NFSTOV(np); 1786 } 1787 } 1788 #ifndef NFS_V2_ONLY 1789 if (v3) 1790 nfsm_wcc_data(dvp, wccflag, 0, !error); 1791 #endif 1792 nfsm_reqdone; 1793 if (error) { 1794 /* 1795 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP. 1796 */ 1797 if (v3 && (fmode & O_EXCL) && error == ENOTSUP) { 1798 fmode &= ~O_EXCL; 1799 goto again; 1800 } 1801 } else if (v3 && (fmode & O_EXCL)) { 1802 struct timespec ts; 1803 1804 getnanotime(&ts); 1805 1806 /* 1807 * make sure that we'll update timestamps as 1808 * most server implementations use them to store 1809 * the create verifier. 1810 * 1811 * XXX it's better to use TOSERVER always. 1812 */ 1813 1814 if (vap->va_atime.tv_sec == VNOVAL) 1815 vap->va_atime = ts; 1816 if (vap->va_mtime.tv_sec == VNOVAL) 1817 vap->va_mtime = ts; 1818 1819 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_lwp); 1820 } 1821 if (error == 0) { 1822 if (cnp->cn_flags & MAKEENTRY) 1823 nfs_cache_enter(dvp, newvp, cnp); 1824 else 1825 cache_purge1(dvp, cnp, 0); 1826 *ap->a_vpp = newvp; 1827 } else { 1828 if (newvp) 1829 vput(newvp); 1830 if (error == EEXIST) 1831 cache_purge1(dvp, cnp, 0); 1832 } 1833 PNBUF_PUT(cnp->cn_pnbuf); 1834 VTONFS(dvp)->n_flag |= NMODIFIED; 1835 if (!wccflag) 1836 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1837 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1838 vput(dvp); 1839 return (error); 1840 } 1841 1842 /* 1843 * nfs file remove call 1844 * To try and make nfs semantics closer to ufs semantics, a file that has 1845 * other processes using the vnode is renamed instead of removed and then 1846 * removed later on the last close. 1847 * - If v_usecount > 1 1848 * If a rename is not already in the works 1849 * call nfs_sillyrename() to set it up 1850 * else 1851 * do the remove rpc 1852 */ 1853 int 1854 nfs_remove(v) 1855 void *v; 1856 { 1857 struct vop_remove_args /* { 1858 struct vnodeop_desc *a_desc; 1859 struct vnode * a_dvp; 1860 struct vnode * a_vp; 1861 struct componentname * a_cnp; 1862 } */ *ap = v; 1863 struct vnode *vp = ap->a_vp; 1864 struct vnode *dvp = ap->a_dvp; 1865 struct componentname *cnp = ap->a_cnp; 1866 struct nfsnode *np = VTONFS(vp); 1867 int error = 0; 1868 struct vattr vattr; 1869 1870 #ifndef DIAGNOSTIC 1871 if ((cnp->cn_flags & HASBUF) == 0) 1872 panic("nfs_remove: no name"); 1873 if (vp->v_usecount < 1) 1874 panic("nfs_remove: bad v_usecount"); 1875 #endif 1876 if (vp->v_type == VDIR) 1877 error = EPERM; 1878 else if (vp->v_usecount == 1 || (np->n_sillyrename && 1879 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_lwp) == 0 && 1880 vattr.va_nlink > 1)) { 1881 /* 1882 * Purge the name cache so that the chance of a lookup for 1883 * the name succeeding while the remove is in progress is 1884 * minimized. Without node locking it can still happen, such 1885 * that an I/O op returns ESTALE, but since you get this if 1886 * another host removes the file.. 1887 */ 1888 cache_purge(vp); 1889 /* 1890 * throw away biocache buffers, mainly to avoid 1891 * unnecessary delayed writes later. 1892 */ 1893 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_lwp, 1); 1894 /* Do the rpc */ 1895 if (error != EINTR) 1896 error = nfs_removerpc(dvp, cnp->cn_nameptr, 1897 cnp->cn_namelen, cnp->cn_cred, cnp->cn_lwp); 1898 } else if (!np->n_sillyrename) 1899 error = nfs_sillyrename(dvp, vp, cnp, FALSE); 1900 PNBUF_PUT(cnp->cn_pnbuf); 1901 if (!error && nfs_getattrcache(vp, &vattr) == 0 && 1902 vattr.va_nlink == 1) { 1903 np->n_flag |= NREMOVED; 1904 } 1905 NFS_INVALIDATE_ATTRCACHE(np); 1906 VN_KNOTE(vp, NOTE_DELETE); 1907 VN_KNOTE(dvp, NOTE_WRITE); 1908 if (dvp == vp) 1909 vrele(vp); 1910 else 1911 vput(vp); 1912 vput(dvp); 1913 return (error); 1914 } 1915 1916 /* 1917 * nfs file remove rpc called from nfs_inactive 1918 */ 1919 int 1920 nfs_removeit(sp) 1921 struct sillyrename *sp; 1922 { 1923 1924 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred, 1925 (struct lwp *)0)); 1926 } 1927 1928 /* 1929 * Nfs remove rpc, called from nfs_remove() and nfs_removeit(). 1930 */ 1931 int 1932 nfs_removerpc(dvp, name, namelen, cred, l) 1933 struct vnode *dvp; 1934 const char *name; 1935 int namelen; 1936 kauth_cred_t cred; 1937 struct lwp *l; 1938 { 1939 u_int32_t *tl; 1940 caddr_t cp; 1941 #ifndef NFS_V2_ONLY 1942 int32_t t1; 1943 caddr_t cp2; 1944 #endif 1945 int32_t t2; 1946 caddr_t bpos, dpos; 1947 int error = 0, wccflag = NFSV3_WCCRATTR; 1948 struct mbuf *mreq, *mrep, *md, *mb; 1949 const int v3 = NFS_ISV3(dvp); 1950 int rexmit = 0; 1951 struct nfsnode *dnp = VTONFS(dvp); 1952 1953 nfsstats.rpccnt[NFSPROC_REMOVE]++; 1954 nfsm_reqhead(dnp, NFSPROC_REMOVE, 1955 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen)); 1956 nfsm_fhtom(dnp, v3); 1957 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 1958 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit); 1959 #ifndef NFS_V2_ONLY 1960 if (v3) 1961 nfsm_wcc_data(dvp, wccflag, 0, !error); 1962 #endif 1963 nfsm_reqdone; 1964 VTONFS(dvp)->n_flag |= NMODIFIED; 1965 if (!wccflag) 1966 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1967 /* 1968 * Kludge City: If the first reply to the remove rpc is lost.. 1969 * the reply to the retransmitted request will be ENOENT 1970 * since the file was in fact removed 1971 * Therefore, we cheat and return success. 1972 */ 1973 if (rexmit && error == ENOENT) 1974 error = 0; 1975 return (error); 1976 } 1977 1978 /* 1979 * nfs file rename call 1980 */ 1981 int 1982 nfs_rename(v) 1983 void *v; 1984 { 1985 struct vop_rename_args /* { 1986 struct vnode *a_fdvp; 1987 struct vnode *a_fvp; 1988 struct componentname *a_fcnp; 1989 struct vnode *a_tdvp; 1990 struct vnode *a_tvp; 1991 struct componentname *a_tcnp; 1992 } */ *ap = v; 1993 struct vnode *fvp = ap->a_fvp; 1994 struct vnode *tvp = ap->a_tvp; 1995 struct vnode *fdvp = ap->a_fdvp; 1996 struct vnode *tdvp = ap->a_tdvp; 1997 struct componentname *tcnp = ap->a_tcnp; 1998 struct componentname *fcnp = ap->a_fcnp; 1999 int error; 2000 2001 #ifndef DIAGNOSTIC 2002 if ((tcnp->cn_flags & HASBUF) == 0 || 2003 (fcnp->cn_flags & HASBUF) == 0) 2004 panic("nfs_rename: no name"); 2005 #endif 2006 /* Check for cross-device rename */ 2007 if ((fvp->v_mount != tdvp->v_mount) || 2008 (tvp && (fvp->v_mount != tvp->v_mount))) { 2009 error = EXDEV; 2010 goto out; 2011 } 2012 2013 /* 2014 * If the tvp exists and is in use, sillyrename it before doing the 2015 * rename of the new file over it. 2016 * 2017 * Have sillyrename use link instead of rename if possible, 2018 * so that we don't lose the file if the rename fails, and so 2019 * that there's no window when the "to" file doesn't exist. 2020 */ 2021 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename && 2022 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, TRUE)) { 2023 VN_KNOTE(tvp, NOTE_DELETE); 2024 vput(tvp); 2025 tvp = NULL; 2026 } 2027 2028 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 2029 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 2030 tcnp->cn_lwp); 2031 2032 VN_KNOTE(fdvp, NOTE_WRITE); 2033 VN_KNOTE(tdvp, NOTE_WRITE); 2034 if (error == 0 || error == EEXIST) { 2035 if (fvp->v_type == VDIR) 2036 cache_purge(fvp); 2037 else 2038 cache_purge1(fdvp, fcnp, 0); 2039 if (tvp != NULL && tvp->v_type == VDIR) 2040 cache_purge(tvp); 2041 else 2042 cache_purge1(tdvp, tcnp, 0); 2043 } 2044 out: 2045 if (tdvp == tvp) 2046 vrele(tdvp); 2047 else 2048 vput(tdvp); 2049 if (tvp) 2050 vput(tvp); 2051 vrele(fdvp); 2052 vrele(fvp); 2053 return (error); 2054 } 2055 2056 /* 2057 * nfs file rename rpc called from nfs_remove() above 2058 */ 2059 int 2060 nfs_renameit(sdvp, scnp, sp) 2061 struct vnode *sdvp; 2062 struct componentname *scnp; 2063 struct sillyrename *sp; 2064 { 2065 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, 2066 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_lwp)); 2067 } 2068 2069 /* 2070 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 2071 */ 2072 int 2073 nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, l) 2074 struct vnode *fdvp; 2075 const char *fnameptr; 2076 int fnamelen; 2077 struct vnode *tdvp; 2078 const char *tnameptr; 2079 int tnamelen; 2080 kauth_cred_t cred; 2081 struct lwp *l; 2082 { 2083 u_int32_t *tl; 2084 caddr_t cp; 2085 #ifndef NFS_V2_ONLY 2086 int32_t t1; 2087 caddr_t cp2; 2088 #endif 2089 int32_t t2; 2090 caddr_t bpos, dpos; 2091 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 2092 struct mbuf *mreq, *mrep, *md, *mb; 2093 const int v3 = NFS_ISV3(fdvp); 2094 int rexmit = 0; 2095 struct nfsnode *fdnp = VTONFS(fdvp); 2096 2097 nfsstats.rpccnt[NFSPROC_RENAME]++; 2098 nfsm_reqhead(fdnp, NFSPROC_RENAME, 2099 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) + 2100 nfsm_rndup(tnamelen)); 2101 nfsm_fhtom(fdnp, v3); 2102 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 2103 nfsm_fhtom(VTONFS(tdvp), v3); 2104 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 2105 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit); 2106 #ifndef NFS_V2_ONLY 2107 if (v3) { 2108 nfsm_wcc_data(fdvp, fwccflag, 0, !error); 2109 nfsm_wcc_data(tdvp, twccflag, 0, !error); 2110 } 2111 #endif 2112 nfsm_reqdone; 2113 VTONFS(fdvp)->n_flag |= NMODIFIED; 2114 VTONFS(tdvp)->n_flag |= NMODIFIED; 2115 if (!fwccflag) 2116 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp)); 2117 if (!twccflag) 2118 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp)); 2119 /* 2120 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 2121 */ 2122 if (rexmit && error == ENOENT) 2123 error = 0; 2124 return (error); 2125 } 2126 2127 /* 2128 * NFS link RPC, called from nfs_link. 2129 * Assumes dvp and vp locked, and leaves them that way. 2130 */ 2131 2132 static int 2133 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name, 2134 size_t namelen, kauth_cred_t cred, struct lwp *l) 2135 { 2136 u_int32_t *tl; 2137 caddr_t cp; 2138 #ifndef NFS_V2_ONLY 2139 int32_t t1; 2140 caddr_t cp2; 2141 #endif 2142 int32_t t2; 2143 caddr_t bpos, dpos; 2144 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 2145 struct mbuf *mreq, *mrep, *md, *mb; 2146 const int v3 = NFS_ISV3(dvp); 2147 int rexmit = 0; 2148 struct nfsnode *np = VTONFS(vp); 2149 2150 nfsstats.rpccnt[NFSPROC_LINK]++; 2151 nfsm_reqhead(np, NFSPROC_LINK, 2152 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen)); 2153 nfsm_fhtom(np, v3); 2154 nfsm_fhtom(VTONFS(dvp), v3); 2155 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 2156 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit); 2157 #ifndef NFS_V2_ONLY 2158 if (v3) { 2159 nfsm_postop_attr(vp, attrflag, 0); 2160 nfsm_wcc_data(dvp, wccflag, 0, !error); 2161 } 2162 #endif 2163 nfsm_reqdone; 2164 2165 VTONFS(dvp)->n_flag |= NMODIFIED; 2166 if (!attrflag) 2167 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 2168 if (!wccflag) 2169 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2170 2171 /* 2172 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 2173 */ 2174 if (rexmit && error == EEXIST) 2175 error = 0; 2176 2177 return error; 2178 } 2179 2180 /* 2181 * nfs hard link create call 2182 */ 2183 int 2184 nfs_link(v) 2185 void *v; 2186 { 2187 struct vop_link_args /* { 2188 struct vnode *a_dvp; 2189 struct vnode *a_vp; 2190 struct componentname *a_cnp; 2191 } */ *ap = v; 2192 struct vnode *vp = ap->a_vp; 2193 struct vnode *dvp = ap->a_dvp; 2194 struct componentname *cnp = ap->a_cnp; 2195 int error = 0; 2196 2197 if (dvp->v_mount != vp->v_mount) { 2198 VOP_ABORTOP(dvp, cnp); 2199 vput(dvp); 2200 return (EXDEV); 2201 } 2202 if (dvp != vp) { 2203 error = vn_lock(vp, LK_EXCLUSIVE); 2204 if (error != 0) { 2205 VOP_ABORTOP(dvp, cnp); 2206 vput(dvp); 2207 return error; 2208 } 2209 } 2210 2211 /* 2212 * Push all writes to the server, so that the attribute cache 2213 * doesn't get "out of sync" with the server. 2214 * XXX There should be a better way! 2215 */ 2216 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0, cnp->cn_lwp); 2217 2218 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, 2219 cnp->cn_cred, cnp->cn_lwp); 2220 2221 if (error == 0) 2222 cache_purge1(dvp, cnp, 0); 2223 PNBUF_PUT(cnp->cn_pnbuf); 2224 if (dvp != vp) 2225 VOP_UNLOCK(vp, 0); 2226 VN_KNOTE(vp, NOTE_LINK); 2227 VN_KNOTE(dvp, NOTE_WRITE); 2228 vput(dvp); 2229 return (error); 2230 } 2231 2232 /* 2233 * nfs symbolic link create call 2234 */ 2235 int 2236 nfs_symlink(v) 2237 void *v; 2238 { 2239 struct vop_symlink_args /* { 2240 struct vnode *a_dvp; 2241 struct vnode **a_vpp; 2242 struct componentname *a_cnp; 2243 struct vattr *a_vap; 2244 char *a_target; 2245 } */ *ap = v; 2246 struct vnode *dvp = ap->a_dvp; 2247 struct vattr *vap = ap->a_vap; 2248 struct componentname *cnp = ap->a_cnp; 2249 struct nfsv2_sattr *sp; 2250 u_int32_t *tl; 2251 caddr_t cp; 2252 int32_t t1, t2; 2253 caddr_t bpos, dpos, cp2; 2254 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 2255 struct mbuf *mreq, *mrep, *md, *mb; 2256 struct vnode *newvp = (struct vnode *)0; 2257 const int v3 = NFS_ISV3(dvp); 2258 int rexmit = 0; 2259 struct nfsnode *dnp = VTONFS(dvp); 2260 2261 *ap->a_vpp = NULL; 2262 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 2263 slen = strlen(ap->a_target); 2264 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED + 2265 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3)); 2266 nfsm_fhtom(dnp, v3); 2267 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 2268 #ifndef NFS_V2_ONlY 2269 if (v3) 2270 nfsm_v3attrbuild(vap, FALSE); 2271 #endif 2272 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 2273 #ifndef NFS_V2_ONlY 2274 if (!v3) { 2275 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 2276 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 2277 sp->sa_uid = nfs_xdrneg1; 2278 sp->sa_gid = nfs_xdrneg1; 2279 sp->sa_size = nfs_xdrneg1; 2280 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 2281 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 2282 } 2283 #endif 2284 nfsm_request1(dnp, NFSPROC_SYMLINK, cnp->cn_lwp, cnp->cn_cred, 2285 &rexmit); 2286 #ifndef NFS_V2_ONlY 2287 if (v3) { 2288 if (!error) 2289 nfsm_mtofh(dvp, newvp, v3, gotvp); 2290 nfsm_wcc_data(dvp, wccflag, 0, !error); 2291 } 2292 #endif 2293 nfsm_reqdone; 2294 /* 2295 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 2296 */ 2297 if (rexmit && error == EEXIST) 2298 error = 0; 2299 if (error == 0 || error == EEXIST) 2300 cache_purge1(dvp, cnp, 0); 2301 if (error == 0 && newvp == NULL) { 2302 struct nfsnode *np = NULL; 2303 2304 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2305 cnp->cn_cred, cnp->cn_lwp, &np); 2306 if (error == 0) 2307 newvp = NFSTOV(np); 2308 } 2309 if (error) { 2310 if (newvp != NULL) 2311 vput(newvp); 2312 } else { 2313 *ap->a_vpp = newvp; 2314 } 2315 PNBUF_PUT(cnp->cn_pnbuf); 2316 VTONFS(dvp)->n_flag |= NMODIFIED; 2317 if (!wccflag) 2318 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2319 VN_KNOTE(dvp, NOTE_WRITE); 2320 vput(dvp); 2321 return (error); 2322 } 2323 2324 /* 2325 * nfs make dir call 2326 */ 2327 int 2328 nfs_mkdir(v) 2329 void *v; 2330 { 2331 struct vop_mkdir_args /* { 2332 struct vnode *a_dvp; 2333 struct vnode **a_vpp; 2334 struct componentname *a_cnp; 2335 struct vattr *a_vap; 2336 } */ *ap = v; 2337 struct vnode *dvp = ap->a_dvp; 2338 struct vattr *vap = ap->a_vap; 2339 struct componentname *cnp = ap->a_cnp; 2340 struct nfsv2_sattr *sp; 2341 u_int32_t *tl; 2342 caddr_t cp; 2343 int32_t t1, t2; 2344 int len; 2345 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0; 2346 struct vnode *newvp = (struct vnode *)0; 2347 caddr_t bpos, dpos, cp2; 2348 int error = 0, wccflag = NFSV3_WCCRATTR; 2349 int gotvp = 0; 2350 int rexmit = 0; 2351 struct mbuf *mreq, *mrep, *md, *mb; 2352 const int v3 = NFS_ISV3(dvp); 2353 2354 len = cnp->cn_namelen; 2355 nfsstats.rpccnt[NFSPROC_MKDIR]++; 2356 nfsm_reqhead(dnp, NFSPROC_MKDIR, 2357 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3)); 2358 nfsm_fhtom(dnp, v3); 2359 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 2360 #ifndef NFS_V2_ONLY 2361 if (v3) { 2362 nfsm_v3attrbuild(vap, FALSE); 2363 } else 2364 #endif 2365 { 2366 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 2367 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 2368 sp->sa_uid = nfs_xdrneg1; 2369 sp->sa_gid = nfs_xdrneg1; 2370 sp->sa_size = nfs_xdrneg1; 2371 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 2372 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 2373 } 2374 nfsm_request1(dnp, NFSPROC_MKDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit); 2375 if (!error) 2376 nfsm_mtofh(dvp, newvp, v3, gotvp); 2377 if (v3) 2378 nfsm_wcc_data(dvp, wccflag, 0, !error); 2379 nfsm_reqdone; 2380 VTONFS(dvp)->n_flag |= NMODIFIED; 2381 if (!wccflag) 2382 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2383 /* 2384 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry 2385 * if we can succeed in looking up the directory. 2386 */ 2387 if ((rexmit && error == EEXIST) || (!error && !gotvp)) { 2388 if (newvp) { 2389 vput(newvp); 2390 newvp = (struct vnode *)0; 2391 } 2392 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 2393 cnp->cn_lwp, &np); 2394 if (!error) { 2395 newvp = NFSTOV(np); 2396 if (newvp->v_type != VDIR || newvp == dvp) 2397 error = EEXIST; 2398 } 2399 } 2400 if (error) { 2401 if (newvp) { 2402 if (dvp != newvp) 2403 vput(newvp); 2404 else 2405 vrele(newvp); 2406 } 2407 } else { 2408 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2409 if (cnp->cn_flags & MAKEENTRY) 2410 nfs_cache_enter(dvp, newvp, cnp); 2411 *ap->a_vpp = newvp; 2412 } 2413 PNBUF_PUT(cnp->cn_pnbuf); 2414 vput(dvp); 2415 return (error); 2416 } 2417 2418 /* 2419 * nfs remove directory call 2420 */ 2421 int 2422 nfs_rmdir(v) 2423 void *v; 2424 { 2425 struct vop_rmdir_args /* { 2426 struct vnode *a_dvp; 2427 struct vnode *a_vp; 2428 struct componentname *a_cnp; 2429 } */ *ap = v; 2430 struct vnode *vp = ap->a_vp; 2431 struct vnode *dvp = ap->a_dvp; 2432 struct componentname *cnp = ap->a_cnp; 2433 u_int32_t *tl; 2434 caddr_t cp; 2435 #ifndef NFS_V2_ONLY 2436 int32_t t1; 2437 caddr_t cp2; 2438 #endif 2439 int32_t t2; 2440 caddr_t bpos, dpos; 2441 int error = 0, wccflag = NFSV3_WCCRATTR; 2442 int rexmit = 0; 2443 struct mbuf *mreq, *mrep, *md, *mb; 2444 const int v3 = NFS_ISV3(dvp); 2445 struct nfsnode *dnp; 2446 2447 if (dvp == vp) { 2448 vrele(dvp); 2449 vput(dvp); 2450 PNBUF_PUT(cnp->cn_pnbuf); 2451 return (EINVAL); 2452 } 2453 nfsstats.rpccnt[NFSPROC_RMDIR]++; 2454 dnp = VTONFS(dvp); 2455 nfsm_reqhead(dnp, NFSPROC_RMDIR, 2456 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 2457 nfsm_fhtom(dnp, v3); 2458 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 2459 nfsm_request1(dnp, NFSPROC_RMDIR, cnp->cn_lwp, cnp->cn_cred, &rexmit); 2460 #ifndef NFS_V2_ONLY 2461 if (v3) 2462 nfsm_wcc_data(dvp, wccflag, 0, !error); 2463 #endif 2464 nfsm_reqdone; 2465 PNBUF_PUT(cnp->cn_pnbuf); 2466 VTONFS(dvp)->n_flag |= NMODIFIED; 2467 if (!wccflag) 2468 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2469 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2470 VN_KNOTE(vp, NOTE_DELETE); 2471 cache_purge(vp); 2472 vput(vp); 2473 vput(dvp); 2474 /* 2475 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2476 */ 2477 if (rexmit && error == ENOENT) 2478 error = 0; 2479 return (error); 2480 } 2481 2482 /* 2483 * nfs readdir call 2484 */ 2485 int 2486 nfs_readdir(v) 2487 void *v; 2488 { 2489 struct vop_readdir_args /* { 2490 struct vnode *a_vp; 2491 struct uio *a_uio; 2492 kauth_cred_t a_cred; 2493 int *a_eofflag; 2494 off_t **a_cookies; 2495 int *a_ncookies; 2496 } */ *ap = v; 2497 struct vnode *vp = ap->a_vp; 2498 struct uio *uio = ap->a_uio; 2499 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2500 char *base = uio->uio_iov->iov_base; 2501 int tresid, error; 2502 size_t count, lost; 2503 struct dirent *dp; 2504 off_t *cookies = NULL; 2505 int ncookies = 0, nc; 2506 2507 if (vp->v_type != VDIR) 2508 return (EPERM); 2509 2510 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1); 2511 count = uio->uio_resid - lost; 2512 if (count <= 0) 2513 return (EINVAL); 2514 2515 /* 2516 * Call nfs_bioread() to do the real work. 2517 */ 2518 tresid = uio->uio_resid = count; 2519 error = nfs_bioread(vp, uio, 0, ap->a_cred, 2520 ap->a_cookies ? NFSBIO_CACHECOOKIES : 0); 2521 2522 if (!error && ap->a_cookies) { 2523 ncookies = count / 16; 2524 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK); 2525 *ap->a_cookies = cookies; 2526 } 2527 2528 if (!error && uio->uio_resid == tresid) { 2529 uio->uio_resid += lost; 2530 nfsstats.direofcache_misses++; 2531 if (ap->a_cookies) 2532 *ap->a_ncookies = 0; 2533 *ap->a_eofflag = 1; 2534 return (0); 2535 } 2536 2537 if (!error && ap->a_cookies) { 2538 /* 2539 * Only the NFS server and emulations use cookies, and they 2540 * load the directory block into system space, so we can 2541 * just look at it directly. 2542 */ 2543 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) || 2544 uio->uio_iovcnt != 1) 2545 panic("nfs_readdir: lost in space"); 2546 for (nc = 0; ncookies-- && 2547 base < (char *)uio->uio_iov->iov_base; nc++){ 2548 dp = (struct dirent *) base; 2549 if (dp->d_reclen == 0) 2550 break; 2551 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) 2552 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp); 2553 else 2554 *(cookies++) = NFS_GETCOOKIE(dp); 2555 base += dp->d_reclen; 2556 } 2557 uio->uio_resid += 2558 ((caddr_t)uio->uio_iov->iov_base - base); 2559 uio->uio_iov->iov_len += 2560 ((caddr_t)uio->uio_iov->iov_base - base); 2561 uio->uio_iov->iov_base = base; 2562 *ap->a_ncookies = nc; 2563 } 2564 2565 uio->uio_resid += lost; 2566 *ap->a_eofflag = 0; 2567 return (error); 2568 } 2569 2570 /* 2571 * Readdir rpc call. 2572 * Called from below the buffer cache by nfs_doio(). 2573 */ 2574 int 2575 nfs_readdirrpc(vp, uiop, cred) 2576 struct vnode *vp; 2577 struct uio *uiop; 2578 kauth_cred_t cred; 2579 { 2580 int len, left; 2581 struct dirent *dp = NULL; 2582 u_int32_t *tl; 2583 caddr_t cp; 2584 int32_t t1, t2; 2585 caddr_t bpos, dpos, cp2; 2586 struct mbuf *mreq, *mrep, *md, *mb; 2587 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2588 struct nfsnode *dnp = VTONFS(vp); 2589 u_quad_t fileno; 2590 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1; 2591 #ifndef NFS_V2_ONLY 2592 int attrflag; 2593 #endif 2594 int nrpcs = 0, reclen; 2595 const int v3 = NFS_ISV3(vp); 2596 2597 #ifdef DIAGNOSTIC 2598 /* 2599 * Should be called from buffer cache, so only amount of 2600 * NFS_DIRBLKSIZ will be requested. 2601 */ 2602 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2603 panic("nfs readdirrpc bad uio"); 2604 #endif 2605 2606 /* 2607 * Loop around doing readdir rpc's of size nm_readdirsize 2608 * truncated to a multiple of NFS_DIRFRAGSIZ. 2609 * The stopping criteria is EOF or buffer full. 2610 */ 2611 while (more_dirs && bigenough) { 2612 /* 2613 * Heuristic: don't bother to do another RPC to further 2614 * fill up this block if there is not much room left. (< 50% 2615 * of the readdir RPC size). This wastes some buffer space 2616 * but can save up to 50% in RPC calls. 2617 */ 2618 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2619 bigenough = 0; 2620 break; 2621 } 2622 nfsstats.rpccnt[NFSPROC_READDIR]++; 2623 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) + 2624 NFSX_READDIR(v3)); 2625 nfsm_fhtom(dnp, v3); 2626 #ifndef NFS_V2_ONLY 2627 if (v3) { 2628 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED); 2629 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2630 txdr_swapcookie3(uiop->uio_offset, tl); 2631 } else { 2632 txdr_cookie3(uiop->uio_offset, tl); 2633 } 2634 tl += 2; 2635 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2636 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2637 } else 2638 #endif 2639 { 2640 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 2641 *tl++ = txdr_unsigned(uiop->uio_offset); 2642 } 2643 *tl = txdr_unsigned(nmp->nm_readdirsize); 2644 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred); 2645 nrpcs++; 2646 #ifndef NFS_V2_ONLY 2647 if (v3) { 2648 nfsm_postop_attr(vp, attrflag, 0); 2649 if (!error) { 2650 nfsm_dissect(tl, u_int32_t *, 2651 2 * NFSX_UNSIGNED); 2652 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2653 dnp->n_cookieverf.nfsuquad[1] = *tl; 2654 } else { 2655 m_freem(mrep); 2656 goto nfsmout; 2657 } 2658 } 2659 #endif 2660 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2661 more_dirs = fxdr_unsigned(int, *tl); 2662 2663 /* loop thru the dir entries, doctoring them to 4bsd form */ 2664 while (more_dirs && bigenough) { 2665 #ifndef NFS_V2_ONLY 2666 if (v3) { 2667 nfsm_dissect(tl, u_int32_t *, 2668 3 * NFSX_UNSIGNED); 2669 fileno = fxdr_hyper(tl); 2670 len = fxdr_unsigned(int, *(tl + 2)); 2671 } else 2672 #endif 2673 { 2674 nfsm_dissect(tl, u_int32_t *, 2675 2 * NFSX_UNSIGNED); 2676 fileno = fxdr_unsigned(u_quad_t, *tl++); 2677 len = fxdr_unsigned(int, *tl); 2678 } 2679 if (len <= 0 || len > NFS_MAXNAMLEN) { 2680 error = EBADRPC; 2681 m_freem(mrep); 2682 goto nfsmout; 2683 } 2684 /* for cookie stashing */ 2685 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2686 left = NFS_DIRFRAGSIZ - blksiz; 2687 if (reclen > left) { 2688 memset(uiop->uio_iov->iov_base, 0, left); 2689 dp->d_reclen += left; 2690 UIO_ADVANCE(uiop, left); 2691 blksiz = 0; 2692 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2693 } 2694 if (reclen > uiop->uio_resid) 2695 bigenough = 0; 2696 if (bigenough) { 2697 int tlen; 2698 2699 dp = (struct dirent *)uiop->uio_iov->iov_base; 2700 dp->d_fileno = fileno; 2701 dp->d_namlen = len; 2702 dp->d_reclen = reclen; 2703 dp->d_type = DT_UNKNOWN; 2704 blksiz += reclen; 2705 if (blksiz == NFS_DIRFRAGSIZ) 2706 blksiz = 0; 2707 UIO_ADVANCE(uiop, DIRHDSIZ); 2708 nfsm_mtouio(uiop, len); 2709 tlen = reclen - (DIRHDSIZ + len); 2710 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2711 UIO_ADVANCE(uiop, tlen); 2712 } else 2713 nfsm_adv(nfsm_rndup(len)); 2714 #ifndef NFS_V2_ONLY 2715 if (v3) { 2716 nfsm_dissect(tl, u_int32_t *, 2717 3 * NFSX_UNSIGNED); 2718 } else 2719 #endif 2720 { 2721 nfsm_dissect(tl, u_int32_t *, 2722 2 * NFSX_UNSIGNED); 2723 } 2724 if (bigenough) { 2725 #ifndef NFS_V2_ONLY 2726 if (v3) { 2727 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2728 uiop->uio_offset = 2729 fxdr_swapcookie3(tl); 2730 else 2731 uiop->uio_offset = 2732 fxdr_cookie3(tl); 2733 } 2734 else 2735 #endif 2736 { 2737 uiop->uio_offset = 2738 fxdr_unsigned(off_t, *tl); 2739 } 2740 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2741 } 2742 if (v3) 2743 tl += 2; 2744 else 2745 tl++; 2746 more_dirs = fxdr_unsigned(int, *tl); 2747 } 2748 /* 2749 * If at end of rpc data, get the eof boolean 2750 */ 2751 if (!more_dirs) { 2752 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2753 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2754 2755 /* 2756 * kludge: if we got no entries, treat it as EOF. 2757 * some server sometimes send a reply without any 2758 * entries or EOF. 2759 * although it might mean the server has very long name, 2760 * we can't handle such entries anyway. 2761 */ 2762 2763 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2764 more_dirs = 0; 2765 } 2766 m_freem(mrep); 2767 } 2768 /* 2769 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2770 * by increasing d_reclen for the last record. 2771 */ 2772 if (blksiz > 0) { 2773 left = NFS_DIRFRAGSIZ - blksiz; 2774 memset(uiop->uio_iov->iov_base, 0, left); 2775 dp->d_reclen += left; 2776 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2777 UIO_ADVANCE(uiop, left); 2778 } 2779 2780 /* 2781 * We are now either at the end of the directory or have filled the 2782 * block. 2783 */ 2784 if (bigenough) { 2785 dnp->n_direofoffset = uiop->uio_offset; 2786 dnp->n_flag |= NEOFVALID; 2787 } 2788 nfsmout: 2789 return (error); 2790 } 2791 2792 #ifndef NFS_V2_ONLY 2793 /* 2794 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2795 */ 2796 int 2797 nfs_readdirplusrpc(vp, uiop, cred) 2798 struct vnode *vp; 2799 struct uio *uiop; 2800 kauth_cred_t cred; 2801 { 2802 int len, left; 2803 struct dirent *dp = NULL; 2804 u_int32_t *tl; 2805 caddr_t cp; 2806 int32_t t1, t2; 2807 struct vnode *newvp; 2808 caddr_t bpos, dpos, cp2; 2809 struct mbuf *mreq, *mrep, *md, *mb; 2810 struct nameidata nami, *ndp = &nami; 2811 struct componentname *cnp = &ndp->ni_cnd; 2812 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2813 struct nfsnode *dnp = VTONFS(vp), *np; 2814 nfsfh_t *fhp; 2815 u_quad_t fileno; 2816 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2817 int attrflag, fhsize, nrpcs = 0, reclen; 2818 struct nfs_fattr fattr, *fp; 2819 2820 #ifdef DIAGNOSTIC 2821 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2822 panic("nfs readdirplusrpc bad uio"); 2823 #endif 2824 ndp->ni_dvp = vp; 2825 newvp = NULLVP; 2826 2827 /* 2828 * Loop around doing readdir rpc's of size nm_readdirsize 2829 * truncated to a multiple of NFS_DIRFRAGSIZ. 2830 * The stopping criteria is EOF or buffer full. 2831 */ 2832 while (more_dirs && bigenough) { 2833 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2834 bigenough = 0; 2835 break; 2836 } 2837 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2838 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS, 2839 NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2840 nfsm_fhtom(dnp, 1); 2841 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED); 2842 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2843 txdr_swapcookie3(uiop->uio_offset, tl); 2844 } else { 2845 txdr_cookie3(uiop->uio_offset, tl); 2846 } 2847 tl += 2; 2848 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2849 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2850 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2851 *tl = txdr_unsigned(nmp->nm_rsize); 2852 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred); 2853 nfsm_postop_attr(vp, attrflag, 0); 2854 if (error) { 2855 m_freem(mrep); 2856 goto nfsmout; 2857 } 2858 nrpcs++; 2859 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2860 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2861 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2862 more_dirs = fxdr_unsigned(int, *tl); 2863 2864 /* loop thru the dir entries, doctoring them to 4bsd form */ 2865 while (more_dirs && bigenough) { 2866 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2867 fileno = fxdr_hyper(tl); 2868 len = fxdr_unsigned(int, *(tl + 2)); 2869 if (len <= 0 || len > NFS_MAXNAMLEN) { 2870 error = EBADRPC; 2871 m_freem(mrep); 2872 goto nfsmout; 2873 } 2874 /* for cookie stashing */ 2875 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2876 left = NFS_DIRFRAGSIZ - blksiz; 2877 if (reclen > left) { 2878 /* 2879 * DIRFRAGSIZ is aligned, no need to align 2880 * again here. 2881 */ 2882 memset(uiop->uio_iov->iov_base, 0, left); 2883 dp->d_reclen += left; 2884 UIO_ADVANCE(uiop, left); 2885 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2886 blksiz = 0; 2887 } 2888 if (reclen > uiop->uio_resid) 2889 bigenough = 0; 2890 if (bigenough) { 2891 int tlen; 2892 2893 dp = (struct dirent *)uiop->uio_iov->iov_base; 2894 dp->d_fileno = fileno; 2895 dp->d_namlen = len; 2896 dp->d_reclen = reclen; 2897 dp->d_type = DT_UNKNOWN; 2898 blksiz += reclen; 2899 if (blksiz == NFS_DIRFRAGSIZ) 2900 blksiz = 0; 2901 UIO_ADVANCE(uiop, DIRHDSIZ); 2902 nfsm_mtouio(uiop, len); 2903 tlen = reclen - (DIRHDSIZ + len); 2904 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2905 UIO_ADVANCE(uiop, tlen); 2906 cnp->cn_nameptr = dp->d_name; 2907 cnp->cn_namelen = dp->d_namlen; 2908 } else 2909 nfsm_adv(nfsm_rndup(len)); 2910 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2911 if (bigenough) { 2912 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2913 uiop->uio_offset = 2914 fxdr_swapcookie3(tl); 2915 else 2916 uiop->uio_offset = 2917 fxdr_cookie3(tl); 2918 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2919 } 2920 tl += 2; 2921 2922 /* 2923 * Since the attributes are before the file handle 2924 * (sigh), we must skip over the attributes and then 2925 * come back and get them. 2926 */ 2927 attrflag = fxdr_unsigned(int, *tl); 2928 if (attrflag) { 2929 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR); 2930 memcpy(&fattr, fp, NFSX_V3FATTR); 2931 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2932 doit = fxdr_unsigned(int, *tl); 2933 if (doit) { 2934 nfsm_getfh(fhp, fhsize, 1); 2935 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2936 VREF(vp); 2937 newvp = vp; 2938 np = dnp; 2939 } else { 2940 error = nfs_nget1(vp->v_mount, fhp, 2941 fhsize, &np, LK_NOWAIT); 2942 if (!error) 2943 newvp = NFSTOV(np); 2944 } 2945 if (!error) { 2946 const char *xcp; 2947 2948 nfs_loadattrcache(&newvp, &fattr, 0, 0); 2949 if (bigenough) { 2950 dp->d_type = 2951 IFTODT(VTTOIF(np->n_vattr->va_type)); 2952 if (cnp->cn_namelen <= NCHNAMLEN) { 2953 ndp->ni_vp = newvp; 2954 xcp = cnp->cn_nameptr + 2955 cnp->cn_namelen; 2956 cnp->cn_hash = 2957 namei_hash(cnp->cn_nameptr, &xcp); 2958 nfs_cache_enter(ndp->ni_dvp, 2959 ndp->ni_vp, cnp); 2960 } 2961 } 2962 } 2963 error = 0; 2964 } 2965 } else { 2966 /* Just skip over the file handle */ 2967 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2968 i = fxdr_unsigned(int, *tl); 2969 nfsm_adv(nfsm_rndup(i)); 2970 } 2971 if (newvp != NULLVP) { 2972 if (newvp == vp) 2973 vrele(newvp); 2974 else 2975 vput(newvp); 2976 newvp = NULLVP; 2977 } 2978 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2979 more_dirs = fxdr_unsigned(int, *tl); 2980 } 2981 /* 2982 * If at end of rpc data, get the eof boolean 2983 */ 2984 if (!more_dirs) { 2985 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2986 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2987 2988 /* 2989 * kludge: see a comment in nfs_readdirrpc. 2990 */ 2991 2992 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2993 more_dirs = 0; 2994 } 2995 m_freem(mrep); 2996 } 2997 /* 2998 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2999 * by increasing d_reclen for the last record. 3000 */ 3001 if (blksiz > 0) { 3002 left = NFS_DIRFRAGSIZ - blksiz; 3003 memset(uiop->uio_iov->iov_base, 0, left); 3004 dp->d_reclen += left; 3005 NFS_STASHCOOKIE(dp, uiop->uio_offset); 3006 UIO_ADVANCE(uiop, left); 3007 } 3008 3009 /* 3010 * We are now either at the end of the directory or have filled the 3011 * block. 3012 */ 3013 if (bigenough) { 3014 dnp->n_direofoffset = uiop->uio_offset; 3015 dnp->n_flag |= NEOFVALID; 3016 } 3017 nfsmout: 3018 if (newvp != NULLVP) { 3019 if(newvp == vp) 3020 vrele(newvp); 3021 else 3022 vput(newvp); 3023 } 3024 return (error); 3025 } 3026 #endif 3027 3028 /* 3029 * Silly rename. To make the NFS filesystem that is stateless look a little 3030 * more like the "ufs" a remove of an active vnode is translated to a rename 3031 * to a funny looking filename that is removed by nfs_inactive on the 3032 * nfsnode. There is the potential for another process on a different client 3033 * to create the same funny name between the nfs_lookitup() fails and the 3034 * nfs_rename() completes, but... 3035 */ 3036 int 3037 nfs_sillyrename(dvp, vp, cnp, dolink) 3038 struct vnode *dvp, *vp; 3039 struct componentname *cnp; 3040 boolean_t dolink; 3041 { 3042 struct sillyrename *sp; 3043 struct nfsnode *np; 3044 int error; 3045 short pid; 3046 3047 cache_purge(dvp); 3048 np = VTONFS(vp); 3049 #ifndef DIAGNOSTIC 3050 if (vp->v_type == VDIR) 3051 panic("nfs: sillyrename dir"); 3052 #endif 3053 MALLOC(sp, struct sillyrename *, sizeof (struct sillyrename), 3054 M_NFSREQ, M_WAITOK); 3055 sp->s_cred = kauth_cred_dup(cnp->cn_cred); 3056 sp->s_dvp = dvp; 3057 VREF(dvp); 3058 3059 /* Fudge together a funny name */ 3060 pid = cnp->cn_lwp->l_proc->p_pid; 3061 memcpy(sp->s_name, ".nfsAxxxx4.4", 13); 3062 sp->s_namlen = 12; 3063 sp->s_name[8] = hexdigits[pid & 0xf]; 3064 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf]; 3065 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf]; 3066 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf]; 3067 3068 /* Try lookitups until we get one that isn't there */ 3069 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 3070 cnp->cn_lwp, (struct nfsnode **)0) == 0) { 3071 sp->s_name[4]++; 3072 if (sp->s_name[4] > 'z') { 3073 error = EINVAL; 3074 goto bad; 3075 } 3076 } 3077 if (dolink) { 3078 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen, 3079 sp->s_cred, cnp->cn_lwp); 3080 /* 3081 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP. 3082 */ 3083 if (error == ENOTSUP) { 3084 error = nfs_renameit(dvp, cnp, sp); 3085 } 3086 } else { 3087 error = nfs_renameit(dvp, cnp, sp); 3088 } 3089 if (error) 3090 goto bad; 3091 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 3092 cnp->cn_lwp, &np); 3093 np->n_sillyrename = sp; 3094 return (0); 3095 bad: 3096 vrele(sp->s_dvp); 3097 kauth_cred_free(sp->s_cred); 3098 free((caddr_t)sp, M_NFSREQ); 3099 return (error); 3100 } 3101 3102 /* 3103 * Look up a file name and optionally either update the file handle or 3104 * allocate an nfsnode, depending on the value of npp. 3105 * npp == NULL --> just do the lookup 3106 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 3107 * handled too 3108 * *npp != NULL --> update the file handle in the vnode 3109 */ 3110 int 3111 nfs_lookitup(dvp, name, len, cred, l, npp) 3112 struct vnode *dvp; 3113 const char *name; 3114 int len; 3115 kauth_cred_t cred; 3116 struct lwp *l; 3117 struct nfsnode **npp; 3118 { 3119 u_int32_t *tl; 3120 caddr_t cp; 3121 int32_t t1, t2; 3122 struct vnode *newvp = (struct vnode *)0; 3123 struct nfsnode *np, *dnp = VTONFS(dvp); 3124 caddr_t bpos, dpos, cp2; 3125 int error = 0, fhlen; 3126 #ifndef NFS_V2_ONLY 3127 int attrflag; 3128 #endif 3129 struct mbuf *mreq, *mrep, *md, *mb; 3130 nfsfh_t *nfhp; 3131 const int v3 = NFS_ISV3(dvp); 3132 3133 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 3134 nfsm_reqhead(dnp, NFSPROC_LOOKUP, 3135 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 3136 nfsm_fhtom(dnp, v3); 3137 nfsm_strtom(name, len, NFS_MAXNAMLEN); 3138 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred); 3139 if (npp && !error) { 3140 nfsm_getfh(nfhp, fhlen, v3); 3141 if (*npp) { 3142 np = *npp; 3143 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) { 3144 free((caddr_t)np->n_fhp, M_NFSBIGFH); 3145 np->n_fhp = &np->n_fh; 3146 } 3147 #if NFS_SMALLFH < NFSX_V3FHMAX 3148 else if (np->n_fhsize <= NFS_SMALLFH && fhlen>NFS_SMALLFH) 3149 np->n_fhp =(nfsfh_t *)malloc(fhlen,M_NFSBIGFH,M_WAITOK); 3150 #endif 3151 memcpy((caddr_t)np->n_fhp, (caddr_t)nfhp, fhlen); 3152 np->n_fhsize = fhlen; 3153 newvp = NFSTOV(np); 3154 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 3155 VREF(dvp); 3156 newvp = dvp; 3157 np = dnp; 3158 } else { 3159 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 3160 if (error) { 3161 m_freem(mrep); 3162 return (error); 3163 } 3164 newvp = NFSTOV(np); 3165 } 3166 #ifndef NFS_V2_ONLY 3167 if (v3) { 3168 nfsm_postop_attr(newvp, attrflag, 0); 3169 if (!attrflag && *npp == NULL) { 3170 m_freem(mrep); 3171 vput(newvp); 3172 return (ENOENT); 3173 } 3174 } else 3175 #endif 3176 nfsm_loadattr(newvp, (struct vattr *)0, 0); 3177 } 3178 nfsm_reqdone; 3179 if (npp && *npp == NULL) { 3180 if (error) { 3181 if (newvp) 3182 vput(newvp); 3183 } else 3184 *npp = np; 3185 } 3186 return (error); 3187 } 3188 3189 #ifndef NFS_V2_ONLY 3190 /* 3191 * Nfs Version 3 commit rpc 3192 */ 3193 int 3194 nfs_commit(vp, offset, cnt, l) 3195 struct vnode *vp; 3196 off_t offset; 3197 uint32_t cnt; 3198 struct lwp *l; 3199 { 3200 caddr_t cp; 3201 u_int32_t *tl; 3202 int32_t t1, t2; 3203 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 3204 caddr_t bpos, dpos, cp2; 3205 int error = 0, wccflag = NFSV3_WCCRATTR; 3206 struct mbuf *mreq, *mrep, *md, *mb; 3207 struct nfsnode *np; 3208 3209 KASSERT(NFS_ISV3(vp)); 3210 3211 #ifdef NFS_DEBUG_COMMIT 3212 printf("commit %lu - %lu\n", (unsigned long)offset, 3213 (unsigned long)(offset + cnt)); 3214 #endif 3215 3216 simple_lock(&nmp->nm_slock); 3217 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) { 3218 simple_unlock(&nmp->nm_slock); 3219 return (0); 3220 } 3221 simple_unlock(&nmp->nm_slock); 3222 nfsstats.rpccnt[NFSPROC_COMMIT]++; 3223 np = VTONFS(vp); 3224 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1)); 3225 nfsm_fhtom(np, 1); 3226 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3227 txdr_hyper(offset, tl); 3228 tl += 2; 3229 *tl = txdr_unsigned(cnt); 3230 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred); 3231 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, FALSE); 3232 if (!error) { 3233 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF); 3234 simple_lock(&nmp->nm_slock); 3235 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) || 3236 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) { 3237 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF); 3238 error = NFSERR_STALEWRITEVERF; 3239 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF; 3240 } 3241 simple_unlock(&nmp->nm_slock); 3242 } 3243 nfsm_reqdone; 3244 return (error); 3245 } 3246 #endif 3247 3248 /* 3249 * Kludge City.. 3250 * - make nfs_bmap() essentially a no-op that does no translation 3251 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc 3252 * (Maybe I could use the process's page mapping, but I was concerned that 3253 * Kernel Write might not be enabled and also figured copyout() would do 3254 * a lot more work than memcpy() and also it currently happens in the 3255 * context of the swapper process (2). 3256 */ 3257 int 3258 nfs_bmap(v) 3259 void *v; 3260 { 3261 struct vop_bmap_args /* { 3262 struct vnode *a_vp; 3263 daddr_t a_bn; 3264 struct vnode **a_vpp; 3265 daddr_t *a_bnp; 3266 int *a_runp; 3267 } */ *ap = v; 3268 struct vnode *vp = ap->a_vp; 3269 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift; 3270 3271 if (ap->a_vpp != NULL) 3272 *ap->a_vpp = vp; 3273 if (ap->a_bnp != NULL) 3274 *ap->a_bnp = ap->a_bn << bshift; 3275 if (ap->a_runp != NULL) 3276 *ap->a_runp = 1024 * 1024; /* XXX */ 3277 return (0); 3278 } 3279 3280 /* 3281 * Strategy routine. 3282 * For async requests when nfsiod(s) are running, queue the request by 3283 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the 3284 * request. 3285 */ 3286 int 3287 nfs_strategy(v) 3288 void *v; 3289 { 3290 struct vop_strategy_args *ap = v; 3291 struct buf *bp = ap->a_bp; 3292 int error = 0; 3293 3294 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC)) 3295 panic("nfs physio/async"); 3296 3297 /* 3298 * If the op is asynchronous and an i/o daemon is waiting 3299 * queue the request, wake it up and wait for completion 3300 * otherwise just do it ourselves. 3301 */ 3302 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp)) 3303 error = nfs_doio(bp); 3304 return (error); 3305 } 3306 3307 /* 3308 * fsync vnode op. Just call nfs_flush() with commit == 1. 3309 */ 3310 /* ARGSUSED */ 3311 int 3312 nfs_fsync(v) 3313 void *v; 3314 { 3315 struct vop_fsync_args /* { 3316 struct vnodeop_desc *a_desc; 3317 struct vnode * a_vp; 3318 kauth_cred_t a_cred; 3319 int a_flags; 3320 off_t offlo; 3321 off_t offhi; 3322 struct lwp * a_l; 3323 } */ *ap = v; 3324 3325 struct vnode *vp = ap->a_vp; 3326 3327 if (vp->v_type != VREG) 3328 return 0; 3329 3330 return (nfs_flush(vp, ap->a_cred, 3331 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, ap->a_l, 1)); 3332 } 3333 3334 /* 3335 * Flush all the data associated with a vnode. 3336 */ 3337 int 3338 nfs_flush( 3339 struct vnode *vp, 3340 kauth_cred_t cred __unused, 3341 int waitfor __unused, 3342 struct lwp *l __unused, 3343 int commit __unused 3344 ) 3345 { 3346 struct nfsnode *np = VTONFS(vp); 3347 int error; 3348 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO; 3349 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist); 3350 3351 simple_lock(&vp->v_interlock); 3352 error = VOP_PUTPAGES(vp, 0, 0, flushflags); 3353 if (np->n_flag & NWRITEERR) { 3354 error = np->n_error; 3355 np->n_flag &= ~NWRITEERR; 3356 } 3357 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0); 3358 return (error); 3359 } 3360 3361 /* 3362 * Return POSIX pathconf information applicable to nfs. 3363 * 3364 * N.B. The NFS V2 protocol doesn't support this RPC. 3365 */ 3366 /* ARGSUSED */ 3367 int 3368 nfs_pathconf(v) 3369 void *v; 3370 { 3371 struct vop_pathconf_args /* { 3372 struct vnode *a_vp; 3373 int a_name; 3374 register_t *a_retval; 3375 } */ *ap = v; 3376 struct nfsv3_pathconf *pcp; 3377 struct vnode *vp = ap->a_vp; 3378 struct mbuf *mreq, *mrep, *md, *mb; 3379 int32_t t1, t2; 3380 u_int32_t *tl; 3381 caddr_t bpos, dpos, cp, cp2; 3382 int error = 0, attrflag; 3383 #ifndef NFS_V2_ONLY 3384 struct nfsmount *nmp; 3385 unsigned int l; 3386 u_int64_t maxsize; 3387 #endif 3388 const int v3 = NFS_ISV3(vp); 3389 struct nfsnode *np = VTONFS(vp); 3390 3391 switch (ap->a_name) { 3392 /* Names that can be resolved locally. */ 3393 case _PC_PIPE_BUF: 3394 *ap->a_retval = PIPE_BUF; 3395 break; 3396 case _PC_SYNC_IO: 3397 *ap->a_retval = 1; 3398 break; 3399 /* Names that cannot be resolved locally; do an RPC, if possible. */ 3400 case _PC_LINK_MAX: 3401 case _PC_NAME_MAX: 3402 case _PC_CHOWN_RESTRICTED: 3403 case _PC_NO_TRUNC: 3404 if (!v3) { 3405 error = EINVAL; 3406 break; 3407 } 3408 nfsstats.rpccnt[NFSPROC_PATHCONF]++; 3409 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1)); 3410 nfsm_fhtom(np, 1); 3411 nfsm_request(np, NFSPROC_PATHCONF, 3412 curlwp, curlwp->l_cred); /* XXX */ 3413 nfsm_postop_attr(vp, attrflag, 0); 3414 if (!error) { 3415 nfsm_dissect(pcp, struct nfsv3_pathconf *, 3416 NFSX_V3PATHCONF); 3417 switch (ap->a_name) { 3418 case _PC_LINK_MAX: 3419 *ap->a_retval = 3420 fxdr_unsigned(register_t, pcp->pc_linkmax); 3421 break; 3422 case _PC_NAME_MAX: 3423 *ap->a_retval = 3424 fxdr_unsigned(register_t, pcp->pc_namemax); 3425 break; 3426 case _PC_CHOWN_RESTRICTED: 3427 *ap->a_retval = 3428 (pcp->pc_chownrestricted == nfs_true); 3429 break; 3430 case _PC_NO_TRUNC: 3431 *ap->a_retval = 3432 (pcp->pc_notrunc == nfs_true); 3433 break; 3434 } 3435 } 3436 nfsm_reqdone; 3437 break; 3438 case _PC_FILESIZEBITS: 3439 #ifndef NFS_V2_ONLY 3440 if (v3) { 3441 nmp = VFSTONFS(vp->v_mount); 3442 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0) 3443 if ((error = nfs_fsinfo(nmp, vp, 3444 curlwp->l_cred, curlwp)) != 0) /* XXX */ 3445 break; 3446 for (l = 0, maxsize = nmp->nm_maxfilesize; 3447 (maxsize >> l) > 0; l++) 3448 ; 3449 *ap->a_retval = l + 1; 3450 } else 3451 #endif 3452 { 3453 *ap->a_retval = 32; /* NFS V2 limitation */ 3454 } 3455 break; 3456 default: 3457 error = EINVAL; 3458 break; 3459 } 3460 3461 return (error); 3462 } 3463 3464 /* 3465 * NFS advisory byte-level locks. 3466 */ 3467 int 3468 nfs_advlock(v) 3469 void *v; 3470 { 3471 struct vop_advlock_args /* { 3472 struct vnode *a_vp; 3473 caddr_t a_id; 3474 int a_op; 3475 struct flock *a_fl; 3476 int a_flags; 3477 } */ *ap = v; 3478 struct nfsnode *np = VTONFS(ap->a_vp); 3479 3480 return lf_advlock(ap, &np->n_lockf, np->n_size); 3481 } 3482 3483 /* 3484 * Print out the contents of an nfsnode. 3485 */ 3486 int 3487 nfs_print(v) 3488 void *v; 3489 { 3490 struct vop_print_args /* { 3491 struct vnode *a_vp; 3492 } */ *ap = v; 3493 struct vnode *vp = ap->a_vp; 3494 struct nfsnode *np = VTONFS(vp); 3495 3496 printf("tag VT_NFS, fileid %lld fsid 0x%lx", 3497 (unsigned long long)np->n_vattr->va_fileid, np->n_vattr->va_fsid); 3498 if (vp->v_type == VFIFO) 3499 fifo_printinfo(vp); 3500 printf("\n"); 3501 return (0); 3502 } 3503 3504 /* 3505 * nfs unlock wrapper. 3506 */ 3507 int 3508 nfs_unlock(void *v) 3509 { 3510 struct vop_unlock_args /* { 3511 struct vnode *a_vp; 3512 int a_flags; 3513 } */ *ap = v; 3514 struct vnode *vp = ap->a_vp; 3515 3516 /* 3517 * VOP_UNLOCK can be called by nfs_loadattrcache 3518 * with v_data == 0. 3519 */ 3520 if (VTONFS(vp)) { 3521 nfs_delayedtruncate(vp); 3522 } 3523 3524 return genfs_unlock(v); 3525 } 3526 3527 /* 3528 * nfs special file access vnode op. 3529 * Essentially just get vattr and then imitate iaccess() since the device is 3530 * local to the client. 3531 */ 3532 int 3533 nfsspec_access(v) 3534 void *v; 3535 { 3536 struct vop_access_args /* { 3537 struct vnode *a_vp; 3538 int a_mode; 3539 kauth_cred_t a_cred; 3540 struct lwp *a_l; 3541 } */ *ap = v; 3542 struct vattr va; 3543 struct vnode *vp = ap->a_vp; 3544 int error; 3545 3546 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_l); 3547 if (error) 3548 return (error); 3549 3550 /* 3551 * Disallow write attempts on filesystems mounted read-only; 3552 * unless the file is a socket, fifo, or a block or character 3553 * device resident on the filesystem. 3554 */ 3555 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3556 switch (vp->v_type) { 3557 case VREG: 3558 case VDIR: 3559 case VLNK: 3560 return (EROFS); 3561 default: 3562 break; 3563 } 3564 } 3565 3566 return (vaccess(va.va_type, va.va_mode, 3567 va.va_uid, va.va_gid, ap->a_mode, ap->a_cred)); 3568 } 3569 3570 /* 3571 * Read wrapper for special devices. 3572 */ 3573 int 3574 nfsspec_read(v) 3575 void *v; 3576 { 3577 struct vop_read_args /* { 3578 struct vnode *a_vp; 3579 struct uio *a_uio; 3580 int a_ioflag; 3581 kauth_cred_t a_cred; 3582 } */ *ap = v; 3583 struct nfsnode *np = VTONFS(ap->a_vp); 3584 3585 /* 3586 * Set access flag. 3587 */ 3588 np->n_flag |= NACC; 3589 getnanotime(&np->n_atim); 3590 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap)); 3591 } 3592 3593 /* 3594 * Write wrapper for special devices. 3595 */ 3596 int 3597 nfsspec_write(v) 3598 void *v; 3599 { 3600 struct vop_write_args /* { 3601 struct vnode *a_vp; 3602 struct uio *a_uio; 3603 int a_ioflag; 3604 kauth_cred_t a_cred; 3605 } */ *ap = v; 3606 struct nfsnode *np = VTONFS(ap->a_vp); 3607 3608 /* 3609 * Set update flag. 3610 */ 3611 np->n_flag |= NUPD; 3612 getnanotime(&np->n_mtim); 3613 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap)); 3614 } 3615 3616 /* 3617 * Close wrapper for special devices. 3618 * 3619 * Update the times on the nfsnode then do device close. 3620 */ 3621 int 3622 nfsspec_close(v) 3623 void *v; 3624 { 3625 struct vop_close_args /* { 3626 struct vnode *a_vp; 3627 int a_fflag; 3628 kauth_cred_t a_cred; 3629 struct lwp *a_l; 3630 } */ *ap = v; 3631 struct vnode *vp = ap->a_vp; 3632 struct nfsnode *np = VTONFS(vp); 3633 struct vattr vattr; 3634 3635 if (np->n_flag & (NACC | NUPD)) { 3636 np->n_flag |= NCHG; 3637 if (vp->v_usecount == 1 && 3638 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3639 VATTR_NULL(&vattr); 3640 if (np->n_flag & NACC) 3641 vattr.va_atime = np->n_atim; 3642 if (np->n_flag & NUPD) 3643 vattr.va_mtime = np->n_mtim; 3644 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l); 3645 } 3646 } 3647 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap)); 3648 } 3649 3650 /* 3651 * Read wrapper for fifos. 3652 */ 3653 int 3654 nfsfifo_read(v) 3655 void *v; 3656 { 3657 struct vop_read_args /* { 3658 struct vnode *a_vp; 3659 struct uio *a_uio; 3660 int a_ioflag; 3661 kauth_cred_t a_cred; 3662 } */ *ap = v; 3663 struct nfsnode *np = VTONFS(ap->a_vp); 3664 3665 /* 3666 * Set access flag. 3667 */ 3668 np->n_flag |= NACC; 3669 getnanotime(&np->n_atim); 3670 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap)); 3671 } 3672 3673 /* 3674 * Write wrapper for fifos. 3675 */ 3676 int 3677 nfsfifo_write(v) 3678 void *v; 3679 { 3680 struct vop_write_args /* { 3681 struct vnode *a_vp; 3682 struct uio *a_uio; 3683 int a_ioflag; 3684 kauth_cred_t a_cred; 3685 } */ *ap = v; 3686 struct nfsnode *np = VTONFS(ap->a_vp); 3687 3688 /* 3689 * Set update flag. 3690 */ 3691 np->n_flag |= NUPD; 3692 getnanotime(&np->n_mtim); 3693 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); 3694 } 3695 3696 /* 3697 * Close wrapper for fifos. 3698 * 3699 * Update the times on the nfsnode then do fifo close. 3700 */ 3701 int 3702 nfsfifo_close(v) 3703 void *v; 3704 { 3705 struct vop_close_args /* { 3706 struct vnode *a_vp; 3707 int a_fflag; 3708 kauth_cred_t a_cred; 3709 struct lwp *a_l; 3710 } */ *ap = v; 3711 struct vnode *vp = ap->a_vp; 3712 struct nfsnode *np = VTONFS(vp); 3713 struct vattr vattr; 3714 3715 if (np->n_flag & (NACC | NUPD)) { 3716 struct timespec ts; 3717 3718 getnanotime(&ts); 3719 if (np->n_flag & NACC) 3720 np->n_atim = ts; 3721 if (np->n_flag & NUPD) 3722 np->n_mtim = ts; 3723 np->n_flag |= NCHG; 3724 if (vp->v_usecount == 1 && 3725 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3726 VATTR_NULL(&vattr); 3727 if (np->n_flag & NACC) 3728 vattr.va_atime = np->n_atim; 3729 if (np->n_flag & NUPD) 3730 vattr.va_mtime = np->n_mtim; 3731 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_l); 3732 } 3733 } 3734 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap)); 3735 } 3736