/*	$NetBSD: nfs_vnops.c,v 1.316 2020/06/27 17:29:19 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_vnops.c	8.19 (Berkeley) 7/31/95
 */

/*
 * vnode op calls for Sun NFS version 2 and 3
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.316 2020/06/27 17:29:19 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/disk.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/hash.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/kauth.h>
#include <sys/cprng.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_var.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

/*
 * Global vfs data structures for nfs
 */
int (**nfsv2_vnodeop_p)(void *);
const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, nfs_lookup },	/* lookup */
	{ &vop_create_desc, nfs_create },	/* create */
	{ &vop_mknod_desc, nfs_mknod },		/* mknod */
	{ &vop_open_desc, nfs_open },		/* open */
	{ &vop_close_desc, nfs_close },		/* close */
	{ &vop_access_desc, nfs_access },	/* access */
	{ &vop_accessx_desc, genfs_accessx },	/* accessx */
	{ &vop_getattr_desc, nfs_getattr },	/* getattr */
	{ &vop_setattr_desc, nfs_setattr },	/* setattr */
	{ &vop_read_desc, nfs_read },		/* read */
	{ &vop_write_desc, nfs_write },		/* write */
	{ &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
	{ &vop_fdiscard_desc, genfs_eopnotsupp },	/* fdiscard */
	{ &vop_fcntl_desc, genfs_fcntl },	/* fcntl */
	{ &vop_ioctl_desc, nfs_ioctl },		/* ioctl */
	{ &vop_poll_desc, nfs_poll },		/* poll */
	{ &vop_kqfilter_desc, nfs_kqfilter },	/* kqfilter */
	{ &vop_revoke_desc, nfs_revoke },	/* revoke */
	{ &vop_mmap_desc, nfs_mmap },		/* mmap */
	{ &vop_fsync_desc, nfs_fsync },		/* fsync */
	{ &vop_seek_desc, nfs_seek },		/* seek */
	{ &vop_remove_desc, nfs_remove },	/* remove */
	{ &vop_link_desc, nfs_link },		/* link */
	{ &vop_rename_desc, nfs_rename },	/* rename */
	{ &vop_mkdir_desc, nfs_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, nfs_rmdir },		/* rmdir */
	{ &vop_symlink_desc, nfs_symlink },	/* symlink */
	{ &vop_readdir_desc, nfs_readdir },	/* readdir */
	{ &vop_readlink_desc, nfs_readlink },	/* readlink */
	{ &vop_abortop_desc, nfs_abortop },	/* abortop */
	{ &vop_inactive_desc, nfs_inactive },	/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },	/* reclaim */
	{ &vop_lock_desc, nfs_lock },		/* lock */
	{ &vop_unlock_desc, nfs_unlock },	/* unlock */
	{ &vop_bmap_desc, nfs_bmap },		/* bmap */
	{ &vop_strategy_desc, nfs_strategy },	/* strategy */
	{ &vop_print_desc, nfs_print },		/* print */
	{ &vop_islocked_desc, nfs_islocked },	/* islocked */
	{ &vop_pathconf_desc, nfs_pathconf },	/* pathconf */
	{ &vop_advlock_desc, nfs_advlock },	/* advlock */
	{ &vop_bwrite_desc, genfs_badop },	/* bwrite */
	{ &vop_getpages_desc, nfs_getpages },	/* getpages */
	{ &vop_putpages_desc, genfs_putpages },	/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };

/*
 * Special device vnode ops
 */
int (**spec_nfsv2nodeop_p)(void *);
const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },	/* lookup */
	{ &vop_create_desc, spec_create },	/* create */
	{ &vop_mknod_desc, spec_mknod },	/* mknod */
	{ &vop_open_desc, spec_open },		/* open */
	{ &vop_close_desc, nfsspec_close },	/* close */
	{ &vop_access_desc, nfsspec_access },	/* access */
	{ &vop_accessx_desc, genfs_accessx },	/* accessx */
	{ &vop_getattr_desc, nfs_getattr },	/* getattr */
	{ &vop_setattr_desc, nfs_setattr },	/* setattr */
	{ &vop_read_desc, nfsspec_read },	/* read */
	{ &vop_write_desc, nfsspec_write },	/* write */
	{ &vop_fallocate_desc, spec_fallocate },	/* fallocate */
	{ &vop_fdiscard_desc, spec_fdiscard },	/* fdiscard */
	{ &vop_fcntl_desc, genfs_fcntl },	/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },	/* ioctl */
	{ &vop_poll_desc, spec_poll },		/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },	/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },	/* revoke */
	{ &vop_mmap_desc, spec_mmap },		/* mmap */
	{ &vop_fsync_desc, spec_fsync },	/* fsync */
	{ &vop_seek_desc, spec_seek },		/* seek */
	{ &vop_remove_desc, spec_remove },	/* remove */
	{ &vop_link_desc, spec_link },		/* link */
	{ &vop_rename_desc, spec_rename },	/* rename */
	{ &vop_mkdir_desc, spec_mkdir },	/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },	/* rmdir */
	{ &vop_symlink_desc, spec_symlink },	/* symlink */
	{ &vop_readdir_desc, spec_readdir },	/* readdir */
	{ &vop_readlink_desc, spec_readlink },	/* readlink */
	{ &vop_abortop_desc, spec_abortop },	/* abortop */
	{ &vop_inactive_desc, nfs_inactive },	/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },	/* reclaim */
	{ &vop_lock_desc, nfs_lock },		/* lock */
	{ &vop_unlock_desc, nfs_unlock },	/* unlock */
	{ &vop_bmap_desc, spec_bmap },		/* bmap */
	{ &vop_strategy_desc, spec_strategy },	/* strategy */
	{ &vop_print_desc, nfs_print },		/* print */
	{ &vop_islocked_desc, nfs_islocked },	/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },	/* pathconf */
	{ &vop_advlock_desc, spec_advlock },	/* advlock */
	{ &vop_bwrite_desc, spec_bwrite },	/* bwrite */
	{ &vop_getpages_desc, spec_getpages },	/* getpages */
	{ &vop_putpages_desc, spec_putpages },	/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
	{ &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };

int (**fifo_nfsv2nodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, vn_fifo_bypass },	/* lookup */
	{ &vop_create_desc, vn_fifo_bypass },	/* create */
	{ &vop_mknod_desc, vn_fifo_bypass },	/* mknod */
	{ &vop_open_desc, vn_fifo_bypass },	/* open */
	{ &vop_close_desc, nfsfifo_close },	/* close */
	{ &vop_access_desc, nfsspec_access },	/* access */
	{ &vop_accessx_desc, genfs_accessx },	/* accessx */
	{ &vop_getattr_desc, nfs_getattr },	/* getattr */
	{ &vop_setattr_desc, nfs_setattr },	/* setattr */
	{ &vop_read_desc, nfsfifo_read },	/* read */
	{ &vop_write_desc, nfsfifo_write },	/* write */
	{ &vop_fallocate_desc, vn_fifo_bypass },	/* fallocate */
	{ &vop_fdiscard_desc, vn_fifo_bypass },	/* fdiscard */
	{ &vop_fcntl_desc, genfs_fcntl },	/* fcntl */
	{ &vop_ioctl_desc, vn_fifo_bypass },	/* ioctl */
	{ &vop_poll_desc, vn_fifo_bypass },	/* poll */
	{ &vop_kqfilter_desc, vn_fifo_bypass },	/* kqfilter */
	{ &vop_revoke_desc, vn_fifo_bypass },	/* revoke */
	{ &vop_mmap_desc, vn_fifo_bypass },	/* mmap */
	{ &vop_fsync_desc, nfs_fsync },		/* fsync */
	{ &vop_seek_desc, vn_fifo_bypass },	/* seek */
	{ &vop_remove_desc, vn_fifo_bypass },	/* remove */
	{ &vop_link_desc, vn_fifo_bypass },	/* link */
	{ &vop_rename_desc, vn_fifo_bypass },	/* rename */
	{ &vop_mkdir_desc, vn_fifo_bypass },	/* mkdir */
	{ &vop_rmdir_desc, vn_fifo_bypass },	/* rmdir */
	{ &vop_symlink_desc, vn_fifo_bypass },	/* symlink */
	{ &vop_readdir_desc, vn_fifo_bypass },	/* readdir */
	{ &vop_readlink_desc, vn_fifo_bypass },	/* readlink */
	{ &vop_abortop_desc, vn_fifo_bypass },	/* abortop */
	{ &vop_inactive_desc, nfs_inactive },	/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },	/* reclaim */
	{ &vop_lock_desc, nfs_lock },		/* lock */
	{ &vop_unlock_desc, nfs_unlock },	/* unlock */
	{ &vop_bmap_desc, vn_fifo_bypass },	/* bmap */
	{ &vop_strategy_desc, genfs_badop },	/* strategy */
	{ &vop_print_desc, nfs_print },		/* print */
	{ &vop_islocked_desc, nfs_islocked },	/* islocked */
	{ &vop_pathconf_desc, vn_fifo_bypass },	/* pathconf */
	{ &vop_advlock_desc, vn_fifo_bypass },	/* advlock */
	{ &vop_bwrite_desc, genfs_badop },	/* bwrite */
	{ &vop_putpages_desc, vn_fifo_bypass },	/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
	{ &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };

static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
    size_t, kauth_cred_t, struct lwp *);
static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);

/*
 * Global variables
 */
extern u_int32_t nfs_true, nfs_false;
extern u_int32_t nfs_xdrneg1;
extern const nfstype nfsv3_type[9];

int nfs_numasync = 0;

#define	DIRHDSIZ	_DIRENT_NAMEOFF(dp)
#define	UIO_ADVANCE(uio, siz) \
    (void)((uio)->uio_resid -= (siz), \
    (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
    (uio)->uio_iov->iov_len -= (siz))

static void nfs_cache_enter(struct vnode *, struct vnode *,
    struct componentname *);

static void
nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp)
{
	struct nfsnode *dnp = VTONFS(dvp);

	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		return;
	}
	if (vp != NULL) {
		struct nfsnode *np = VTONFS(vp);

		np->n_ctime = np->n_vattr->va_ctime.tv_sec;
	}

	if (!timespecisset(&dnp->n_nctime))
		dnp->n_nctime = dnp->n_vattr->va_mtime;

	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);
}

/*
 * nfs null call from vfs.
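 * (NFSPROC_NULL carries no arguments or results; it is only useful for
 * checking that the server is responding.)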
 */
int
nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
{
	char *bpos, *dpos;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb __unused;
	struct nfsnode *np = VTONFS(vp);

	nfsm_reqhead(np, NFSPROC_NULL, 0);
	nfsm_request(np, NFSPROC_NULL, l, cred);
	nfsm_reqdone;
	return (error);
}

/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs version 3, use the access rpc to check accessibility. If file modes
 * are changed on the server, accesses might still fail later.
 */
int
nfs_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		accmode_t a_accmode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
#ifndef NFS_V2_ONLY
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	int error = 0, attrflag;
	struct mbuf *mreq, *mrep, *md, *mb;
	u_int32_t mode, rmode;
	const int v3 = NFS_ISV3(vp);
#endif
	int cachevalid;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	cachevalid = (np->n_accstamp != -1 &&
	    (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
	    np->n_accuid == kauth_cred_geteuid(ap->a_cred));

	/*
	 * Check access cache first. If this request has been made for this
	 * uid shortly before, use the cached result.
	 */
	if (cachevalid) {
		if (!np->n_accerror) {
			if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
				return np->n_accerror;
		} else if ((np->n_accmode & ap->a_accmode) == np->n_accmode)
			return np->n_accerror;
	}

#ifndef NFS_V2_ONLY
	/*
	 * For nfs v3, do an access rpc, otherwise you are stuck emulating
	 * ufs_access() locally using the vattr. This may not be correct,
	 * since the server may apply other access criteria such as
	 * client uid-->server uid mapping that we do not know about, but
	 * this is better than just returning anything that is lying about
	 * in the cache.
	 */
	if (v3) {
		nfsstats.rpccnt[NFSPROC_ACCESS]++;
		nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
		nfsm_fhtom(np, v3);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		if (ap->a_accmode & VREAD)
			mode = NFSV3ACCESS_READ;
		else
			mode = 0;
		if (vp->v_type != VDIR) {
			if (ap->a_accmode & VWRITE)
				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
			if (ap->a_accmode & VEXEC)
				mode |= NFSV3ACCESS_EXECUTE;
		} else {
			if (ap->a_accmode & VWRITE)
				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
				    NFSV3ACCESS_DELETE);
			if (ap->a_accmode & VEXEC)
				mode |= NFSV3ACCESS_LOOKUP;
		}
		*tl = txdr_unsigned(mode);
		nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
		nfsm_postop_attr(vp, attrflag, 0);
		if (!error) {
			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
			rmode = fxdr_unsigned(u_int32_t, *tl);
			/*
			 * The NFS V3 spec does not clarify whether or not
			 * the returned access bits can be a superset of
			 * the ones requested, so...
			 */
			if ((rmode & mode) != mode)
				error = EACCES;
		}
		nfsm_reqdone;
	} else
#endif
		return (nfsspec_access(ap));
#ifndef NFS_V2_ONLY
	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if (!error && (ap->a_accmode & VWRITE) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			error = EROFS;
		default:
			break;
		}
	}

	if (!error || error == EACCES) {
		/*
		 * If we got the same result as for a previous,
		 * different request, OR it in. Don't update
		 * the timestamp in that case.
		 */
		if (cachevalid && np->n_accstamp != -1 &&
		    error == np->n_accerror) {
			if (!error)
				np->n_accmode |= ap->a_accmode;
			else if ((np->n_accmode & ap->a_accmode) == ap->a_accmode)
				np->n_accmode = ap->a_accmode;
		} else {
			np->n_accstamp = time_uptime;
			np->n_accuid = kauth_cred_geteuid(ap->a_cred);
			np->n_accmode = ap->a_accmode;
			np->n_accerror = error;
		}
	}

	return (error);
#endif
}

/*
 * nfs open vnode op
 * Check to see if the type is ok
 * and that deletion is not in progress.
 * For paged in text files, you will need to flush the page cache
 * if consistency is lost.
 */
/* ARGSUSED */
int
nfs_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int error;

	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
		return (EACCES);
	}

	if (ap->a_mode & FREAD) {
		if (np->n_rcred != NULL)
			kauth_cred_free(np->n_rcred);
		np->n_rcred = ap->a_cred;
		kauth_cred_hold(np->n_rcred);
	}
	if (ap->a_mode & FWRITE) {
		if (np->n_wcred != NULL)
			kauth_cred_free(np->n_wcred);
		np->n_wcred = ap->a_cred;
		kauth_cred_hold(np->n_wcred);
	}

	error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
	if (error)
		return error;

	NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */

	return (0);
}

/*
 * nfs close vnode op
 * What an NFS client should do upon close after writing is a debatable issue.
 * Most NFS clients push delayed writes to the server upon close, basically for
 * two reasons:
 * 1 - So that any write errors may be reported back to the client process
 *     doing the close system call. By far the two most likely errors are
 *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
 * 2 - To put a worst case upper bound on cache inconsistency between
 *     multiple clients for the file.
 * There is also a consistency problem for Version 2 of the protocol w.r.t.
 * not being able to tell if other clients are writing a file concurrently,
 * since there is no way of knowing if the changed modify time in the reply
 * is only due to the write for this client.
 * (NFS Version 3 provides weak cache consistency data in the reply that
 * should be sufficient to detect and handle this case.)
 *
 * The current code does the following:
 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
 *                     or commit them (this satisfies 1 and 2 except for the
 *                     case where the server crashes after this close but
 *                     before the commit RPC, which is felt to be "good
 *                     enough"). Changing the last argument to nfs_flush() to
 *                     a 1 would force a commit operation, if it is felt a
 *                     commit is necessary now.
 */
/* ARGSUSED */
int
nfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);

	if (vp->v_type == VREG) {
		if (np->n_flag & NMODIFIED) {
#ifndef NFS_V2_ONLY
			if (NFS_ISV3(vp)) {
				error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
				np->n_flag &= ~NMODIFIED;
			} else
#endif
				error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
			NFS_INVALIDATE_ATTRCACHE(np);
		}
		if (np->n_flag & NWRITEERR) {
			np->n_flag &= ~NWRITEERR;
			error = np->n_error;
		}
	}
	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
	return (error);
}

/*
 * nfs getattr call from vfs.
 */
int
nfs_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	char *cp;
	u_int32_t *tl;
	int32_t t1, t2;
	char *bpos, *dpos;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);

	/*
	 * Update local times for special files.
	 */
	if (np->n_flag & (NACC | NUPD))
		np->n_flag |= NCHG;

	/*
	 * if we have delayed truncation, do it now.
	 */
	nfs_delayedtruncate(vp);

	/*
	 * First look in the cache.
	 */
	if (nfs_getattrcache(vp, ap->a_vap) == 0)
		return (0);
	nfsstats.rpccnt[NFSPROC_GETATTR]++;
	nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
	nfsm_fhtom(np, v3);
	nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
	if (!error) {
		nfsm_loadattr(vp, ap->a_vap, 0);
		if (vp->v_type == VDIR &&
		    ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
			ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs setattr call.
 */
int
nfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;
	u_quad_t tsize = 0;

	/*
	 * Setting of flags is not supported.
	 */
	if (vap->va_flags != VNOVAL)
		return (EOPNOTSUPP);

	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
			return EFBIG;
		}
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_mtime.tv_sec == VNOVAL &&
			    vap->va_atime.tv_sec == VNOVAL &&
			    vap->va_mode == (mode_t)VNOVAL &&
			    vap->va_uid == (uid_t)VNOVAL &&
			    vap->va_gid == (gid_t)VNOVAL)
				return (0);
			vap->va_size = VNOVAL;
			break;
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			genfs_node_wrlock(vp);
			uvm_vnp_setsize(vp, vap->va_size);
			tsize = np->n_size;
			np->n_size = vap->va_size;
			if (vap->va_size == 0)
				error = nfs_vinvalbuf(vp, 0,
				    ap->a_cred, curlwp, 1);
			else
				error = nfs_vinvalbuf(vp, V_SAVE,
				    ap->a_cred, curlwp, 1);
			if (error) {
				uvm_vnp_setsize(vp, tsize);
				genfs_node_unlock(vp);
				return (error);
			}
			np->n_vattr->va_size = vap->va_size;
		}
	} else {
		/*
		 * flush files before setattr because a later write of
		 * cached data might change timestamps or reset sugid bits
		 */
		if ((vap->va_mtime.tv_sec != VNOVAL ||
		    vap->va_atime.tv_sec != VNOVAL ||
		    vap->va_mode != VNOVAL) &&
		    vp->v_type == VREG &&
		    (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
		    curlwp, 1)) == EINTR)
			return (error);
	}
	error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
	if (vap->va_size != VNOVAL) {
		if (error) {
			np->n_size = np->n_vattr->va_size = tsize;
			uvm_vnp_setsize(vp, np->n_size);
		}
		genfs_node_unlock(vp);
	}
	VN_KNOTE(vp, NOTE_ATTRIB);
	return (error);
}

/*
 * Do an nfs setattr rpc.
 */
int
nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
{
	struct nfsv2_sattr *sp;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos;
	u_int32_t *tl;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int wccflag = NFSV3_WCCRATTR;
	char *cp2;
#endif

	nfsstats.rpccnt[NFSPROC_SETATTR]++;
	nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
	nfsm_fhtom(np, v3);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_v3attrbuild(vap, true);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		*tl = nfs_false;
	} else {
#endif
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		if (vap->va_mode == (mode_t)VNOVAL)
			sp->sa_mode = nfs_xdrneg1;
		else
			sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
		if (vap->va_uid == (uid_t)VNOVAL)
			sp->sa_uid = nfs_xdrneg1;
		else
			sp->sa_uid = txdr_unsigned(vap->va_uid);
		if (vap->va_gid == (gid_t)VNOVAL)
			sp->sa_gid = nfs_xdrneg1;
		else
			sp->sa_gid = txdr_unsigned(vap->va_gid);
		sp->sa_size = txdr_unsigned(vap->va_size);
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
#ifndef NFS_V2_ONLY
	}
#endif
	nfsm_request(np, NFSPROC_SETATTR, l, cred);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
	} else
#endif
		nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
	nfsm_reqdone;
	return (error);
}

/*
 * nfs lookup call, one step at a time...
 * First look in cache
 * If not found, do the rpc.
 */
int
nfs_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	int flags;
	struct vnode *newvp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	struct mbuf *mreq, *mrep, *md, *mb;
	long len;
	nfsfh_t *fhp;
	struct nfsnode *np;
	int cachefound;
	int error = 0, attrflag, fhsize;
	const int v3 = NFS_ISV3(dvp);

	flags = cnp->cn_flags;

	*vpp = NULLVP;
	newvp = NULLVP;
	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
	 */
	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
		if (error)
			return error;
		if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
			return EISDIR;
		vref(dvp);
		*vpp = dvp;
		return 0;
	}

	np = VTONFS(dvp);

	/*
	 * Before performing an RPC, check the name cache to see if
	 * the directory/name pair we are looking for is known already.
	 * If the directory/name pair is found in the name cache,
	 * we have to ensure the directory has not changed from
	 * the time the cache entry has been created. If it has,
	 * the cache entry has to be ignored.
	 */
	cachefound = cache_lookup_raw(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_flags, NULL, vpp);
	KASSERT(dvp != *vpp);
	KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
	if (cachefound) {
		struct vattr vattr;

		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
		if (error != 0) {
			if (*vpp != NULLVP)
				vrele(*vpp);
			*vpp = NULLVP;
			return error;
		}

		if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
		    || timespeccmp(&vattr.va_mtime,
		    &VTONFS(dvp)->n_nctime, !=)) {
			if (*vpp != NULLVP) {
				vrele(*vpp);
				*vpp = NULLVP;
			}
			cache_purge1(dvp, NULL, 0, PURGE_CHILDREN);
			timespecclear(&np->n_nctime);
			goto dorpc;
		}

		if (*vpp == NULLVP) {
			/* namecache gave us a negative result */
			error = ENOENT;
			goto noentry;
		}

		/*
		 * investigate the vnode returned by cache_lookup_raw.
		 * if it isn't appropriate, do an rpc.
		 */
		newvp = *vpp;
		if ((flags & ISDOTDOT) != 0) {
			VOP_UNLOCK(dvp);
		}
		error = vn_lock(newvp, LK_SHARED);
		if ((flags & ISDOTDOT) != 0) {
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		if (error != 0) {
			/* newvp has been reclaimed. */
			vrele(newvp);
			*vpp = NULLVP;
			goto dorpc;
		}
		if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
		    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
			nfsstats.lookupcache_hits++;
			KASSERT(newvp->v_type != VNON);
			VOP_UNLOCK(newvp);
			return (0);
		}
		cache_purge1(newvp, NULL, 0, PURGE_PARENTS);
		vput(newvp);
		*vpp = NULLVP;
	}
dorpc:
#if 0
	/*
	 * because nfsv3 has the same CREATE semantics as ours,
	 * we don't have to perform LOOKUPs beforehand.
	 *
	 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
	 * XXX although we have no way to know if O_EXCL is requested or not.
	 */

	if (v3 && cnp->cn_nameiop == CREATE &&
	    (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		return (EJUSTRETURN);
	}
#endif /* 0 */

	error = 0;
	newvp = NULLVP;
	nfsstats.lookupcache_misses++;
	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
	len = cnp->cn_namelen;
	nfsm_reqhead(np, NFSPROC_LOOKUP,
	    NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
	nfsm_fhtom(np, v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
	nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
	if (error) {
		nfsm_postop_attr(dvp, attrflag, 0);
		m_freem(mrep);
		goto nfsmout;
	}
	nfsm_getfh(fhp, fhsize, v3);

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, fhp, fhsize)) {
			m_freem(mrep);
			return (EISDIR);
		}
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
		*vpp = newvp;
		m_freem(mrep);
		goto validate;
	}

	/*
	 * The postop attr handling is duplicated for each if case,
	 * because it should be done while dvp is locked (unlocking
	 * dvp is different for each case).
	 */

	if (NFS_CMPFH(np, fhp, fhsize)) {
		/*
		 * As we handle "." lookup locally, this is
		 * a broken server.
		 */
		m_freem(mrep);
		return EBADRPC;
	} else if (flags & ISDOTDOT) {
		/*
		 * ".." lookup
		 */
		VOP_UNLOCK(dvp);
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);

#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
	} else {
		/*
		 * Other lookups.
		 */
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
	}
	if (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) {
		nfs_cache_enter(dvp, newvp, cnp);
	}
	*vpp = newvp;
	nfsm_reqdone;
	if (error) {
		/*
		 * We get here only because of errors returned by
		 * the RPC. Otherwise we'll have returned above
		 * (the nfsm_* macros will jump to nfsm_reqdone
		 * on error).
		 */
		if (error == ENOENT && cnp->cn_nameiop != CREATE) {
			nfs_cache_enter(dvp, NULL, cnp);
		}
		if (newvp != NULLVP) {
			if (newvp == dvp) {
				vrele(newvp);
			} else {
				vput(newvp);
			}
		}
noentry:
		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
		    (flags & ISLASTCN) && error == ENOENT) {
			if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
				error = EROFS;
			} else {
				error = EJUSTRETURN;
			}
		}
		*vpp = NULL;
		return error;
	}

validate:
	/*
	 * make sure we have valid type and size.
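	 * (a lookup reply without attributes leaves the new vnode as VNON;
	 * the VOP_GETATTR below then fetches them so the type gets set.)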
	 */

	newvp = *vpp;
	if (newvp->v_type == VNON) {
		struct vattr vattr; /* dummy */

		KASSERT(VTONFS(newvp)->n_attrstamp == 0);
		error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
		if (error) {
			vput(newvp);
			*vpp = NULL;
		}
	}
	if (error)
		return error;
	if (newvp != dvp)
		VOP_UNLOCK(newvp);
	return 0;
}

/*
 * nfs read call.
 * Just call nfs_bioread() to do the work.
 */
int
nfs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VREG)
		return EISDIR;
	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
}

/*
 * nfs readlink call
 */
int
nfs_readlink(void *v)
{
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);

	if (vp->v_type != VLNK)
		return (EPERM);

	if (np->n_rcred != NULL) {
		kauth_cred_free(np->n_rcred);
	}
	np->n_rcred = ap->a_cred;
	kauth_cred_hold(np->n_rcred);

	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
}

/*
 * Do a readlink rpc.
 * Called by nfs_doio() from below the buffer cache.
 */
int
nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	int error = 0;
	uint32_t len;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int attrflag;
#endif

	nfsstats.rpccnt[NFSPROC_READLINK]++;
	nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
	nfsm_fhtom(np, v3);
	nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_postop_attr(vp, attrflag, 0);
#endif
	if (!error) {
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
			len = fxdr_unsigned(uint32_t, *tl);
			if (len > NFS_MAXPATHLEN) {
				/*
				 * this pathname is too long for us.
				 */
				m_freem(mrep);
				/* Solaris returns EINVAL. should we follow? */
				error = ENAMETOOLONG;
				goto nfsmout;
			}
		} else
#endif
		{
			nfsm_strsiz(len, NFS_MAXPATHLEN);
		}
		nfsm_mtouio(uiop, len);
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs read rpc call
 * Ditto above
 */
int
nfs_readrpc(struct vnode *vp, struct uio *uiop)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsmount *nmp;
	int error = 0, len, retlen, tsiz, eof __unused, byte_count;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int attrflag;
#endif

#ifndef nolint
	eof = 0;
#endif
	nmp = VFSTONFS(vp->v_mount);
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return (EFBIG);
	iostat_busy(nmp->nm_stats);
	byte_count = 0; /* count bytes actually transferred */
	while (tsiz > 0) {
		nfsstats.rpccnt[NFSPROC_READ]++;
		len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
		nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
		nfsm_fhtom(np, v3);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
#ifndef NFS_V2_ONLY
		if (v3) {
			txdr_hyper(uiop->uio_offset, tl);
			*(tl + 2) = txdr_unsigned(len);
		} else
#endif
		{
			*tl++ = txdr_unsigned(uiop->uio_offset);
			*tl++ = txdr_unsigned(len);
			*tl = 0;
		}
		nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
			if (error) {
				m_freem(mrep);
				goto nfsmout;
			}
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			eof = fxdr_unsigned(int, *(tl + 1));
		} else
#endif
			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
		nfsm_strsiz(retlen, nmp->nm_rsize);
		nfsm_mtouio(uiop, retlen);
		m_freem(mrep);
		tsiz -= retlen;
		byte_count += retlen;
#ifndef NFS_V2_ONLY
		if (v3) {
			if (eof || retlen == 0)
				tsiz = 0;
		} else
#endif
		if (retlen < len)
			tsiz = 0;
	}
nfsmout:
	iostat_unbusy(nmp->nm_stats, byte_count, 1);
	return (error);
}

struct nfs_writerpc_context {
	kmutex_t nwc_lock;
	kcondvar_t nwc_cv;
	int nwc_mbufcount;
};

/*
 * free the mbuf used to refer to protected pages during a write rpc call.
 * called at splvm.
 */
static void
nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
{
	struct nfs_writerpc_context *ctx = arg;

	KASSERT(m != NULL);
	KASSERT(ctx != NULL);
	pool_cache_put(mb_cache, m);
	mutex_enter(&ctx->nwc_lock);
	if (--ctx->nwc_mbufcount == 0) {
		cv_signal(&ctx->nwc_cv);
	}
	mutex_exit(&ctx->nwc_lock);
}

/*
 * nfs write call
 */
int
nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
	const int v3 = NFS_ISV3(vp);
	int committed = NFSV3WRITE_FILESYNC;
	struct nfsnode *np = VTONFS(vp);
	struct nfs_writerpc_context ctx;
	int byte_count;
	size_t origresid;
#ifndef NFS_V2_ONLY
	char *cp2;
	int rlen, commit;
#endif

	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		panic("writerpc readonly vp %p", vp);
	}

#ifdef DIAGNOSTIC
	if (uiop->uio_iovcnt != 1)
		panic("nfs: writerpc iovcnt > 1");
#endif
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return EFBIG;

	mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
	cv_init(&ctx.nwc_cv, "nfsmblk");
	ctx.nwc_mbufcount = 1;

retry:
	origresid = uiop->uio_resid;
	KASSERT(origresid == uiop->uio_iov->iov_len);
	iostat_busy(nmp->nm_stats);
	byte_count = 0; /* count of bytes actually written */
	while (tsiz > 0) {
		uint32_t datalen; /* data bytes to be allocated in the mbuf */
		size_t backup;
		bool stalewriteverf = false;

		nfsstats.rpccnt[NFSPROC_WRITE]++;
		len = uimin(tsiz, nmp->nm_wsize);
		datalen = pageprotected ? 0 : nfsm_rndup(len);
		nfsm_reqhead(np, NFSPROC_WRITE,
		    NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
		nfsm_fhtom(np, v3);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
			txdr_hyper(uiop->uio_offset, tl);
			tl += 2;
			*tl++ = txdr_unsigned(len);
			*tl++ = txdr_unsigned(*iomode);
			*tl = txdr_unsigned(len);
		} else
#endif
		{
			u_int32_t x;

			nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
			/* Set both "begin" and "current" to non-garbage. */
			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
			*tl++ = x;	/* "begin offset" */
			*tl++ = x;	/* "current offset" */
			x = txdr_unsigned(len);
			*tl++ = x;	/* total to this offset */
			*tl = x;	/* size of this write */

		}
		if (pageprotected) {
			/*
			 * since we know pages can't be modified during i/o,
			 * there is no need to copy them.
			 */
			struct mbuf *m;
			struct iovec *iovp = uiop->uio_iov;

			m = m_get(M_WAIT, MT_DATA);
			MCLAIM(m, &nfs_mowner);
			MEXTADD(m, iovp->iov_base, len, M_MBUF,
			    nfs_writerpc_extfree, &ctx);
			m->m_flags |= M_EXT_ROMAP;
			m->m_len = len;
			mb->m_next = m;
			/*
			 * no need to maintain mb and bpos here
			 * because no one cares about them later.
			 */
#if 0
			mb = m;
			bpos = mtod(void *, mb) + mb->m_len;
#endif
			UIO_ADVANCE(uiop, len);
			uiop->uio_offset += len;
			mutex_enter(&ctx.nwc_lock);
			ctx.nwc_mbufcount++;
			mutex_exit(&ctx.nwc_lock);
			nfs_zeropad(mb, 0, nfsm_padlen(len));
		} else {
			nfsm_uiotom(uiop, len);
		}
		nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
#ifndef NFS_V2_ONLY
		if (v3) {
			wccflag = NFSV3_WCCCHK;
			nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
			if (!error) {
				nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
				    + NFSX_V3WRITEVERF);
				rlen = fxdr_unsigned(int, *tl++);
				if (rlen == 0) {
					error = NFSERR_IO;
					m_freem(mrep);
					break;
				} else if (rlen < len) {
					backup = len - rlen;
					UIO_ADVANCE(uiop, -backup);
					uiop->uio_offset -= backup;
					len = rlen;
				}
				commit = fxdr_unsigned(int, *tl++);

				/*
				 * Return the lowest commitment level
				 * obtained by any of the RPCs.
				 */
				if (committed == NFSV3WRITE_FILESYNC)
					committed = commit;
				else if (committed == NFSV3WRITE_DATASYNC &&
				    commit == NFSV3WRITE_UNSTABLE)
					committed = commit;
				mutex_enter(&nmp->nm_lock);
				if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
					memcpy(nmp->nm_writeverf, tl,
					    NFSX_V3WRITEVERF);
					nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
				} else if ((nmp->nm_iflag &
				    NFSMNT_STALEWRITEVERF) ||
				    memcmp(tl, nmp->nm_writeverf,
				    NFSX_V3WRITEVERF)) {
					memcpy(nmp->nm_writeverf, tl,
					    NFSX_V3WRITEVERF);
					/*
					 * note NFSMNT_STALEWRITEVERF
					 * if we're the first thread to
					 * notice it.
					 */
					if ((nmp->nm_iflag &
					    NFSMNT_STALEWRITEVERF) == 0) {
						stalewriteverf = true;
						nmp->nm_iflag |=
						    NFSMNT_STALEWRITEVERF;
					}
				}
				mutex_exit(&nmp->nm_lock);
			}
		} else
#endif
			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
		if (wccflag)
			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
		m_freem(mrep);
		if (error)
			break;
		tsiz -= len;
		byte_count += len;
		if (stalewriteverf) {
			*stalewriteverfp = true;
			stalewriteverf = false;
			if (committed == NFSV3WRITE_UNSTABLE &&
			    len != origresid) {
				/*
				 * if our write requests weren't atomic but
				 * unstable, data from previous iterations
				 * might already have been lost by the server,
				 * so resend it all to nfsd.
				 */
				backup = origresid - tsiz;
				UIO_ADVANCE(uiop, -backup);
				uiop->uio_offset -= backup;
				tsiz = origresid;
				goto retry;
			}
		}
	}
nfsmout:
	iostat_unbusy(nmp->nm_stats, byte_count, 0);
	if (pageprotected) {
		/*
		 * wait until mbufs go away.
		 * retransmitted mbufs can survive longer than rpc requests
		 * themselves.
		 */
		mutex_enter(&ctx.nwc_lock);
		ctx.nwc_mbufcount--;
		while (ctx.nwc_mbufcount > 0) {
			cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
		}
		mutex_exit(&ctx.nwc_lock);
	}
	mutex_destroy(&ctx.nwc_lock);
	cv_destroy(&ctx.nwc_cv);
	*iomode = committed;
	if (error)
		uiop->uio_resid = tsiz;
	return (error);
}

/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
 */
int
nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
{
	struct nfsv2_sattr *sp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	struct vnode *newvp = (struct vnode *)0;
	struct nfsnode *dnp, *np;
	char *cp2;
	char *bpos, *dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	u_int32_t rdev;
	const int v3 = NFS_ISV3(dvp);

	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = txdr_unsigned(vap->va_rdev);
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = nfs_xdrneg1;
	else {
		VOP_ABORTOP(dvp, cnp);
		return (EOPNOTSUPP);
	}
	nfsstats.rpccnt[NFSPROC_MKNOD]++;
	dnp = VTONFS(dvp);
	nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
	    nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		*tl++ = vtonfsv3_type(vap->va_type);
		nfsm_v3attrbuild(vap, false);
		if (vap->va_type == VCHR || vap->va_type == VBLK) {
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(major(vap->va_rdev));
			*tl = txdr_unsigned(minor(vap->va_rdev));
		}
	} else
#endif
	{
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = rdev;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	if (error) {
		if (newvp)
			vput(newvp);
	} else {
		nfs_cache_enter(dvp, newvp, cnp);
		*vpp = newvp;
		VOP_UNLOCK(newvp);
	}
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
int
nfs_mknod(void *v)
{
	struct vop_mknod_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	int error;

	error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
	VN_KNOTE(dvp, NOTE_WRITE);
	if (error == 0 || error == EEXIST)
		cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
	return (error);
}

/*
 * nfs file create call
 */
int
nfs_create(void *v)
{
	struct vop_create_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsv2_sattr *sp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	struct nfsnode *dnp, *np = (struct nfsnode *)0;
	struct vnode *newvp = (struct vnode *)0;
	char *bpos, *dpos, *cp2;
	int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(dvp);
	u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;

	/*
	 * Oops, not for me..
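	 * (sockets cannot be created with the CREATE RPC, so hand them off
	 * to nfs_mknodrpc() instead.)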
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	KASSERT(vap->va_type == VREG);

#ifdef VA_EXCLUSIVE
	if (vap->va_vaflags & VA_EXCLUSIVE) {
		excl_mode = NFSV3CREATE_EXCLUSIVE;
	}
#endif
again:
	error = 0;
	nfsstats.rpccnt[NFSPROC_CREATE]++;
	dnp = VTONFS(dvp);
	nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
	    nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
			nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
			*tl++ = cprng_fast32();
			*tl = cprng_fast32();
		} else {
			*tl = txdr_unsigned(excl_mode);
			nfsm_v3attrbuild(vap, false);
		}
	} else
#endif
	{
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = 0;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	if (error) {
		/*
		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
		 */
		if (v3 && error == ENOTSUP) {
			if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
				excl_mode = NFSV3CREATE_GUARDED;
				goto again;
			} else if (excl_mode == NFSV3CREATE_GUARDED) {
				excl_mode = NFSV3CREATE_UNCHECKED;
				goto again;
			}
		}
	} else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
		struct timespec ts;

		getnanotime(&ts);

		/*
		 * make sure that we'll update timestamps as
		 * most server implementations use them to store
		 * the create verifier.
		 *
		 * XXX it's better to use TOSERVER always.
		 */

		if (vap->va_atime.tv_sec == VNOVAL)
			vap->va_atime = ts;
		if (vap->va_mtime.tv_sec == VNOVAL)
			vap->va_mtime = ts;

		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
	}
	if (error == 0) {
		if (cnp->cn_flags & MAKEENTRY)
			nfs_cache_enter(dvp, newvp, cnp);
		else
			cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
		*ap->a_vpp = newvp;
		VOP_UNLOCK(newvp);
	} else {
		if (newvp)
			vput(newvp);
		if (error == EEXIST)
			cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
	}
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If vrefcnt(vp) > 1
 *	If a rename is not already in the works
 *		call nfs_sillyrename() to set it up
 *	else
 *		do the remove rpc
 */
int
nfs_remove(void *v)
{
	struct vop_remove_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode * a_vp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;

#ifndef DIAGNOSTIC
	if (vrefcnt(vp) < 1)
		panic("nfs_remove: bad vrefcnt(vp)");
#endif
	if (vp->v_type == VDIR)
		error = EPERM;
	else if (vrefcnt(vp) == 1 || (np->n_sillyrename &&
	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
	    vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
		/* Do the rpc */
		if (error != EINTR)
			error = nfs_removerpc(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp);
	} else if (!np->n_sillyrename)
		error = nfs_sillyrename(dvp, vp, cnp, false);
	if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
	    vattr.va_nlink == 1) {
		np->n_flag |= NREMOVED;
	}
	NFS_INVALIDATE_ATTRCACHE(np);
	VN_KNOTE(vp, NOTE_DELETE);
	VN_KNOTE(dvp, NOTE_WRITE);
	if (dvp == vp)
		vrele(vp);
	else
		vput(vp);
	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 */
int
nfs_removeit(struct sillyrename *sp)
{

	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
	    (struct lwp *)0));
}

/*
 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 */
int
nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
{
	u_int32_t *tl;
	char *cp;
#ifndef NFS_V2_ONLY
	int32_t t1;
	char *cp2;
#endif
	int32_t t2;
	char *bpos, *dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(dvp);
	int rexmit = 0;
	struct nfsnode *dnp = VTONFS(dvp);

	nfsstats.rpccnt[NFSPROC_REMOVE]++;
	nfsm_reqhead(dnp, NFSPROC_REMOVE,
	    NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
	nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	/*
	 * Kludge City: If the first reply to the remove rpc is lost..
	 *   the reply to the retransmitted request will be ENOENT
	 *   since the file was in fact removed
	 *   Therefore, we cheat and return success.
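	 *   (the rename and link RPCs below apply the same trick to their
	 *   retransmitted replies.)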
	 */
	if (rexmit && error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs file rename call
 */
int
nfs_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	int error;

	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	/*
	 * If the tvp exists and is in use, sillyrename it before doing the
	 * rename of the new file over it.
	 *
	 * Have sillyrename use link instead of rename if possible,
	 * so that we don't lose the file if the rename fails, and so
	 * that there's no window when the "to" file doesn't exist.
	 */
	if (tvp && vrefcnt(tvp) > 1 && !VTONFS(tvp)->n_sillyrename &&
	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
		VN_KNOTE(tvp, NOTE_DELETE);
		vput(tvp);
		tvp = NULL;
	}

	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
	    tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
	    curlwp);

	VN_KNOTE(fdvp, NOTE_WRITE);
	VN_KNOTE(tdvp, NOTE_WRITE);
	if (error == 0 || error == EEXIST) {
		if (fvp->v_type == VDIR)
			cache_purge(fvp);
		else
			cache_purge1(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
			    0);
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tvp);
		else
			cache_purge1(tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
			    0);
	}
out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	return (error);
}

/*
 * nfs file rename rpc called from nfs_remove() above
 */
int
nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
{
	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
	    sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
}

/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
1941 */ 1942 int 1943 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l) 1944 { 1945 u_int32_t *tl; 1946 char *cp; 1947 #ifndef NFS_V2_ONLY 1948 int32_t t1; 1949 char *cp2; 1950 #endif 1951 int32_t t2; 1952 char *bpos, *dpos; 1953 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 1954 struct mbuf *mreq, *mrep, *md, *mb; 1955 const int v3 = NFS_ISV3(fdvp); 1956 int rexmit = 0; 1957 struct nfsnode *fdnp = VTONFS(fdvp); 1958 1959 nfsstats.rpccnt[NFSPROC_RENAME]++; 1960 nfsm_reqhead(fdnp, NFSPROC_RENAME, 1961 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) + 1962 nfsm_rndup(tnamelen)); 1963 nfsm_fhtom(fdnp, v3); 1964 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 1965 nfsm_fhtom(VTONFS(tdvp), v3); 1966 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 1967 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit); 1968 #ifndef NFS_V2_ONLY 1969 if (v3) { 1970 nfsm_wcc_data(fdvp, fwccflag, 0, !error); 1971 nfsm_wcc_data(tdvp, twccflag, 0, !error); 1972 } 1973 #endif 1974 nfsm_reqdone; 1975 VTONFS(fdvp)->n_flag |= NMODIFIED; 1976 VTONFS(tdvp)->n_flag |= NMODIFIED; 1977 if (!fwccflag) 1978 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp)); 1979 if (!twccflag) 1980 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp)); 1981 /* 1982 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1983 */ 1984 if (rexmit && error == ENOENT) 1985 error = 0; 1986 return (error); 1987 } 1988 1989 /* 1990 * NFS link RPC, called from nfs_link. 1991 * Assumes dvp and vp locked, and leaves them that way. 1992 */ 1993 1994 static int 1995 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name, 1996 size_t namelen, kauth_cred_t cred, struct lwp *l) 1997 { 1998 u_int32_t *tl; 1999 char *cp; 2000 #ifndef NFS_V2_ONLY 2001 int32_t t1; 2002 char *cp2; 2003 #endif 2004 int32_t t2; 2005 char *bpos, *dpos; 2006 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 2007 struct mbuf *mreq, *mrep, *md, *mb; 2008 const int v3 = NFS_ISV3(dvp); 2009 int rexmit = 0; 2010 struct nfsnode *np = VTONFS(vp); 2011 2012 nfsstats.rpccnt[NFSPROC_LINK]++; 2013 nfsm_reqhead(np, NFSPROC_LINK, 2014 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen)); 2015 nfsm_fhtom(np, v3); 2016 nfsm_fhtom(VTONFS(dvp), v3); 2017 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 2018 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit); 2019 #ifndef NFS_V2_ONLY 2020 if (v3) { 2021 nfsm_postop_attr(vp, attrflag, 0); 2022 nfsm_wcc_data(dvp, wccflag, 0, !error); 2023 } 2024 #endif 2025 nfsm_reqdone; 2026 2027 VTONFS(dvp)->n_flag |= NMODIFIED; 2028 if (!attrflag) 2029 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 2030 if (!wccflag) 2031 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2032 2033 /* 2034 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 2035 */ 2036 if (rexmit && error == EEXIST) 2037 error = 0; 2038 2039 return error; 2040 } 2041 2042 /* 2043 * nfs hard link create call 2044 */ 2045 int 2046 nfs_link(void *v) 2047 { 2048 struct vop_link_v2_args /* { 2049 struct vnode *a_dvp; 2050 struct vnode *a_vp; 2051 struct componentname *a_cnp; 2052 } */ *ap = v; 2053 struct vnode *vp = ap->a_vp; 2054 struct vnode *dvp = ap->a_dvp; 2055 struct componentname *cnp = ap->a_cnp; 2056 int error = 0; 2057 2058 error = vn_lock(vp, LK_EXCLUSIVE); 2059 if (error != 0) { 2060 VOP_ABORTOP(dvp, cnp); 2061 return error; 2062 } 2063 2064 /* 2065 * Push all writes to the server, so that the attribute cache 2066 * doesn't get "out of sync" with the server. 
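 * One way the skew could show up if the flush were skipped (an
 * illustrative sequence, not an additional code path):
 *
 *	write(2)  ->  data still dirty in the client's page cache
 *	link(2)   ->  LINK reply carries pre-flush size/mtime, which
 *	              nfs_linkrpc() loads into the attribute cache
 *	stat(2)   ->  may then be answered from those stale attributes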
2067 * XXX There should be a better way! 2068 */ 2069 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0); 2070 2071 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, 2072 cnp->cn_cred, curlwp); 2073 2074 if (error == 0) { 2075 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0); 2076 } 2077 VOP_UNLOCK(vp); 2078 VN_KNOTE(vp, NOTE_LINK); 2079 VN_KNOTE(dvp, NOTE_WRITE); 2080 return (error); 2081 } 2082 2083 /* 2084 * nfs symbolic link create call 2085 */ 2086 int 2087 nfs_symlink(void *v) 2088 { 2089 struct vop_symlink_v3_args /* { 2090 struct vnode *a_dvp; 2091 struct vnode **a_vpp; 2092 struct componentname *a_cnp; 2093 struct vattr *a_vap; 2094 char *a_target; 2095 } */ *ap = v; 2096 struct vnode *dvp = ap->a_dvp; 2097 struct vattr *vap = ap->a_vap; 2098 struct componentname *cnp = ap->a_cnp; 2099 struct nfsv2_sattr *sp; 2100 u_int32_t *tl; 2101 char *cp; 2102 int32_t t1, t2; 2103 char *bpos, *dpos, *cp2; 2104 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 2105 struct mbuf *mreq, *mrep, *md, *mb; 2106 struct vnode *newvp = (struct vnode *)0; 2107 const int v3 = NFS_ISV3(dvp); 2108 int rexmit = 0; 2109 struct nfsnode *dnp = VTONFS(dvp); 2110 2111 *ap->a_vpp = NULL; 2112 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 2113 slen = strlen(ap->a_target); 2114 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED + 2115 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3)); 2116 nfsm_fhtom(dnp, v3); 2117 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 2118 #ifndef NFS_V2_ONlY 2119 if (v3) 2120 nfsm_v3attrbuild(vap, false); 2121 #endif 2122 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 2123 #ifndef NFS_V2_ONlY 2124 if (!v3) { 2125 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 2126 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 2127 sp->sa_uid = nfs_xdrneg1; 2128 sp->sa_gid = nfs_xdrneg1; 2129 sp->sa_size = nfs_xdrneg1; 2130 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 2131 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 2132 } 2133 #endif 2134 nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred, 2135 &rexmit); 2136 #ifndef NFS_V2_ONlY 2137 if (v3) { 2138 if (!error) 2139 nfsm_mtofh(dvp, newvp, v3, gotvp); 2140 nfsm_wcc_data(dvp, wccflag, 0, !error); 2141 } 2142 #endif 2143 nfsm_reqdone; 2144 /* 2145 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 
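 * This is the mirror image of the ENOENT mapping in nfs_removerpc(): the
 * lost first reply already created the object (here the symlink), so the
 * retransmitted request legitimately fails with EEXIST.  The same mapping
 * is used by nfs_linkrpc() and, with an extra lookup check, by nfs_mkdir().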
2146 */ 2147 if (rexmit && error == EEXIST) 2148 error = 0; 2149 if (error == 0 || error == EEXIST) 2150 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0); 2151 if (error == 0 && newvp == NULL) { 2152 struct nfsnode *np = NULL; 2153 2154 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2155 cnp->cn_cred, curlwp, &np); 2156 if (error == 0) 2157 newvp = NFSTOV(np); 2158 } 2159 if (error) { 2160 if (newvp != NULL) 2161 vput(newvp); 2162 } else { 2163 *ap->a_vpp = newvp; 2164 VOP_UNLOCK(newvp); 2165 } 2166 VTONFS(dvp)->n_flag |= NMODIFIED; 2167 if (!wccflag) 2168 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2169 VN_KNOTE(dvp, NOTE_WRITE); 2170 return (error); 2171 } 2172 2173 /* 2174 * nfs make dir call 2175 */ 2176 int 2177 nfs_mkdir(void *v) 2178 { 2179 struct vop_mkdir_v3_args /* { 2180 struct vnode *a_dvp; 2181 struct vnode **a_vpp; 2182 struct componentname *a_cnp; 2183 struct vattr *a_vap; 2184 } */ *ap = v; 2185 struct vnode *dvp = ap->a_dvp; 2186 struct vattr *vap = ap->a_vap; 2187 struct componentname *cnp = ap->a_cnp; 2188 struct nfsv2_sattr *sp; 2189 u_int32_t *tl; 2190 char *cp; 2191 int32_t t1, t2; 2192 int len; 2193 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0; 2194 struct vnode *newvp = (struct vnode *)0; 2195 char *bpos, *dpos, *cp2; 2196 int error = 0, wccflag = NFSV3_WCCRATTR; 2197 int gotvp = 0; 2198 int rexmit = 0; 2199 struct mbuf *mreq, *mrep, *md, *mb; 2200 const int v3 = NFS_ISV3(dvp); 2201 2202 len = cnp->cn_namelen; 2203 nfsstats.rpccnt[NFSPROC_MKDIR]++; 2204 nfsm_reqhead(dnp, NFSPROC_MKDIR, 2205 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3)); 2206 nfsm_fhtom(dnp, v3); 2207 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 2208 #ifndef NFS_V2_ONLY 2209 if (v3) { 2210 nfsm_v3attrbuild(vap, false); 2211 } else 2212 #endif 2213 { 2214 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 2215 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 2216 sp->sa_uid = nfs_xdrneg1; 2217 sp->sa_gid = nfs_xdrneg1; 2218 sp->sa_size = nfs_xdrneg1; 2219 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 2220 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 2221 } 2222 nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit); 2223 if (!error) 2224 nfsm_mtofh(dvp, newvp, v3, gotvp); 2225 if (v3) 2226 nfsm_wcc_data(dvp, wccflag, 0, !error); 2227 nfsm_reqdone; 2228 VTONFS(dvp)->n_flag |= NMODIFIED; 2229 if (!wccflag) 2230 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2231 /* 2232 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry 2233 * if we can succeed in looking up the directory. 
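 *
 * In sketch form, the recovery below only forgives EEXIST (or a reply
 * that carried no file handle) when a follow-up LOOKUP of the name
 * succeeds and yields a directory other than dvp itself:
 *
 *	(rexmit && error == EEXIST) || (!error && !gotvp)
 *		->  nfs_lookitup(); then require v_type == VDIR and newvp != dvp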
2234 */ 2235 if ((rexmit && error == EEXIST) || (!error && !gotvp)) { 2236 if (newvp) { 2237 vput(newvp); 2238 newvp = (struct vnode *)0; 2239 } 2240 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 2241 curlwp, &np); 2242 if (!error) { 2243 newvp = NFSTOV(np); 2244 if (newvp->v_type != VDIR || newvp == dvp) 2245 error = EEXIST; 2246 } 2247 } 2248 if (error) { 2249 if (newvp) { 2250 if (dvp != newvp) 2251 vput(newvp); 2252 else 2253 vrele(newvp); 2254 } 2255 } else { 2256 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2257 nfs_cache_enter(dvp, newvp, cnp); 2258 *ap->a_vpp = newvp; 2259 VOP_UNLOCK(newvp); 2260 } 2261 return (error); 2262 } 2263 2264 /* 2265 * nfs remove directory call 2266 */ 2267 int 2268 nfs_rmdir(void *v) 2269 { 2270 struct vop_rmdir_v2_args /* { 2271 struct vnode *a_dvp; 2272 struct vnode *a_vp; 2273 struct componentname *a_cnp; 2274 } */ *ap = v; 2275 struct vnode *vp = ap->a_vp; 2276 struct vnode *dvp = ap->a_dvp; 2277 struct componentname *cnp = ap->a_cnp; 2278 u_int32_t *tl; 2279 char *cp; 2280 #ifndef NFS_V2_ONLY 2281 int32_t t1; 2282 char *cp2; 2283 #endif 2284 int32_t t2; 2285 char *bpos, *dpos; 2286 int error = 0, wccflag = NFSV3_WCCRATTR; 2287 int rexmit = 0; 2288 struct mbuf *mreq, *mrep, *md, *mb; 2289 const int v3 = NFS_ISV3(dvp); 2290 struct nfsnode *dnp; 2291 2292 if (dvp == vp) { 2293 vrele(vp); 2294 return (EINVAL); 2295 } 2296 nfsstats.rpccnt[NFSPROC_RMDIR]++; 2297 dnp = VTONFS(dvp); 2298 nfsm_reqhead(dnp, NFSPROC_RMDIR, 2299 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 2300 nfsm_fhtom(dnp, v3); 2301 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 2302 nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit); 2303 #ifndef NFS_V2_ONLY 2304 if (v3) 2305 nfsm_wcc_data(dvp, wccflag, 0, !error); 2306 #endif 2307 nfsm_reqdone; 2308 VTONFS(dvp)->n_flag |= NMODIFIED; 2309 if (!wccflag) 2310 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2311 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2312 VN_KNOTE(vp, NOTE_DELETE); 2313 cache_purge(vp); 2314 vput(vp); 2315 /* 2316 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2317 */ 2318 if (rexmit && error == ENOENT) 2319 error = 0; 2320 return (error); 2321 } 2322 2323 /* 2324 * nfs readdir call 2325 */ 2326 int 2327 nfs_readdir(void *v) 2328 { 2329 struct vop_readdir_args /* { 2330 struct vnode *a_vp; 2331 struct uio *a_uio; 2332 kauth_cred_t a_cred; 2333 int *a_eofflag; 2334 off_t **a_cookies; 2335 int *a_ncookies; 2336 } */ *ap = v; 2337 struct vnode *vp = ap->a_vp; 2338 struct uio *uio = ap->a_uio; 2339 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2340 char *base = uio->uio_iov->iov_base; 2341 int tresid, error; 2342 size_t count, lost; 2343 struct dirent *dp; 2344 off_t *cookies = NULL; 2345 int ncookies = 0, nc; 2346 2347 if (vp->v_type != VDIR) 2348 return (EPERM); 2349 2350 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1); 2351 count = uio->uio_resid - lost; 2352 if (count <= 0) 2353 return (EINVAL); 2354 2355 /* 2356 * Call nfs_bioread() to do the real work. 2357 */ 2358 tresid = uio->uio_resid = count; 2359 error = nfs_bioread(vp, uio, 0, ap->a_cred, 2360 ap->a_cookies ? 
NFSBIO_CACHECOOKIES : 0); 2361 2362 if (!error && ap->a_cookies) { 2363 ncookies = count / 16; 2364 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK); 2365 *ap->a_cookies = cookies; 2366 } 2367 2368 if (!error && uio->uio_resid == tresid) { 2369 uio->uio_resid += lost; 2370 nfsstats.direofcache_misses++; 2371 if (ap->a_cookies) 2372 *ap->a_ncookies = 0; 2373 *ap->a_eofflag = 1; 2374 return (0); 2375 } 2376 2377 if (!error && ap->a_cookies) { 2378 /* 2379 * Only the NFS server and emulations use cookies, and they 2380 * load the directory block into system space, so we can 2381 * just look at it directly. 2382 */ 2383 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) || 2384 uio->uio_iovcnt != 1) 2385 panic("nfs_readdir: lost in space"); 2386 for (nc = 0; ncookies-- && 2387 base < (char *)uio->uio_iov->iov_base; nc++){ 2388 dp = (struct dirent *) base; 2389 if (dp->d_reclen == 0) 2390 break; 2391 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) 2392 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp); 2393 else 2394 *(cookies++) = NFS_GETCOOKIE(dp); 2395 base += dp->d_reclen; 2396 } 2397 uio->uio_resid += 2398 ((char *)uio->uio_iov->iov_base - base); 2399 uio->uio_iov->iov_len += 2400 ((char *)uio->uio_iov->iov_base - base); 2401 uio->uio_iov->iov_base = base; 2402 *ap->a_ncookies = nc; 2403 } 2404 2405 uio->uio_resid += lost; 2406 *ap->a_eofflag = 0; 2407 return (error); 2408 } 2409 2410 /* 2411 * Readdir rpc call. 2412 * Called from below the buffer cache by nfs_doio(). 2413 */ 2414 int 2415 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred) 2416 { 2417 int len, left; 2418 struct dirent *dp = NULL; 2419 u_int32_t *tl; 2420 char *cp; 2421 int32_t t1, t2; 2422 char *bpos, *dpos, *cp2; 2423 struct mbuf *mreq, *mrep, *md, *mb; 2424 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2425 struct nfsnode *dnp = VTONFS(vp); 2426 u_quad_t fileno; 2427 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1; 2428 #ifndef NFS_V2_ONLY 2429 int attrflag; 2430 #endif 2431 int nrpcs = 0, reclen; 2432 const int v3 = NFS_ISV3(vp); 2433 2434 #ifdef DIAGNOSTIC 2435 /* 2436 * Should be called from buffer cache, so only amount of 2437 * NFS_DIRBLKSIZ will be requested. 2438 */ 2439 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2440 panic("nfs readdirrpc bad uio"); 2441 #endif 2442 2443 /* 2444 * Loop around doing readdir rpc's of size nm_readdirsize 2445 * truncated to a multiple of NFS_DIRFRAGSIZ. 2446 * The stopping criteria is EOF or buffer full. 2447 */ 2448 while (more_dirs && bigenough) { 2449 /* 2450 * Heuristic: don't bother to do another RPC to further 2451 * fill up this block if there is not much room left. (< 50% 2452 * of the readdir RPC size). This wastes some buffer space 2453 * but can save up to 50% in RPC calls. 
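 * Concretely: with nm_readdirsize of R bytes, a follow-up READDIR is only
 * issued while at least R/2 bytes of the directory block remain unfilled;
 * the first RPC (nrpcs == 0) is always sent regardless.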
2454 */ 2455 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2456 bigenough = 0; 2457 break; 2458 } 2459 nfsstats.rpccnt[NFSPROC_READDIR]++; 2460 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) + 2461 NFSX_READDIR(v3)); 2462 nfsm_fhtom(dnp, v3); 2463 #ifndef NFS_V2_ONLY 2464 if (v3) { 2465 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED); 2466 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2467 txdr_swapcookie3(uiop->uio_offset, tl); 2468 } else { 2469 txdr_cookie3(uiop->uio_offset, tl); 2470 } 2471 tl += 2; 2472 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2473 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2474 } else 2475 #endif 2476 { 2477 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 2478 *tl++ = txdr_unsigned(uiop->uio_offset); 2479 } 2480 *tl = txdr_unsigned(nmp->nm_readdirsize); 2481 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred); 2482 nrpcs++; 2483 #ifndef NFS_V2_ONLY 2484 if (v3) { 2485 nfsm_postop_attr(vp, attrflag, 0); 2486 if (!error) { 2487 nfsm_dissect(tl, u_int32_t *, 2488 2 * NFSX_UNSIGNED); 2489 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2490 dnp->n_cookieverf.nfsuquad[1] = *tl; 2491 } else { 2492 m_freem(mrep); 2493 goto nfsmout; 2494 } 2495 } 2496 #endif 2497 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2498 more_dirs = fxdr_unsigned(int, *tl); 2499 2500 /* loop thru the dir entries, doctoring them to 4bsd form */ 2501 while (more_dirs && bigenough) { 2502 #ifndef NFS_V2_ONLY 2503 if (v3) { 2504 nfsm_dissect(tl, u_int32_t *, 2505 3 * NFSX_UNSIGNED); 2506 fileno = fxdr_hyper(tl); 2507 len = fxdr_unsigned(int, *(tl + 2)); 2508 } else 2509 #endif 2510 { 2511 nfsm_dissect(tl, u_int32_t *, 2512 2 * NFSX_UNSIGNED); 2513 fileno = fxdr_unsigned(u_quad_t, *tl++); 2514 len = fxdr_unsigned(int, *tl); 2515 } 2516 if (len <= 0 || len > NFS_MAXNAMLEN) { 2517 error = EBADRPC; 2518 m_freem(mrep); 2519 goto nfsmout; 2520 } 2521 /* for cookie stashing */ 2522 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2523 left = NFS_DIRFRAGSIZ - blksiz; 2524 if (reclen > left) { 2525 memset(uiop->uio_iov->iov_base, 0, left); 2526 dp->d_reclen += left; 2527 UIO_ADVANCE(uiop, left); 2528 blksiz = 0; 2529 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2530 } 2531 if (reclen > uiop->uio_resid) 2532 bigenough = 0; 2533 if (bigenough) { 2534 int tlen; 2535 2536 dp = (struct dirent *)uiop->uio_iov->iov_base; 2537 dp->d_fileno = fileno; 2538 dp->d_namlen = len; 2539 dp->d_reclen = reclen; 2540 dp->d_type = DT_UNKNOWN; 2541 blksiz += reclen; 2542 if (blksiz == NFS_DIRFRAGSIZ) 2543 blksiz = 0; 2544 UIO_ADVANCE(uiop, DIRHDSIZ); 2545 nfsm_mtouio(uiop, len); 2546 tlen = reclen - (DIRHDSIZ + len); 2547 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2548 UIO_ADVANCE(uiop, tlen); 2549 } else 2550 nfsm_adv(nfsm_rndup(len)); 2551 #ifndef NFS_V2_ONLY 2552 if (v3) { 2553 nfsm_dissect(tl, u_int32_t *, 2554 3 * NFSX_UNSIGNED); 2555 } else 2556 #endif 2557 { 2558 nfsm_dissect(tl, u_int32_t *, 2559 2 * NFSX_UNSIGNED); 2560 } 2561 if (bigenough) { 2562 #ifndef NFS_V2_ONLY 2563 if (v3) { 2564 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2565 uiop->uio_offset = 2566 fxdr_swapcookie3(tl); 2567 else 2568 uiop->uio_offset = 2569 fxdr_cookie3(tl); 2570 } 2571 else 2572 #endif 2573 { 2574 uiop->uio_offset = 2575 fxdr_unsigned(off_t, *tl); 2576 } 2577 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2578 } 2579 if (v3) 2580 tl += 2; 2581 else 2582 tl++; 2583 more_dirs = fxdr_unsigned(int, *tl); 2584 } 2585 /* 2586 * If at end of rpc data, get the eof boolean 2587 */ 2588 if (!more_dirs) { 2589 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2590 more_dirs 
= (fxdr_unsigned(int, *tl) == 0); 2591 2592 /* 2593 * kludge: if we got no entries, treat it as EOF. 2594 * some server sometimes send a reply without any 2595 * entries or EOF. 2596 * although it might mean the server has very long name, 2597 * we can't handle such entries anyway. 2598 */ 2599 2600 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2601 more_dirs = 0; 2602 } 2603 m_freem(mrep); 2604 } 2605 /* 2606 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2607 * by increasing d_reclen for the last record. 2608 */ 2609 if (blksiz > 0) { 2610 left = NFS_DIRFRAGSIZ - blksiz; 2611 memset(uiop->uio_iov->iov_base, 0, left); 2612 dp->d_reclen += left; 2613 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2614 UIO_ADVANCE(uiop, left); 2615 } 2616 2617 /* 2618 * We are now either at the end of the directory or have filled the 2619 * block. 2620 */ 2621 if (bigenough) { 2622 dnp->n_direofoffset = uiop->uio_offset; 2623 dnp->n_flag |= NEOFVALID; 2624 } 2625 nfsmout: 2626 return (error); 2627 } 2628 2629 #ifndef NFS_V2_ONLY 2630 /* 2631 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2632 */ 2633 int 2634 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred) 2635 { 2636 int len, left; 2637 struct dirent *dp = NULL; 2638 u_int32_t *tl; 2639 char *cp; 2640 int32_t t1, t2; 2641 struct vnode *newvp; 2642 char *bpos, *dpos, *cp2; 2643 struct mbuf *mreq, *mrep, *md, *mb; 2644 struct nameidata nami, *ndp = &nami; 2645 struct componentname *cnp = &ndp->ni_cnd; 2646 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2647 struct nfsnode *dnp = VTONFS(vp), *np; 2648 nfsfh_t *fhp; 2649 u_quad_t fileno; 2650 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2651 int attrflag, fhsize, nrpcs = 0, reclen; 2652 struct nfs_fattr fattr, *fp; 2653 2654 #ifdef DIAGNOSTIC 2655 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2656 panic("nfs readdirplusrpc bad uio"); 2657 #endif 2658 ndp->ni_dvp = vp; 2659 newvp = NULLVP; 2660 2661 /* 2662 * Loop around doing readdir rpc's of size nm_readdirsize 2663 * truncated to a multiple of NFS_DIRFRAGSIZ. 2664 * The stopping criteria is EOF or buffer full. 
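 *
 * Compared with nfs_readdirrpc(), each entry in a READDIRPLUS reply may
 * additionally carry (a sketch of what the loop below consumes):
 *
 *	post-op attributes  ->  copied aside into 'fattr', later fed to
 *	                        nfs_loadattrcache()
 *	a file handle       ->  nfs_nget1() to obtain a vnode, then
 *	                        nfs_cache_enter() to prime the name cache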
2665 */ 2666 while (more_dirs && bigenough) { 2667 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2668 bigenough = 0; 2669 break; 2670 } 2671 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2672 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS, 2673 NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2674 nfsm_fhtom(dnp, 1); 2675 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED); 2676 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2677 txdr_swapcookie3(uiop->uio_offset, tl); 2678 } else { 2679 txdr_cookie3(uiop->uio_offset, tl); 2680 } 2681 tl += 2; 2682 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2683 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2684 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2685 *tl = txdr_unsigned(nmp->nm_rsize); 2686 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred); 2687 nfsm_postop_attr(vp, attrflag, 0); 2688 if (error) { 2689 m_freem(mrep); 2690 goto nfsmout; 2691 } 2692 nrpcs++; 2693 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2694 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2695 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2696 more_dirs = fxdr_unsigned(int, *tl); 2697 2698 /* loop thru the dir entries, doctoring them to 4bsd form */ 2699 while (more_dirs && bigenough) { 2700 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2701 fileno = fxdr_hyper(tl); 2702 len = fxdr_unsigned(int, *(tl + 2)); 2703 if (len <= 0 || len > NFS_MAXNAMLEN) { 2704 error = EBADRPC; 2705 m_freem(mrep); 2706 goto nfsmout; 2707 } 2708 /* for cookie stashing */ 2709 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2710 left = NFS_DIRFRAGSIZ - blksiz; 2711 if (reclen > left) { 2712 /* 2713 * DIRFRAGSIZ is aligned, no need to align 2714 * again here. 2715 */ 2716 memset(uiop->uio_iov->iov_base, 0, left); 2717 dp->d_reclen += left; 2718 UIO_ADVANCE(uiop, left); 2719 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2720 blksiz = 0; 2721 } 2722 if (reclen > uiop->uio_resid) 2723 bigenough = 0; 2724 if (bigenough) { 2725 int tlen; 2726 2727 dp = (struct dirent *)uiop->uio_iov->iov_base; 2728 dp->d_fileno = fileno; 2729 dp->d_namlen = len; 2730 dp->d_reclen = reclen; 2731 dp->d_type = DT_UNKNOWN; 2732 blksiz += reclen; 2733 if (blksiz == NFS_DIRFRAGSIZ) 2734 blksiz = 0; 2735 UIO_ADVANCE(uiop, DIRHDSIZ); 2736 nfsm_mtouio(uiop, len); 2737 tlen = reclen - (DIRHDSIZ + len); 2738 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2739 UIO_ADVANCE(uiop, tlen); 2740 cnp->cn_nameptr = dp->d_name; 2741 cnp->cn_namelen = dp->d_namlen; 2742 } else 2743 nfsm_adv(nfsm_rndup(len)); 2744 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2745 if (bigenough) { 2746 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2747 uiop->uio_offset = 2748 fxdr_swapcookie3(tl); 2749 else 2750 uiop->uio_offset = 2751 fxdr_cookie3(tl); 2752 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2753 } 2754 tl += 2; 2755 2756 /* 2757 * Since the attributes are before the file handle 2758 * (sigh), we must skip over the attributes and then 2759 * come back and get them. 
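 * In practice "come back" means: copy the fattr aside into the local
 * 'fattr' buffer first, fetch the handle and obtain a vnode for it, and
 * only then run nfs_loadattrcache() on the saved copy.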
2760 */ 2761 attrflag = fxdr_unsigned(int, *tl); 2762 if (attrflag) { 2763 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR); 2764 memcpy(&fattr, fp, NFSX_V3FATTR); 2765 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2766 doit = fxdr_unsigned(int, *tl); 2767 if (doit) { 2768 nfsm_getfh(fhp, fhsize, 1); 2769 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2770 vref(vp); 2771 newvp = vp; 2772 np = dnp; 2773 } else { 2774 error = nfs_nget1(vp->v_mount, fhp, 2775 fhsize, &np, LK_NOWAIT); 2776 if (!error) 2777 newvp = NFSTOV(np); 2778 } 2779 if (!error) { 2780 nfs_loadattrcache(&newvp, &fattr, 0, 0); 2781 if (bigenough) { 2782 dp->d_type = 2783 IFTODT(VTTOIF(np->n_vattr->va_type)); 2784 ndp->ni_vp = newvp; 2785 nfs_cache_enter(ndp->ni_dvp, 2786 ndp->ni_vp, cnp); 2787 } 2788 } 2789 error = 0; 2790 } 2791 } else { 2792 /* Just skip over the file handle */ 2793 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2794 i = fxdr_unsigned(int, *tl); 2795 nfsm_adv(nfsm_rndup(i)); 2796 } 2797 if (newvp != NULLVP) { 2798 if (newvp == vp) 2799 vrele(newvp); 2800 else 2801 vput(newvp); 2802 newvp = NULLVP; 2803 } 2804 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2805 more_dirs = fxdr_unsigned(int, *tl); 2806 } 2807 /* 2808 * If at end of rpc data, get the eof boolean 2809 */ 2810 if (!more_dirs) { 2811 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2812 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2813 2814 /* 2815 * kludge: see a comment in nfs_readdirrpc. 2816 */ 2817 2818 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2819 more_dirs = 0; 2820 } 2821 m_freem(mrep); 2822 } 2823 /* 2824 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2825 * by increasing d_reclen for the last record. 2826 */ 2827 if (blksiz > 0) { 2828 left = NFS_DIRFRAGSIZ - blksiz; 2829 memset(uiop->uio_iov->iov_base, 0, left); 2830 dp->d_reclen += left; 2831 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2832 UIO_ADVANCE(uiop, left); 2833 } 2834 2835 /* 2836 * We are now either at the end of the directory or have filled the 2837 * block. 2838 */ 2839 if (bigenough) { 2840 dnp->n_direofoffset = uiop->uio_offset; 2841 dnp->n_flag |= NEOFVALID; 2842 } 2843 nfsmout: 2844 if (newvp != NULLVP) { 2845 if(newvp == vp) 2846 vrele(newvp); 2847 else 2848 vput(newvp); 2849 } 2850 return (error); 2851 } 2852 #endif 2853 2854 /* 2855 * Silly rename. To make the NFS filesystem that is stateless look a little 2856 * more like the "ufs" a remove of an active vnode is translated to a rename 2857 * to a funny looking filename that is removed by nfs_inactive on the 2858 * nfsnode. There is the potential for another process on a different client 2859 * to create the same funny name between the nfs_lookitup() fails and the 2860 * nfs_rename() completes, but... 
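 *
 * The generated name has the fixed shape
 *
 *	".nfsAxxxx4.4"
 *	     ^^^^      low 16 bits of our pid as hex digits (s_name[5..8])
 *	    ^          bumped from 'A' upward until nfs_lookitup() misses
 *
 * so one LOOKUP RPC per candidate name is spent finding a free one.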
2861 */ 2862 int 2863 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink) 2864 { 2865 struct sillyrename *sp; 2866 struct nfsnode *np; 2867 int error; 2868 pid_t pid; 2869 2870 cache_purge(dvp); 2871 np = VTONFS(vp); 2872 #ifndef DIAGNOSTIC 2873 if (vp->v_type == VDIR) 2874 panic("nfs: sillyrename dir"); 2875 #endif 2876 sp = kmem_alloc(sizeof(*sp), KM_SLEEP); 2877 sp->s_cred = kauth_cred_dup(cnp->cn_cred); 2878 sp->s_dvp = dvp; 2879 vref(dvp); 2880 2881 /* Fudge together a funny name */ 2882 pid = curlwp->l_proc->p_pid; 2883 memcpy(sp->s_name, ".nfsAxxxx4.4", 13); 2884 sp->s_namlen = 12; 2885 sp->s_name[8] = hexdigits[pid & 0xf]; 2886 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf]; 2887 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf]; 2888 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf]; 2889 2890 /* Try lookitups until we get one that isn't there */ 2891 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2892 curlwp, (struct nfsnode **)0) == 0) { 2893 sp->s_name[4]++; 2894 if (sp->s_name[4] > 'z') { 2895 error = EINVAL; 2896 goto bad; 2897 } 2898 } 2899 if (dolink) { 2900 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen, 2901 sp->s_cred, curlwp); 2902 /* 2903 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP. 2904 */ 2905 if (error == ENOTSUP) { 2906 error = nfs_renameit(dvp, cnp, sp); 2907 } 2908 } else { 2909 error = nfs_renameit(dvp, cnp, sp); 2910 } 2911 if (error) 2912 goto bad; 2913 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2914 curlwp, &np); 2915 np->n_sillyrename = sp; 2916 return (0); 2917 bad: 2918 vrele(sp->s_dvp); 2919 kauth_cred_free(sp->s_cred); 2920 kmem_free(sp, sizeof(*sp)); 2921 return (error); 2922 } 2923 2924 /* 2925 * Look up a file name and optionally either update the file handle or 2926 * allocate an nfsnode, depending on the value of npp. 
2927 * npp == NULL --> just do the lookup 2928 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2929 * handled too 2930 * *npp != NULL --> update the file handle in the vnode 2931 */ 2932 int 2933 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp) 2934 { 2935 u_int32_t *tl; 2936 char *cp; 2937 int32_t t1, t2; 2938 struct vnode *newvp = (struct vnode *)0; 2939 struct nfsnode *np, *dnp = VTONFS(dvp); 2940 char *bpos, *dpos, *cp2; 2941 int error = 0, ofhlen, fhlen; 2942 #ifndef NFS_V2_ONLY 2943 int attrflag; 2944 #endif 2945 struct mbuf *mreq, *mrep, *md, *mb; 2946 nfsfh_t *ofhp, *nfhp; 2947 const int v3 = NFS_ISV3(dvp); 2948 2949 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2950 nfsm_reqhead(dnp, NFSPROC_LOOKUP, 2951 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 2952 nfsm_fhtom(dnp, v3); 2953 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2954 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred); 2955 if (npp && !error) { 2956 nfsm_getfh(nfhp, fhlen, v3); 2957 if (*npp) { 2958 np = *npp; 2959 newvp = NFSTOV(np); 2960 ofhlen = np->n_fhsize; 2961 ofhp = kmem_alloc(ofhlen, KM_SLEEP); 2962 memcpy(ofhp, np->n_fhp, ofhlen); 2963 error = vcache_rekey_enter(newvp->v_mount, newvp, 2964 ofhp, ofhlen, nfhp, fhlen); 2965 if (error) { 2966 kmem_free(ofhp, ofhlen); 2967 m_freem(mrep); 2968 return error; 2969 } 2970 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) { 2971 kmem_free(np->n_fhp, np->n_fhsize); 2972 np->n_fhp = &np->n_fh; 2973 } 2974 #if NFS_SMALLFH < NFSX_V3FHMAX 2975 else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH) 2976 np->n_fhp = kmem_alloc(fhlen, KM_SLEEP); 2977 #endif 2978 memcpy(np->n_fhp, nfhp, fhlen); 2979 np->n_fhsize = fhlen; 2980 vcache_rekey_exit(newvp->v_mount, newvp, 2981 ofhp, ofhlen, np->n_fhp, fhlen); 2982 kmem_free(ofhp, ofhlen); 2983 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2984 vref(dvp); 2985 newvp = dvp; 2986 np = dnp; 2987 } else { 2988 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2989 if (error) { 2990 m_freem(mrep); 2991 return (error); 2992 } 2993 newvp = NFSTOV(np); 2994 } 2995 #ifndef NFS_V2_ONLY 2996 if (v3) { 2997 nfsm_postop_attr(newvp, attrflag, 0); 2998 if (!attrflag && *npp == NULL) { 2999 m_freem(mrep); 3000 vput(newvp); 3001 return (ENOENT); 3002 } 3003 } else 3004 #endif 3005 nfsm_loadattr(newvp, (struct vattr *)0, 0); 3006 } 3007 nfsm_reqdone; 3008 if (npp && *npp == NULL) { 3009 if (error) { 3010 if (newvp) 3011 vput(newvp); 3012 } else 3013 *npp = np; 3014 } 3015 return (error); 3016 } 3017 3018 #ifndef NFS_V2_ONLY 3019 /* 3020 * Nfs Version 3 commit rpc 3021 */ 3022 int 3023 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l) 3024 { 3025 char *cp; 3026 u_int32_t *tl; 3027 int32_t t1, t2; 3028 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 3029 char *bpos, *dpos, *cp2; 3030 int error = 0, wccflag = NFSV3_WCCRATTR; 3031 struct mbuf *mreq, *mrep, *md, *mb; 3032 struct nfsnode *np; 3033 3034 KASSERT(NFS_ISV3(vp)); 3035 3036 #ifdef NFS_DEBUG_COMMIT 3037 printf("commit %lu - %lu\n", (unsigned long)offset, 3038 (unsigned long)(offset + cnt)); 3039 #endif 3040 3041 mutex_enter(&nmp->nm_lock); 3042 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) { 3043 mutex_exit(&nmp->nm_lock); 3044 return (0); 3045 } 3046 mutex_exit(&nmp->nm_lock); 3047 nfsstats.rpccnt[NFSPROC_COMMIT]++; 3048 np = VTONFS(vp); 3049 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1)); 3050 nfsm_fhtom(np, 1); 3051 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3052 txdr_hyper(offset, tl); 3053 tl += 
2;
3054 	*tl = txdr_unsigned(cnt);
3055 	nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3056 	nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3057 	if (!error) {
3058 		nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
3059 		mutex_enter(&nmp->nm_lock);
3060 		if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3061 		    memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3062 			memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3063 			error = NFSERR_STALEWRITEVERF;
3064 			nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3065 		}
3066 		mutex_exit(&nmp->nm_lock);
3067 	}
3068 	nfsm_reqdone;
3069 	return (error);
3070 }
3071 #endif
3072 
3073 /*
3074  * Kludge City..
3075  * - make nfs_bmap() essentially a no-op that does no translation
3076  * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3077  * (Maybe I could use the process's page mapping, but I was concerned that
3078  * Kernel Write might not be enabled and also figured copyout() would do
3079  * a lot more work than memcpy(), and it currently happens in the
3080  * context of the swapper process (2).)
3081  */
3082 int
3083 nfs_bmap(void *v)
3084 {
3085 	struct vop_bmap_args /* {
3086 		struct vnode *a_vp;
3087 		daddr_t a_bn;
3088 		struct vnode **a_vpp;
3089 		daddr_t *a_bnp;
3090 		int *a_runp;
3091 	} */ *ap = v;
3092 	struct vnode *vp = ap->a_vp;
3093 	int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3094 
3095 	if (ap->a_vpp != NULL)
3096 		*ap->a_vpp = vp;
3097 	if (ap->a_bnp != NULL)
3098 		*ap->a_bnp = ap->a_bn << bshift;
3099 	if (ap->a_runp != NULL)
3100 		*ap->a_runp = 1024 * 1024; /* XXX */
3101 	return (0);
3102 }
3103 
3104 /*
3105  * Strategy routine.
3106  * For async requests when nfsiod(s) are running, queue the request by
3107  * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
3108  * request.
3109  */
3110 int
3111 nfs_strategy(void *v)
3112 {
3113 	struct vop_strategy_args *ap = v;
3114 	struct buf *bp = ap->a_bp;
3115 	int error = 0;
3116 
3117 	if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3118 		panic("nfs physio/async");
3119 
3120 	/*
3121 	 * If the op is asynchronous and an i/o daemon is waiting,
3122 	 * queue the request and wake it up;
3123 	 * otherwise just do it ourselves.
3124 	 */
3125 	if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3126 		error = nfs_doio(bp);
3127 	return (error);
3128 }
3129 
3130 /*
3131  * fsync vnode op. Just call nfs_flush() with commit == 1.
3132  */
3133 /* ARGSUSED */
3134 int
3135 nfs_fsync(void *v)
3136 {
3137 	struct vop_fsync_args /* {
3138 		struct vnodeop_desc *a_desc;
3139 		struct vnode * a_vp;
3140 		kauth_cred_t a_cred;
3141 		int a_flags;
3142 		off_t a_offlo;
3143 		off_t a_offhi;
3144 		struct lwp * a_l;
3145 	} */ *ap = v;
3146 
3147 	struct vnode *vp = ap->a_vp;
3148 
3149 	if (vp->v_type != VREG)
3150 		return 0;
3151 
3152 	return (nfs_flush(vp, ap->a_cred,
3153 	    (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3154 }
3155 
3156 /*
3157  * Flush all the data associated with a vnode.
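 * "Flush" here means pushing every dirty page through VOP_PUTPAGES() with
 * PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO and then reporting any error the
 * asynchronous write path latched in np->n_error (NWRITEERR); the waitfor
 * and commit arguments are not otherwise consulted in this implementation.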
3158 */ 3159 int 3160 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l, 3161 int commit) 3162 { 3163 struct nfsnode *np = VTONFS(vp); 3164 int error; 3165 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO; 3166 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist); 3167 3168 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER); 3169 error = VOP_PUTPAGES(vp, 0, 0, flushflags); 3170 if (np->n_flag & NWRITEERR) { 3171 error = np->n_error; 3172 np->n_flag &= ~NWRITEERR; 3173 } 3174 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0); 3175 return (error); 3176 } 3177 3178 /* 3179 * Return POSIX pathconf information applicable to nfs. 3180 * 3181 * N.B. The NFS V2 protocol doesn't support this RPC. 3182 */ 3183 /* ARGSUSED */ 3184 int 3185 nfs_pathconf(void *v) 3186 { 3187 struct vop_pathconf_args /* { 3188 struct vnode *a_vp; 3189 int a_name; 3190 register_t *a_retval; 3191 } */ *ap = v; 3192 struct nfsv3_pathconf *pcp; 3193 struct vnode *vp = ap->a_vp; 3194 struct mbuf *mreq, *mrep, *md, *mb; 3195 int32_t t1, t2; 3196 u_int32_t *tl; 3197 char *bpos, *dpos, *cp, *cp2; 3198 int error = 0, attrflag; 3199 #ifndef NFS_V2_ONLY 3200 struct nfsmount *nmp; 3201 unsigned int l; 3202 u_int64_t maxsize; 3203 #endif 3204 const int v3 = NFS_ISV3(vp); 3205 struct nfsnode *np = VTONFS(vp); 3206 3207 switch (ap->a_name) { 3208 /* Names that can be resolved locally. */ 3209 case _PC_PIPE_BUF: 3210 *ap->a_retval = PIPE_BUF; 3211 break; 3212 case _PC_SYNC_IO: 3213 *ap->a_retval = 1; 3214 break; 3215 /* Names that cannot be resolved locally; do an RPC, if possible. */ 3216 case _PC_LINK_MAX: 3217 case _PC_NAME_MAX: 3218 case _PC_CHOWN_RESTRICTED: 3219 case _PC_NO_TRUNC: 3220 if (!v3) { 3221 error = EINVAL; 3222 break; 3223 } 3224 nfsstats.rpccnt[NFSPROC_PATHCONF]++; 3225 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1)); 3226 nfsm_fhtom(np, 1); 3227 nfsm_request(np, NFSPROC_PATHCONF, 3228 curlwp, curlwp->l_cred); /* XXX */ 3229 nfsm_postop_attr(vp, attrflag, 0); 3230 if (!error) { 3231 nfsm_dissect(pcp, struct nfsv3_pathconf *, 3232 NFSX_V3PATHCONF); 3233 switch (ap->a_name) { 3234 case _PC_LINK_MAX: 3235 *ap->a_retval = 3236 fxdr_unsigned(register_t, pcp->pc_linkmax); 3237 break; 3238 case _PC_NAME_MAX: 3239 *ap->a_retval = 3240 fxdr_unsigned(register_t, pcp->pc_namemax); 3241 break; 3242 case _PC_CHOWN_RESTRICTED: 3243 *ap->a_retval = 3244 (pcp->pc_chownrestricted == nfs_true); 3245 break; 3246 case _PC_NO_TRUNC: 3247 *ap->a_retval = 3248 (pcp->pc_notrunc == nfs_true); 3249 break; 3250 } 3251 } 3252 nfsm_reqdone; 3253 break; 3254 case _PC_FILESIZEBITS: 3255 #ifndef NFS_V2_ONLY 3256 if (v3) { 3257 nmp = VFSTONFS(vp->v_mount); 3258 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0) 3259 if ((error = nfs_fsinfo(nmp, vp, 3260 curlwp->l_cred, curlwp)) != 0) /* XXX */ 3261 break; 3262 for (l = 0, maxsize = nmp->nm_maxfilesize; 3263 (maxsize >> l) > 0; l++) 3264 ; 3265 *ap->a_retval = l + 1; 3266 } else 3267 #endif 3268 { 3269 *ap->a_retval = 32; /* NFS V2 limitation */ 3270 } 3271 break; 3272 default: 3273 error = genfs_pathconf(ap); 3274 break; 3275 } 3276 3277 return (error); 3278 } 3279 3280 /* 3281 * NFS advisory byte-level locks. 3282 */ 3283 int 3284 nfs_advlock(void *v) 3285 { 3286 struct vop_advlock_args /* { 3287 struct vnode *a_vp; 3288 void *a_id; 3289 int a_op; 3290 struct flock *a_fl; 3291 int a_flags; 3292 } */ *ap = v; 3293 struct nfsnode *np = VTONFS(ap->a_vp); 3294 3295 return lf_advlock(ap, &np->n_lockf, np->n_size); 3296 } 3297 3298 /* 3299 * Print out the contents of an nfsnode. 
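 * The output is a single line of the form (values illustrative only)
 *
 *	tag VT_NFS, fileid 1234 fsid 0x1a03
 *
 * with the fifofs print routine's output appended for VFIFO vnodes.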
3300 */ 3301 int 3302 nfs_print(void *v) 3303 { 3304 struct vop_print_args /* { 3305 struct vnode *a_vp; 3306 } */ *ap = v; 3307 struct vnode *vp = ap->a_vp; 3308 struct nfsnode *np = VTONFS(vp); 3309 3310 printf("tag VT_NFS, fileid %lld fsid 0x%llx", 3311 (unsigned long long)np->n_vattr->va_fileid, 3312 (unsigned long long)np->n_vattr->va_fsid); 3313 if (vp->v_type == VFIFO) 3314 VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v); 3315 printf("\n"); 3316 return (0); 3317 } 3318 3319 /* 3320 * nfs unlock wrapper. 3321 */ 3322 int 3323 nfs_unlock(void *v) 3324 { 3325 struct vop_unlock_args /* { 3326 struct vnode *a_vp; 3327 int a_flags; 3328 } */ *ap = v; 3329 struct vnode *vp = ap->a_vp; 3330 3331 /* 3332 * VOP_UNLOCK can be called by nfs_loadattrcache 3333 * with v_data == 0. 3334 */ 3335 if (VTONFS(vp)) { 3336 nfs_delayedtruncate(vp); 3337 } 3338 3339 return genfs_unlock(v); 3340 } 3341 3342 /* 3343 * nfs special file access vnode op. 3344 * Essentially just get vattr and then imitate iaccess() since the device is 3345 * local to the client. 3346 */ 3347 int 3348 nfsspec_access(void *v) 3349 { 3350 struct vop_access_args /* { 3351 struct vnode *a_vp; 3352 accmode_t a_accmode; 3353 kauth_cred_t a_cred; 3354 struct lwp *a_l; 3355 } */ *ap = v; 3356 struct vattr va; 3357 struct vnode *vp = ap->a_vp; 3358 int error; 3359 3360 error = VOP_GETATTR(vp, &va, ap->a_cred); 3361 if (error) 3362 return (error); 3363 3364 /* 3365 * Disallow write attempts on filesystems mounted read-only; 3366 * unless the file is a socket, fifo, or a block or character 3367 * device resident on the filesystem. 3368 */ 3369 if ((ap->a_accmode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3370 switch (vp->v_type) { 3371 case VREG: 3372 case VDIR: 3373 case VLNK: 3374 return (EROFS); 3375 default: 3376 break; 3377 } 3378 } 3379 3380 return kauth_authorize_vnode(ap->a_cred, KAUTH_ACCESS_ACTION( 3381 ap->a_accmode, va.va_type, va.va_mode), vp, NULL, genfs_can_access( 3382 vp, ap->a_cred, va.va_uid, va.va_gid, va.va_mode, NULL, 3383 ap->a_accmode)); 3384 } 3385 3386 /* 3387 * Read wrapper for special devices. 3388 */ 3389 int 3390 nfsspec_read(void *v) 3391 { 3392 struct vop_read_args /* { 3393 struct vnode *a_vp; 3394 struct uio *a_uio; 3395 int a_ioflag; 3396 kauth_cred_t a_cred; 3397 } */ *ap = v; 3398 struct nfsnode *np = VTONFS(ap->a_vp); 3399 3400 /* 3401 * Set access flag. 3402 */ 3403 np->n_flag |= NACC; 3404 getnanotime(&np->n_atim); 3405 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap)); 3406 } 3407 3408 /* 3409 * Write wrapper for special devices. 3410 */ 3411 int 3412 nfsspec_write(void *v) 3413 { 3414 struct vop_write_args /* { 3415 struct vnode *a_vp; 3416 struct uio *a_uio; 3417 int a_ioflag; 3418 kauth_cred_t a_cred; 3419 } */ *ap = v; 3420 struct nfsnode *np = VTONFS(ap->a_vp); 3421 3422 /* 3423 * Set update flag. 3424 */ 3425 np->n_flag |= NUPD; 3426 getnanotime(&np->n_mtim); 3427 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap)); 3428 } 3429 3430 /* 3431 * Close wrapper for special devices. 3432 * 3433 * Update the times on the nfsnode then do device close. 
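 *
 * The access/update stamps noted by the read/write wrappers (NACC/NUPD,
 * n_atim/n_mtim) are pushed to the server with a single SETATTR only when
 * this is the last reference to the vnode and the mount is not read-only;
 * otherwise they just stay cached in the nfsnode (NCHG).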
3434 */ 3435 int 3436 nfsspec_close(void *v) 3437 { 3438 struct vop_close_args /* { 3439 struct vnode *a_vp; 3440 int a_fflag; 3441 kauth_cred_t a_cred; 3442 struct lwp *a_l; 3443 } */ *ap = v; 3444 struct vnode *vp = ap->a_vp; 3445 struct nfsnode *np = VTONFS(vp); 3446 struct vattr vattr; 3447 3448 if (np->n_flag & (NACC | NUPD)) { 3449 np->n_flag |= NCHG; 3450 if (vrefcnt(vp) == 1 && 3451 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3452 vattr_null(&vattr); 3453 if (np->n_flag & NACC) 3454 vattr.va_atime = np->n_atim; 3455 if (np->n_flag & NUPD) 3456 vattr.va_mtime = np->n_mtim; 3457 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3458 } 3459 } 3460 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap)); 3461 } 3462 3463 /* 3464 * Read wrapper for fifos. 3465 */ 3466 int 3467 nfsfifo_read(void *v) 3468 { 3469 struct vop_read_args /* { 3470 struct vnode *a_vp; 3471 struct uio *a_uio; 3472 int a_ioflag; 3473 kauth_cred_t a_cred; 3474 } */ *ap = v; 3475 struct nfsnode *np = VTONFS(ap->a_vp); 3476 3477 /* 3478 * Set access flag. 3479 */ 3480 np->n_flag |= NACC; 3481 getnanotime(&np->n_atim); 3482 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap)); 3483 } 3484 3485 /* 3486 * Write wrapper for fifos. 3487 */ 3488 int 3489 nfsfifo_write(void *v) 3490 { 3491 struct vop_write_args /* { 3492 struct vnode *a_vp; 3493 struct uio *a_uio; 3494 int a_ioflag; 3495 kauth_cred_t a_cred; 3496 } */ *ap = v; 3497 struct nfsnode *np = VTONFS(ap->a_vp); 3498 3499 /* 3500 * Set update flag. 3501 */ 3502 np->n_flag |= NUPD; 3503 getnanotime(&np->n_mtim); 3504 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); 3505 } 3506 3507 /* 3508 * Close wrapper for fifos. 3509 * 3510 * Update the times on the nfsnode then do fifo close. 3511 */ 3512 int 3513 nfsfifo_close(void *v) 3514 { 3515 struct vop_close_args /* { 3516 struct vnode *a_vp; 3517 int a_fflag; 3518 kauth_cred_t a_cred; 3519 struct lwp *a_l; 3520 } */ *ap = v; 3521 struct vnode *vp = ap->a_vp; 3522 struct nfsnode *np = VTONFS(vp); 3523 struct vattr vattr; 3524 3525 if (np->n_flag & (NACC | NUPD)) { 3526 struct timespec ts; 3527 3528 getnanotime(&ts); 3529 if (np->n_flag & NACC) 3530 np->n_atim = ts; 3531 if (np->n_flag & NUPD) 3532 np->n_mtim = ts; 3533 np->n_flag |= NCHG; 3534 if (vrefcnt(vp) == 1 && 3535 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3536 vattr_null(&vattr); 3537 if (np->n_flag & NACC) 3538 vattr.va_atime = np->n_atim; 3539 if (np->n_flag & NUPD) 3540 vattr.va_mtime = np->n_mtim; 3541 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3542 } 3543 } 3544 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap)); 3545 } 3546