/*	$NetBSD: nfs_vnops.c,v 1.308 2015/05/14 17:35:54 chs Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_vnops.c	8.19 (Berkeley) 7/31/95
 */

/*
 * vnode op calls for Sun NFS version 2 and 3
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.308 2015/05/14 17:35:54 chs Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/disk.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/hash.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/kauth.h>
#include <sys/cprng.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_var.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

/*
 * Global vfs data structures for nfs
 */
int (**nfsv2_vnodeop_p)(void *);
const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, nfs_lookup },		/* lookup */
	{ &vop_create_desc, nfs_create },		/* create */
	{ &vop_mknod_desc, nfs_mknod },			/* mknod */
	{ &vop_open_desc, nfs_open },			/* open */
	{ &vop_close_desc, nfs_close },			/* close */
	{ &vop_access_desc, nfs_access },		/* access */
	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
	{ &vop_read_desc, nfs_read },			/* read */
	{ &vop_write_desc, nfs_write },			/* write */
	{ &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
	{ &vop_fdiscard_desc, genfs_eopnotsupp },	/* fdiscard */
	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, nfs_ioctl },			/* ioctl */
	{ &vop_poll_desc, nfs_poll },			/* poll */
	{ &vop_kqfilter_desc, nfs_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, nfs_revoke },		/* revoke */
	{ &vop_mmap_desc, nfs_mmap },			/* mmap */
	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
	{ &vop_seek_desc, nfs_seek },			/* seek */
	{ &vop_remove_desc, nfs_remove },		/* remove */
	{ &vop_link_desc, nfs_link },			/* link */
	{ &vop_rename_desc, nfs_rename },		/* rename */
	{ &vop_mkdir_desc, nfs_mkdir },			/* mkdir */
	{ &vop_rmdir_desc, nfs_rmdir },			/* rmdir */
	{ &vop_symlink_desc, nfs_symlink },		/* symlink */
	{ &vop_readdir_desc, nfs_readdir },		/* readdir */
	{ &vop_readlink_desc, nfs_readlink },		/* readlink */
	{ &vop_abortop_desc, nfs_abortop },		/* abortop */
	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, nfs_lock },			/* lock */
	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
	{ &vop_bmap_desc, nfs_bmap },			/* bmap */
	{ &vop_strategy_desc, nfs_strategy },		/* strategy */
	{ &vop_print_desc, nfs_print },			/* print */
	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, nfs_pathconf },		/* pathconf */
	{ &vop_advlock_desc, nfs_advlock },		/* advlock */
	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
	{ &vop_getpages_desc, nfs_getpages },		/* getpages */
	{ &vop_putpages_desc, genfs_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };

/*
 * Special device vnode ops
 */
int (**spec_nfsv2nodeop_p)(void *);
const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, nfsspec_close },		/* close */
	{ &vop_access_desc, nfsspec_access },		/* access */
	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
	{ &vop_read_desc, nfsspec_read },		/* read */
	{ &vop_write_desc, nfsspec_write },		/* write */
	{ &vop_fallocate_desc, spec_fallocate },	/* fallocate */
	{ &vop_fdiscard_desc, spec_fdiscard },		/* fdiscard */
	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, nfs_lock },			/* lock */
	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, nfs_print },			/* print */
	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
	{ &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };

int (**fifo_nfsv2nodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, vn_fifo_bypass },		/* lookup */
	{ &vop_create_desc, vn_fifo_bypass },		/* create */
	{ &vop_mknod_desc, vn_fifo_bypass },		/* mknod */
	{ &vop_open_desc, vn_fifo_bypass },		/* open */
	{ &vop_close_desc, nfsfifo_close },		/* close */
	{ &vop_access_desc, nfsspec_access },		/* access */
	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
	{ &vop_read_desc, nfsfifo_read },		/* read */
	{ &vop_write_desc, nfsfifo_write },		/* write */
	{ &vop_fallocate_desc, vn_fifo_bypass },	/* fallocate */
	{ &vop_fdiscard_desc, vn_fifo_bypass },		/* fdiscard */
	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, vn_fifo_bypass },		/* ioctl */
	{ &vop_poll_desc, vn_fifo_bypass },		/* poll */
	{ &vop_kqfilter_desc, vn_fifo_bypass },		/* kqfilter */
	{ &vop_revoke_desc, vn_fifo_bypass },		/* revoke */
	{ &vop_mmap_desc, vn_fifo_bypass },		/* mmap */
	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
	{ &vop_seek_desc, vn_fifo_bypass },		/* seek */
	{ &vop_remove_desc, vn_fifo_bypass },		/* remove */
	{ &vop_link_desc, vn_fifo_bypass },		/* link */
	{ &vop_rename_desc, vn_fifo_bypass },		/* rename */
	{ &vop_mkdir_desc, vn_fifo_bypass },		/* mkdir */
	{ &vop_rmdir_desc, vn_fifo_bypass },		/* rmdir */
	{ &vop_symlink_desc, vn_fifo_bypass },		/* symlink */
	{ &vop_readdir_desc, vn_fifo_bypass },		/* readdir */
	{ &vop_readlink_desc, vn_fifo_bypass },		/* readlink */
	{ &vop_abortop_desc, vn_fifo_bypass },		/* abortop */
	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, nfs_lock },			/* lock */
	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
	{ &vop_bmap_desc, vn_fifo_bypass },		/* bmap */
	{ &vop_strategy_desc, genfs_badop },		/* strategy */
	{ &vop_print_desc, nfs_print },			/* print */
	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, vn_fifo_bypass },		/* pathconf */
	{ &vop_advlock_desc, vn_fifo_bypass },		/* advlock */
	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
	{ &vop_putpages_desc, vn_fifo_bypass },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
	{ &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };

static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
    size_t, kauth_cred_t, struct lwp *);
static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);

/*
 * Global variables
 */
extern u_int32_t nfs_true, nfs_false;
extern u_int32_t nfs_xdrneg1;
extern const nfstype nfsv3_type[9];

int nfs_numasync = 0;
#define	DIRHDSIZ	_DIRENT_NAMEOFF(dp)
#define	UIO_ADVANCE(uio, siz) \
    (void)((uio)->uio_resid -= (siz), \
    (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
    (uio)->uio_iov->iov_len -= (siz))

static void nfs_cache_enter(struct vnode *, struct vnode *,
    struct componentname *);

static void
nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp)
{
	struct nfsnode *dnp = VTONFS(dvp);

	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		return;
	}
	if (vp != NULL) {
		struct nfsnode *np = VTONFS(vp);

		np->n_ctime = np->n_vattr->va_ctime.tv_sec;
	}

	if (!timespecisset(&dnp->n_nctime))
		dnp->n_nctime = dnp->n_vattr->va_mtime;

	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);
}

/*
 * nfs null call from vfs.
 */
int
nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
{
	char *bpos, *dpos;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb __unused;
	struct nfsnode *np = VTONFS(vp);

	nfsm_reqhead(np, NFSPROC_NULL, 0);
	nfsm_request(np, NFSPROC_NULL, l, cred);
	nfsm_reqdone;
	return (error);
}

/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs version 3, use the access rpc to check accessibility. If file modes
 * are changed on the server, accesses might still fail later.
 */
int
nfs_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
#ifndef NFS_V2_ONLY
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	int error = 0, attrflag;
	struct mbuf *mreq, *mrep, *md, *mb;
	u_int32_t mode, rmode;
	const int v3 = NFS_ISV3(vp);
#endif
	int cachevalid;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	cachevalid = (np->n_accstamp != -1 &&
	    (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
	    np->n_accuid == kauth_cred_geteuid(ap->a_cred));

	/*
	 * Check access cache first. If this request has been made for this
	 * uid shortly before, use the cached result.
	 */
	if (cachevalid) {
		if (!np->n_accerror) {
			if ((np->n_accmode & ap->a_mode) == ap->a_mode)
				return np->n_accerror;
		} else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
			return np->n_accerror;
	}

#ifndef NFS_V2_ONLY
	/*
	 * For nfs v3, do an access rpc, otherwise you are stuck emulating
	 * ufs_access() locally using the vattr. This may not be correct,
	 * since the server may apply other access criteria such as
	 * client uid-->server uid mapping that we do not know about, but
	 * this is better than just returning anything that is lying about
	 * in the cache.
	 */
	if (v3) {
		nfsstats.rpccnt[NFSPROC_ACCESS]++;
		nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
		nfsm_fhtom(np, v3);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		if (ap->a_mode & VREAD)
			mode = NFSV3ACCESS_READ;
		else
			mode = 0;
		if (vp->v_type != VDIR) {
			if (ap->a_mode & VWRITE)
				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
			if (ap->a_mode & VEXEC)
				mode |= NFSV3ACCESS_EXECUTE;
		} else {
			if (ap->a_mode & VWRITE)
				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
				    NFSV3ACCESS_DELETE);
			if (ap->a_mode & VEXEC)
				mode |= NFSV3ACCESS_LOOKUP;
		}
		*tl = txdr_unsigned(mode);
		nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
		nfsm_postop_attr(vp, attrflag, 0);
		if (!error) {
			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
			rmode = fxdr_unsigned(u_int32_t, *tl);
			/*
			 * The NFS V3 spec does not clarify whether or not
			 * the returned access bits can be a superset of
			 * the ones requested, so...
			 */
			if ((rmode & mode) != mode)
				error = EACCES;
		}
		nfsm_reqdone;
	} else
#endif
		return (nfsspec_access(ap));
#ifndef NFS_V2_ONLY
	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if (!error && (ap->a_mode & VWRITE) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			error = EROFS;
		default:
			break;
		}
	}

	if (!error || error == EACCES) {
		/*
		 * If we got the same result as for a previous,
		 * different request, OR it in. Don't update
		 * the timestamp in that case.
		 */
		if (cachevalid && np->n_accstamp != -1 &&
		    error == np->n_accerror) {
			if (!error)
				np->n_accmode |= ap->a_mode;
			else if ((np->n_accmode & ap->a_mode) == ap->a_mode)
				np->n_accmode = ap->a_mode;
		} else {
			np->n_accstamp = time_uptime;
			np->n_accuid = kauth_cred_geteuid(ap->a_cred);
			np->n_accmode = ap->a_mode;
			np->n_accerror = error;
		}
	}

	return (error);
#endif
}

/*
 * nfs open vnode op
 * Check to see if the type is ok
 * and that deletion is not in progress.
 * For paged in text files, you will need to flush the page cache
 * if consistency is lost.
 */
/* ARGSUSED */
int
nfs_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int error;

	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
		return (EACCES);
	}

	if (ap->a_mode & FREAD) {
		if (np->n_rcred != NULL)
			kauth_cred_free(np->n_rcred);
		np->n_rcred = ap->a_cred;
		kauth_cred_hold(np->n_rcred);
	}
	if (ap->a_mode & FWRITE) {
		if (np->n_wcred != NULL)
			kauth_cred_free(np->n_wcred);
		np->n_wcred = ap->a_cred;
		kauth_cred_hold(np->n_wcred);
	}

	error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
	if (error)
		return error;

	NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */

	return (0);
}

/*
 * nfs close vnode op
 * What an NFS client should do upon close after writing is a debatable issue.
 * Most NFS clients push delayed writes to the server upon close, basically for
 * two reasons:
 * 1 - So that any write errors may be reported back to the client process
 *     doing the close system call. By far the two most likely errors are
 *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
 * 2 - To put a worst case upper bound on cache inconsistency between
 *     multiple clients for the file.
 * There is also a consistency problem for Version 2 of the protocol w.r.t.
 * not being able to tell if other clients are writing a file concurrently,
 * since there is no way of knowing if the changed modify time in the reply
 * is only due to the write for this client.
 * (NFS Version 3 provides weak cache consistency data in the reply that
 * should be sufficient to detect and handle this case.)
 *
 * The current code does the following:
 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
 *                     or commit them (this satisfies 1 and 2 except for the
 *                     case where the server crashes after this close but
 *                     before the commit RPC, which is felt to be "good
 *                     enough"). Changing the last argument to nfs_flush() to
 *                     a 1 would force a commit operation, if it is felt a
 *                     commit is necessary now.
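 *
 * Together with the attribute cache invalidation done in nfs_open()
 * ("For Open/Close consistency"), this flush-on-close is what provides
 * the usual NFS close-to-open consistency behaviour.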
 */
/* ARGSUSED */
int
nfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);

	if (vp->v_type == VREG) {
		if (np->n_flag & NMODIFIED) {
#ifndef NFS_V2_ONLY
			if (NFS_ISV3(vp)) {
				error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
				np->n_flag &= ~NMODIFIED;
			} else
#endif
				error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
			NFS_INVALIDATE_ATTRCACHE(np);
		}
		if (np->n_flag & NWRITEERR) {
			np->n_flag &= ~NWRITEERR;
			error = np->n_error;
		}
	}
	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
	return (error);
}

/*
 * nfs getattr call from vfs.
 */
int
nfs_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	char *cp;
	u_int32_t *tl;
	int32_t t1, t2;
	char *bpos, *dpos;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);

	/*
	 * Update local times for special files.
	 */
	if (np->n_flag & (NACC | NUPD))
		np->n_flag |= NCHG;

	/*
	 * if we have delayed truncation, do it now.
	 */
	nfs_delayedtruncate(vp);

	/*
	 * First look in the cache.
	 */
	if (nfs_getattrcache(vp, ap->a_vap) == 0)
		return (0);
	nfsstats.rpccnt[NFSPROC_GETATTR]++;
	nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
	nfsm_fhtom(np, v3);
	nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
	if (!error) {
		nfsm_loadattr(vp, ap->a_vap, 0);
		if (vp->v_type == VDIR &&
		    ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
			ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs setattr call.
 */
int
nfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;
	u_quad_t tsize = 0;

	/*
	 * Setting of flags is not supported.
	 */
	if (vap->va_flags != VNOVAL)
		return (EOPNOTSUPP);

	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
			return EFBIG;
		}
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_mtime.tv_sec == VNOVAL &&
			    vap->va_atime.tv_sec == VNOVAL &&
			    vap->va_mode == (mode_t)VNOVAL &&
			    vap->va_uid == (uid_t)VNOVAL &&
			    vap->va_gid == (gid_t)VNOVAL)
				return (0);
			vap->va_size = VNOVAL;
			break;
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			genfs_node_wrlock(vp);
			uvm_vnp_setsize(vp, vap->va_size);
			tsize = np->n_size;
			np->n_size = vap->va_size;
			if (vap->va_size == 0)
				error = nfs_vinvalbuf(vp, 0,
				    ap->a_cred, curlwp, 1);
			else
				error = nfs_vinvalbuf(vp, V_SAVE,
				    ap->a_cred, curlwp, 1);
			if (error) {
				uvm_vnp_setsize(vp, tsize);
				genfs_node_unlock(vp);
				return (error);
			}
			np->n_vattr->va_size = vap->va_size;
		}
	} else {
		/*
		 * flush files before setattr because a later write of
		 * cached data might change timestamps or reset sugid bits
		 */
		if ((vap->va_mtime.tv_sec != VNOVAL ||
		    vap->va_atime.tv_sec != VNOVAL ||
		    vap->va_mode != VNOVAL) &&
		    vp->v_type == VREG &&
		    (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
		    curlwp, 1)) == EINTR)
			return (error);
	}
	error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
	if (vap->va_size != VNOVAL) {
		if (error) {
			np->n_size = np->n_vattr->va_size = tsize;
			uvm_vnp_setsize(vp, np->n_size);
		}
		genfs_node_unlock(vp);
	}
	VN_KNOTE(vp, NOTE_ATTRIB);
	return (error);
}

/*
 * Do an nfs setattr rpc.
 */
int
nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
{
	struct nfsv2_sattr *sp;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos;
	u_int32_t *tl;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int wccflag = NFSV3_WCCRATTR;
	char *cp2;
#endif

	nfsstats.rpccnt[NFSPROC_SETATTR]++;
	nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
	nfsm_fhtom(np, v3);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_v3attrbuild(vap, true);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		*tl = nfs_false;
	} else {
#endif
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		if (vap->va_mode == (mode_t)VNOVAL)
			sp->sa_mode = nfs_xdrneg1;
		else
			sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
		if (vap->va_uid == (uid_t)VNOVAL)
			sp->sa_uid = nfs_xdrneg1;
		else
			sp->sa_uid = txdr_unsigned(vap->va_uid);
		if (vap->va_gid == (gid_t)VNOVAL)
			sp->sa_gid = nfs_xdrneg1;
		else
			sp->sa_gid = txdr_unsigned(vap->va_gid);
		sp->sa_size = txdr_unsigned(vap->va_size);
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
#ifndef NFS_V2_ONLY
	}
#endif
	nfsm_request(np, NFSPROC_SETATTR, l, cred);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
	} else
#endif
		nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
	nfsm_reqdone;
	return (error);
}

/*
 * nfs lookup call, one step at a time...
 * First look in cache
 * If not found, do the rpc.
 */
int
nfs_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	int flags;
	struct vnode *newvp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	struct mbuf *mreq, *mrep, *md, *mb;
	long len;
	nfsfh_t *fhp;
	struct nfsnode *np;
	int cachefound;
	int error = 0, attrflag, fhsize;
	const int v3 = NFS_ISV3(dvp);

	flags = cnp->cn_flags;

	*vpp = NULLVP;
	newvp = NULLVP;
	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
	 */
	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
		if (error)
			return error;
		if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
			return EISDIR;
		vref(dvp);
		*vpp = dvp;
		return 0;
	}

	np = VTONFS(dvp);

	/*
	 * Before performing an RPC, check the name cache to see if
	 * the directory/name pair we are looking for is known already.
	 * If the directory/name pair is found in the name cache,
	 * we have to ensure the directory has not changed since
	 * the time the cache entry was created. If it has,
	 * the cache entry has to be ignored.
	 */
	cachefound = cache_lookup_raw(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_flags, NULL, vpp);
	KASSERT(dvp != *vpp);
	KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
	if (cachefound) {
		struct vattr vattr;

		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
		if (error != 0) {
			if (*vpp != NULLVP)
				vrele(*vpp);
			*vpp = NULLVP;
			return error;
		}

		if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
		    || timespeccmp(&vattr.va_mtime,
		    &VTONFS(dvp)->n_nctime, !=)) {
			if (*vpp != NULLVP) {
				vrele(*vpp);
				*vpp = NULLVP;
			}
			cache_purge1(dvp, NULL, 0, PURGE_CHILDREN);
			timespecclear(&np->n_nctime);
			goto dorpc;
		}

		if (*vpp == NULLVP) {
			/* namecache gave us a negative result */
			error = ENOENT;
			goto noentry;
		}

		/*
		 * investigate the vnode returned by cache_lookup_raw.
		 * if it isn't appropriate, do an rpc.
		 */
		newvp = *vpp;
		if ((flags & ISDOTDOT) != 0) {
			VOP_UNLOCK(dvp);
		}
		error = vn_lock(newvp, LK_SHARED);
		if ((flags & ISDOTDOT) != 0) {
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		if (error != 0) {
			/* newvp has been reclaimed. */
			vrele(newvp);
			*vpp = NULLVP;
			goto dorpc;
		}
		if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
		    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
			nfsstats.lookupcache_hits++;
			KASSERT(newvp->v_type != VNON);
			VOP_UNLOCK(newvp);
			return (0);
		}
		cache_purge1(newvp, NULL, 0, PURGE_PARENTS);
		vput(newvp);
		*vpp = NULLVP;
	}
dorpc:
#if 0
	/*
	 * because nfsv3 has the same CREATE semantics as ours,
	 * we don't have to perform LOOKUPs beforehand.
	 *
	 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
	 * XXX although we have no way to know if O_EXCL is requested or not.
	 */

	if (v3 && cnp->cn_nameiop == CREATE &&
	    (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		return (EJUSTRETURN);
	}
#endif /* 0 */

	error = 0;
	newvp = NULLVP;
	nfsstats.lookupcache_misses++;
	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
	len = cnp->cn_namelen;
	nfsm_reqhead(np, NFSPROC_LOOKUP,
	    NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
	nfsm_fhtom(np, v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
	nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
	if (error) {
		nfsm_postop_attr(dvp, attrflag, 0);
		m_freem(mrep);
		goto nfsmout;
	}
	nfsm_getfh(fhp, fhsize, v3);

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, fhp, fhsize)) {
			m_freem(mrep);
			return (EISDIR);
		}
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
		*vpp = newvp;
		m_freem(mrep);
		goto validate;
	}

	/*
	 * The postop attr handling is duplicated for each if case,
	 * because it should be done while dvp is locked (unlocking
	 * dvp is different for each case).
	 */

	if (NFS_CMPFH(np, fhp, fhsize)) {
		/*
		 * as we handle "." lookup locally, this should be
		 * a broken server.
		 */
		vref(dvp);
		newvp = dvp;
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
	} else if (flags & ISDOTDOT) {
		/*
		 * ".." lookup
		 */
		VOP_UNLOCK(dvp);
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);

#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
	} else {
		/*
		 * Other lookups.
		 */
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
	}
	if (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) {
		nfs_cache_enter(dvp, newvp, cnp);
	}
	*vpp = newvp;
	nfsm_reqdone;
	if (error) {
		/*
		 * We get here only because of errors returned by
		 * the RPC. Otherwise we'll have returned above
		 * (the nfsm_* macros will jump to nfsm_reqdone
		 * on error).
		 */
		if (error == ENOENT && cnp->cn_nameiop != CREATE) {
			nfs_cache_enter(dvp, NULL, cnp);
		}
		if (newvp != NULLVP) {
			if (newvp == dvp) {
				vrele(newvp);
			} else {
				vput(newvp);
			}
		}
noentry:
		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
		    (flags & ISLASTCN) && error == ENOENT) {
			if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
				error = EROFS;
			} else {
				error = EJUSTRETURN;
			}
		}
		*vpp = NULL;
		return error;
	}

validate:
	/*
	 * make sure we have valid type and size.
	 */

	newvp = *vpp;
	if (newvp->v_type == VNON) {
		struct vattr vattr; /* dummy */

		KASSERT(VTONFS(newvp)->n_attrstamp == 0);
		error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
		if (error) {
			vput(newvp);
			*vpp = NULL;
		}
	}
	if (error)
		return error;
	if (newvp != dvp)
		VOP_UNLOCK(newvp);
	return 0;
}

/*
 * nfs read call.
 * Just call nfs_bioread() to do the work.
 */
int
nfs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VREG)
		return EISDIR;
	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
}

/*
 * nfs readlink call
 */
int
nfs_readlink(void *v)
{
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);

	if (vp->v_type != VLNK)
		return (EPERM);

	if (np->n_rcred != NULL) {
		kauth_cred_free(np->n_rcred);
	}
	np->n_rcred = ap->a_cred;
	kauth_cred_hold(np->n_rcred);

	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
}

/*
 * Do a readlink rpc.
 * Called by nfs_doio() from below the buffer cache.
 */
int
nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	int error = 0;
	uint32_t len;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int attrflag;
#endif

	nfsstats.rpccnt[NFSPROC_READLINK]++;
	nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
	nfsm_fhtom(np, v3);
	nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_postop_attr(vp, attrflag, 0);
#endif
	if (!error) {
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
			len = fxdr_unsigned(uint32_t, *tl);
			if (len > NFS_MAXPATHLEN) {
				/*
				 * this pathname is too long for us.
				 */
				m_freem(mrep);
				/* Solaris returns EINVAL. should we follow? */
				error = ENAMETOOLONG;
				goto nfsmout;
			}
		} else
#endif
		{
			nfsm_strsiz(len, NFS_MAXPATHLEN);
		}
		nfsm_mtouio(uiop, len);
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs read rpc call
 * Ditto above
 */
int
nfs_readrpc(struct vnode *vp, struct uio *uiop)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsmount *nmp;
	int error = 0, len, retlen, tsiz, eof __unused, byte_count;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int attrflag;
#endif

#ifndef nolint
	eof = 0;
#endif
	nmp = VFSTONFS(vp->v_mount);
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return (EFBIG);
	iostat_busy(nmp->nm_stats);
	byte_count = 0; /* count bytes actually transferred */
	while (tsiz > 0) {
		nfsstats.rpccnt[NFSPROC_READ]++;
		len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
		nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
		nfsm_fhtom(np, v3);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
#ifndef NFS_V2_ONLY
		if (v3) {
			txdr_hyper(uiop->uio_offset, tl);
			*(tl + 2) = txdr_unsigned(len);
		} else
#endif
		{
			*tl++ = txdr_unsigned(uiop->uio_offset);
			*tl++ = txdr_unsigned(len);
			*tl = 0;
		}
		nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
			if (error) {
				m_freem(mrep);
				goto nfsmout;
			}
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			eof = fxdr_unsigned(int, *(tl + 1));
		} else
#endif
			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
		nfsm_strsiz(retlen, nmp->nm_rsize);
		nfsm_mtouio(uiop, retlen);
		m_freem(mrep);
		tsiz -= retlen;
		byte_count += retlen;
#ifndef NFS_V2_ONLY
		if (v3) {
			if (eof || retlen == 0)
				tsiz = 0;
		} else
#endif
		if (retlen < len)
			tsiz = 0;
	}
nfsmout:
	iostat_unbusy(nmp->nm_stats, byte_count, 1);
	return (error);
}

struct nfs_writerpc_context {
	kmutex_t nwc_lock;
	kcondvar_t nwc_cv;
	int nwc_mbufcount;
};

/*
 * Free the mbuf used to refer to protected pages during a write rpc call.
 * called at splvm.
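 * This is the external-storage free callback handed to MEXTADD() in
 * nfs_writerpc() below, so it runs when the last reference to the
 * loaned mbuf goes away.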
 */
static void
nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
{
	struct nfs_writerpc_context *ctx = arg;

	KASSERT(m != NULL);
	KASSERT(ctx != NULL);
	pool_cache_put(mb_cache, m);
	mutex_enter(&ctx->nwc_lock);
	if (--ctx->nwc_mbufcount == 0) {
		cv_signal(&ctx->nwc_cv);
	}
	mutex_exit(&ctx->nwc_lock);
}

/*
 * nfs write call
 */
int
nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
	const int v3 = NFS_ISV3(vp);
	int committed = NFSV3WRITE_FILESYNC;
	struct nfsnode *np = VTONFS(vp);
	struct nfs_writerpc_context ctx;
	int byte_count;
	size_t origresid;
#ifndef NFS_V2_ONLY
	char *cp2;
	int rlen, commit;
#endif

	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		panic("writerpc readonly vp %p", vp);
	}

#ifdef DIAGNOSTIC
	if (uiop->uio_iovcnt != 1)
		panic("nfs: writerpc iovcnt > 1");
#endif
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return EFBIG;

	mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
	cv_init(&ctx.nwc_cv, "nfsmblk");
	ctx.nwc_mbufcount = 1;

retry:
	origresid = uiop->uio_resid;
	KASSERT(origresid == uiop->uio_iov->iov_len);
	iostat_busy(nmp->nm_stats);
	byte_count = 0; /* count of bytes actually written */
	while (tsiz > 0) {
		uint32_t datalen; /* data bytes need to be allocated in mbuf */
		size_t backup;
		bool stalewriteverf = false;

		nfsstats.rpccnt[NFSPROC_WRITE]++;
		len = min(tsiz, nmp->nm_wsize);
		datalen = pageprotected ? 0 : nfsm_rndup(len);
		nfsm_reqhead(np, NFSPROC_WRITE,
		    NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
		nfsm_fhtom(np, v3);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
			txdr_hyper(uiop->uio_offset, tl);
			tl += 2;
			*tl++ = txdr_unsigned(len);
			*tl++ = txdr_unsigned(*iomode);
			*tl = txdr_unsigned(len);
		} else
#endif
		{
			u_int32_t x;

			nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
			/* Set both "begin" and "current" to non-garbage. */
			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
			*tl++ = x;	/* "begin offset" */
			*tl++ = x;	/* "current offset" */
			x = txdr_unsigned(len);
			*tl++ = x;	/* total to this offset */
			*tl = x;	/* size of this write */

		}
		if (pageprotected) {
			/*
			 * since we know pages can't be modified during i/o,
			 * no need to copy them for us.
			 */
			struct mbuf *m;
			struct iovec *iovp = uiop->uio_iov;

			m = m_get(M_WAIT, MT_DATA);
			MCLAIM(m, &nfs_mowner);
			MEXTADD(m, iovp->iov_base, len, M_MBUF,
			    nfs_writerpc_extfree, &ctx);
			m->m_flags |= M_EXT_ROMAP;
			m->m_len = len;
			mb->m_next = m;
			/*
			 * no need to maintain mb and bpos here
			 * because no one cares about them later.
			 */
#if 0
			mb = m;
			bpos = mtod(void *, mb) + mb->m_len;
#endif
			UIO_ADVANCE(uiop, len);
			uiop->uio_offset += len;
			mutex_enter(&ctx.nwc_lock);
			ctx.nwc_mbufcount++;
			mutex_exit(&ctx.nwc_lock);
			nfs_zeropad(mb, 0, nfsm_padlen(len));
		} else {
			nfsm_uiotom(uiop, len);
		}
		nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
#ifndef NFS_V2_ONLY
		if (v3) {
			wccflag = NFSV3_WCCCHK;
			nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
			if (!error) {
				nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
				    + NFSX_V3WRITEVERF);
				rlen = fxdr_unsigned(int, *tl++);
				if (rlen == 0) {
					error = NFSERR_IO;
					m_freem(mrep);
					break;
				} else if (rlen < len) {
					backup = len - rlen;
					UIO_ADVANCE(uiop, -backup);
					uiop->uio_offset -= backup;
					len = rlen;
				}
				commit = fxdr_unsigned(int, *tl++);

				/*
				 * Return the lowest commitment level
				 * obtained by any of the RPCs.
				 */
				if (committed == NFSV3WRITE_FILESYNC)
					committed = commit;
				else if (committed == NFSV3WRITE_DATASYNC &&
				    commit == NFSV3WRITE_UNSTABLE)
					committed = commit;
				mutex_enter(&nmp->nm_lock);
				if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
					memcpy(nmp->nm_writeverf, tl,
					    NFSX_V3WRITEVERF);
					nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
				} else if ((nmp->nm_iflag &
				    NFSMNT_STALEWRITEVERF) ||
				    memcmp(tl, nmp->nm_writeverf,
				    NFSX_V3WRITEVERF)) {
					memcpy(nmp->nm_writeverf, tl,
					    NFSX_V3WRITEVERF);
					/*
					 * note NFSMNT_STALEWRITEVERF
					 * if we're the first thread to
					 * notice it.
					 */
					if ((nmp->nm_iflag &
					    NFSMNT_STALEWRITEVERF) == 0) {
						stalewriteverf = true;
						nmp->nm_iflag |=
						    NFSMNT_STALEWRITEVERF;
					}
				}
				mutex_exit(&nmp->nm_lock);
			}
		} else
#endif
			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
		if (wccflag)
			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
		m_freem(mrep);
		if (error)
			break;
		tsiz -= len;
		byte_count += len;
		if (stalewriteverf) {
			*stalewriteverfp = true;
			stalewriteverf = false;
			if (committed == NFSV3WRITE_UNSTABLE &&
			    len != origresid) {
				/*
				 * if our write requests weren't atomic but
				 * unstable, data from previous iterations
				 * might already have been lost.
				 * If so, we should resend it to nfsd.
				 */
				backup = origresid - tsiz;
				UIO_ADVANCE(uiop, -backup);
				uiop->uio_offset -= backup;
				tsiz = origresid;
				goto retry;
			}
		}
	}
nfsmout:
	iostat_unbusy(nmp->nm_stats, byte_count, 0);
	if (pageprotected) {
		/*
		 * wait until mbufs go away.
		 * retransmitted mbufs can survive longer than rpc requests
		 * themselves.
		 */
		mutex_enter(&ctx.nwc_lock);
		ctx.nwc_mbufcount--;
		while (ctx.nwc_mbufcount > 0) {
			cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
		}
		mutex_exit(&ctx.nwc_lock);
	}
	mutex_destroy(&ctx.nwc_lock);
	cv_destroy(&ctx.nwc_cv);
	*iomode = committed;
	if (error)
		uiop->uio_resid = tsiz;
	return (error);
}

/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
 */
int
nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
{
	struct nfsv2_sattr *sp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	struct vnode *newvp = (struct vnode *)0;
	struct nfsnode *dnp, *np;
	char *cp2;
	char *bpos, *dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	u_int32_t rdev;
	const int v3 = NFS_ISV3(dvp);

	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = txdr_unsigned(vap->va_rdev);
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = nfs_xdrneg1;
	else {
		VOP_ABORTOP(dvp, cnp);
		return (EOPNOTSUPP);
	}
	nfsstats.rpccnt[NFSPROC_MKNOD]++;
	dnp = VTONFS(dvp);
	nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
	    + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		*tl++ = vtonfsv3_type(vap->va_type);
		nfsm_v3attrbuild(vap, false);
		if (vap->va_type == VCHR || vap->va_type == VBLK) {
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(major(vap->va_rdev));
			*tl = txdr_unsigned(minor(vap->va_rdev));
		}
	} else
#endif
	{
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = rdev;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	if (error) {
		if (newvp)
			vput(newvp);
	} else {
		nfs_cache_enter(dvp, newvp, cnp);
		*vpp = newvp;
		VOP_UNLOCK(newvp);
	}
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
int
nfs_mknod(void *v)
{
	struct vop_mknod_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	int error;

	error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
	VN_KNOTE(dvp, NOTE_WRITE);
	if (error == 0 || error == EEXIST)
		cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
	return (error);
}

/*
 * nfs file create call
 */
int
nfs_create(void *v)
{
	struct vop_create_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsv2_sattr *sp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	struct nfsnode *dnp, *np = (struct nfsnode *)0;
	struct vnode *newvp = (struct vnode *)0;
	char *bpos, *dpos, *cp2;
	int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(dvp);
	u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;

	/*
	 * Oops, not for me..
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	KASSERT(vap->va_type == VREG);

#ifdef VA_EXCLUSIVE
	if (vap->va_vaflags & VA_EXCLUSIVE) {
		excl_mode = NFSV3CREATE_EXCLUSIVE;
	}
#endif
again:
	error = 0;
	nfsstats.rpccnt[NFSPROC_CREATE]++;
	dnp = VTONFS(dvp);
	nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
	    nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
			nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
			*tl++ = cprng_fast32();
			*tl = cprng_fast32();
		} else {
			*tl = txdr_unsigned(excl_mode);
			nfsm_v3attrbuild(vap, false);
		}
	} else
#endif
	{
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = 0;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	if (error) {
		/*
		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
		 */
		if (v3 && error == ENOTSUP) {
			if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
				excl_mode = NFSV3CREATE_GUARDED;
				goto again;
			} else if (excl_mode == NFSV3CREATE_GUARDED) {
				excl_mode = NFSV3CREATE_UNCHECKED;
				goto again;
			}
		}
	} else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
		struct timespec ts;

		getnanotime(&ts);

		/*
		 * make sure that we'll update timestamps as
		 * most server implementations use them to store
		 * the create verifier.
		 *
		 * XXX it's better to use TOSERVER always.
		 */

		if (vap->va_atime.tv_sec == VNOVAL)
			vap->va_atime = ts;
		if (vap->va_mtime.tv_sec == VNOVAL)
			vap->va_mtime = ts;

		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
	}
	if (error == 0) {
		if (cnp->cn_flags & MAKEENTRY)
			nfs_cache_enter(dvp, newvp, cnp);
		else
			cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
		*ap->a_vpp = newvp;
		VOP_UNLOCK(newvp);
	} else {
		if (newvp)
			vput(newvp);
		if (error == EEXIST)
			cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
	}
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If v_usecount > 1
 *	  If a rename is not already in the works
 *	     call nfs_sillyrename() to set it up
 *     else
 *	  do the remove rpc
 */
int
nfs_remove(void *v)
{
	struct vop_remove_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode * a_vp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;

#ifndef DIAGNOSTIC
	if (vp->v_usecount < 1)
		panic("nfs_remove: bad v_usecount");
#endif
	if (vp->v_type == VDIR)
		error = EPERM;
	else if (vp->v_usecount == 1 || (np->n_sillyrename &&
	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
	    vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
		/* Do the rpc */
		if (error != EINTR)
			error = nfs_removerpc(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp);
	} else if (!np->n_sillyrename)
		error = nfs_sillyrename(dvp, vp, cnp, false);
	if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
	    vattr.va_nlink == 1) {
		np->n_flag |= NREMOVED;
	}
	NFS_INVALIDATE_ATTRCACHE(np);
	VN_KNOTE(vp, NOTE_DELETE);
	VN_KNOTE(dvp, NOTE_WRITE);
	if (dvp == vp)
		vrele(vp);
	else
		vput(vp);
	vput(dvp);
	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 */
int
nfs_removeit(struct sillyrename *sp)
{

	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
	    (struct lwp *)0));
}

/*
 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 */
int
nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
{
	u_int32_t *tl;
	char *cp;
#ifndef NFS_V2_ONLY
	int32_t t1;
	char *cp2;
#endif
	int32_t t2;
	char *bpos, *dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(dvp);
	int rexmit = 0;
	struct nfsnode *dnp = VTONFS(dvp);

	nfsstats.rpccnt[NFSPROC_REMOVE]++;
	nfsm_reqhead(dnp, NFSPROC_REMOVE,
	    NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
	nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	/*
	 * Kludge City: If the first reply to the remove rpc is lost..
	 *   the reply to the retransmitted request will be ENOENT
	 *   since the file was in fact removed
	 *   Therefore, we cheat and return success.
	 */
	if (rexmit && error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs file rename call
 */
int
nfs_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	int error;

	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	/*
	 * If the tvp exists and is in use, sillyrename it before doing the
	 * rename of the new file over it.
	 *
	 * Have sillyrename use link instead of rename if possible,
	 * so that we don't lose the file if the rename fails, and so
	 * that there's no window when the "to" file doesn't exist.
	 */
	if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
		VN_KNOTE(tvp, NOTE_DELETE);
		vput(tvp);
		tvp = NULL;
	}

	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
	    tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
	    curlwp);

	VN_KNOTE(fdvp, NOTE_WRITE);
	VN_KNOTE(tdvp, NOTE_WRITE);
	if (error == 0 || error == EEXIST) {
		if (fvp->v_type == VDIR)
			cache_purge(fvp);
		else
			cache_purge1(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
			    0);
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tvp);
		else
			cache_purge1(tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
			    0);
	}
out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	return (error);
}

/*
 * nfs file rename rpc called from nfs_remove() above
 */
int
nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
{
	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
	    sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
}

/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 */
int
nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l)
{
	u_int32_t *tl;
	char *cp;
#ifndef NFS_V2_ONLY
	int32_t t1;
	char *cp2;
#endif
	int32_t t2;
	char *bpos, *dpos;
	int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(fdvp);
	int rexmit = 0;
	struct nfsnode *fdnp = VTONFS(fdvp);

	nfsstats.rpccnt[NFSPROC_RENAME]++;
	nfsm_reqhead(fdnp, NFSPROC_RENAME,
	    (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
	    nfsm_rndup(tnamelen));
	nfsm_fhtom(fdnp, v3);
	nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
	nfsm_fhtom(VTONFS(tdvp), v3);
	nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
	nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_wcc_data(fdvp, fwccflag, 0, !error);
		nfsm_wcc_data(tdvp, twccflag, 0, !error);
	}
#endif
	nfsm_reqdone;
	VTONFS(fdvp)->n_flag |= NMODIFIED;
	VTONFS(tdvp)->n_flag |= NMODIFIED;
	if (!fwccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
	if (!twccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (rexmit && error == ENOENT)
		error = 0;
	return (error);
}

/*
 * NFS link RPC, called from nfs_link.
 * Assumes dvp and vp locked, and leaves them that way.
1997 */ 1998 1999 static int 2000 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name, 2001 size_t namelen, kauth_cred_t cred, struct lwp *l) 2002 { 2003 u_int32_t *tl; 2004 char *cp; 2005 #ifndef NFS_V2_ONLY 2006 int32_t t1; 2007 char *cp2; 2008 #endif 2009 int32_t t2; 2010 char *bpos, *dpos; 2011 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 2012 struct mbuf *mreq, *mrep, *md, *mb; 2013 const int v3 = NFS_ISV3(dvp); 2014 int rexmit = 0; 2015 struct nfsnode *np = VTONFS(vp); 2016 2017 nfsstats.rpccnt[NFSPROC_LINK]++; 2018 nfsm_reqhead(np, NFSPROC_LINK, 2019 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen)); 2020 nfsm_fhtom(np, v3); 2021 nfsm_fhtom(VTONFS(dvp), v3); 2022 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 2023 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit); 2024 #ifndef NFS_V2_ONLY 2025 if (v3) { 2026 nfsm_postop_attr(vp, attrflag, 0); 2027 nfsm_wcc_data(dvp, wccflag, 0, !error); 2028 } 2029 #endif 2030 nfsm_reqdone; 2031 2032 VTONFS(dvp)->n_flag |= NMODIFIED; 2033 if (!attrflag) 2034 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 2035 if (!wccflag) 2036 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2037 2038 /* 2039 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 2040 */ 2041 if (rexmit && error == EEXIST) 2042 error = 0; 2043 2044 return error; 2045 } 2046 2047 /* 2048 * nfs hard link create call 2049 */ 2050 int 2051 nfs_link(void *v) 2052 { 2053 struct vop_link_v2_args /* { 2054 struct vnode *a_dvp; 2055 struct vnode *a_vp; 2056 struct componentname *a_cnp; 2057 } */ *ap = v; 2058 struct vnode *vp = ap->a_vp; 2059 struct vnode *dvp = ap->a_dvp; 2060 struct componentname *cnp = ap->a_cnp; 2061 int error = 0; 2062 2063 error = vn_lock(vp, LK_EXCLUSIVE); 2064 if (error != 0) { 2065 VOP_ABORTOP(dvp, cnp); 2066 return error; 2067 } 2068 2069 /* 2070 * Push all writes to the server, so that the attribute cache 2071 * doesn't get "out of sync" with the server. 2072 * XXX There should be a better way! 
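 * (Presumably the concern is that the LINK reply carries post-op
 * attributes for vp: if dirty pages were still sitting on the client,
 * those freshly cached attributes -- size, mtime -- would no longer
 * match what the client thinks the file contains.)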
2073 */
2074 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0);
2075 
2076 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2077 cnp->cn_cred, curlwp);
2078 
2079 if (error == 0) {
2080 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0);
2081 }
2082 VOP_UNLOCK(vp);
2083 VN_KNOTE(vp, NOTE_LINK);
2084 VN_KNOTE(dvp, NOTE_WRITE);
2085 return (error);
2086 }
2087 
2088 /*
2089 * nfs symbolic link create call
2090 */
2091 int
2092 nfs_symlink(void *v)
2093 {
2094 struct vop_symlink_v3_args /* {
2095 struct vnode *a_dvp;
2096 struct vnode **a_vpp;
2097 struct componentname *a_cnp;
2098 struct vattr *a_vap;
2099 char *a_target;
2100 } */ *ap = v;
2101 struct vnode *dvp = ap->a_dvp;
2102 struct vattr *vap = ap->a_vap;
2103 struct componentname *cnp = ap->a_cnp;
2104 struct nfsv2_sattr *sp;
2105 u_int32_t *tl;
2106 char *cp;
2107 int32_t t1, t2;
2108 char *bpos, *dpos, *cp2;
2109 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2110 struct mbuf *mreq, *mrep, *md, *mb;
2111 struct vnode *newvp = (struct vnode *)0;
2112 const int v3 = NFS_ISV3(dvp);
2113 int rexmit = 0;
2114 struct nfsnode *dnp = VTONFS(dvp);
2115 
2116 *ap->a_vpp = NULL;
2117 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2118 slen = strlen(ap->a_target);
2119 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2120 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2121 nfsm_fhtom(dnp, v3);
2122 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2123 #ifndef NFS_V2_ONLY
2124 if (v3)
2125 nfsm_v3attrbuild(vap, false);
2126 #endif
2127 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2129 if (!v3) {
2130 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2131 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2132 sp->sa_uid = nfs_xdrneg1;
2133 sp->sa_gid = nfs_xdrneg1;
2134 sp->sa_size = nfs_xdrneg1;
2135 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2136 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2137 }
2139 nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred,
2140 &rexmit);
2141 #ifndef NFS_V2_ONLY
2142 if (v3) {
2143 if (!error)
2144 nfsm_mtofh(dvp, newvp, v3, gotvp);
2145 nfsm_wcc_data(dvp, wccflag, 0, !error);
2146 }
2147 #endif
2148 nfsm_reqdone;
2149 /*
2150 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
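 * (The scenario is the same as for remove above: if the first SYMLINK
 * reply is lost, the retransmitted request finds the link already
 * created and the server answers EEXIST even though the operation
 * really succeeded; rexmit tells us a retransmission happened.)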
2151 */ 2152 if (rexmit && error == EEXIST) 2153 error = 0; 2154 if (error == 0 || error == EEXIST) 2155 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0); 2156 if (error == 0 && newvp == NULL) { 2157 struct nfsnode *np = NULL; 2158 2159 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2160 cnp->cn_cred, curlwp, &np); 2161 if (error == 0) 2162 newvp = NFSTOV(np); 2163 } 2164 if (error) { 2165 if (newvp != NULL) 2166 vput(newvp); 2167 } else { 2168 *ap->a_vpp = newvp; 2169 VOP_UNLOCK(newvp); 2170 } 2171 VTONFS(dvp)->n_flag |= NMODIFIED; 2172 if (!wccflag) 2173 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2174 VN_KNOTE(dvp, NOTE_WRITE); 2175 return (error); 2176 } 2177 2178 /* 2179 * nfs make dir call 2180 */ 2181 int 2182 nfs_mkdir(void *v) 2183 { 2184 struct vop_mkdir_v3_args /* { 2185 struct vnode *a_dvp; 2186 struct vnode **a_vpp; 2187 struct componentname *a_cnp; 2188 struct vattr *a_vap; 2189 } */ *ap = v; 2190 struct vnode *dvp = ap->a_dvp; 2191 struct vattr *vap = ap->a_vap; 2192 struct componentname *cnp = ap->a_cnp; 2193 struct nfsv2_sattr *sp; 2194 u_int32_t *tl; 2195 char *cp; 2196 int32_t t1, t2; 2197 int len; 2198 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0; 2199 struct vnode *newvp = (struct vnode *)0; 2200 char *bpos, *dpos, *cp2; 2201 int error = 0, wccflag = NFSV3_WCCRATTR; 2202 int gotvp = 0; 2203 int rexmit = 0; 2204 struct mbuf *mreq, *mrep, *md, *mb; 2205 const int v3 = NFS_ISV3(dvp); 2206 2207 len = cnp->cn_namelen; 2208 nfsstats.rpccnt[NFSPROC_MKDIR]++; 2209 nfsm_reqhead(dnp, NFSPROC_MKDIR, 2210 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3)); 2211 nfsm_fhtom(dnp, v3); 2212 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 2213 #ifndef NFS_V2_ONLY 2214 if (v3) { 2215 nfsm_v3attrbuild(vap, false); 2216 } else 2217 #endif 2218 { 2219 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 2220 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 2221 sp->sa_uid = nfs_xdrneg1; 2222 sp->sa_gid = nfs_xdrneg1; 2223 sp->sa_size = nfs_xdrneg1; 2224 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 2225 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 2226 } 2227 nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit); 2228 if (!error) 2229 nfsm_mtofh(dvp, newvp, v3, gotvp); 2230 if (v3) 2231 nfsm_wcc_data(dvp, wccflag, 0, !error); 2232 nfsm_reqdone; 2233 VTONFS(dvp)->n_flag |= NMODIFIED; 2234 if (!wccflag) 2235 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2236 /* 2237 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry 2238 * if we can succeed in looking up the directory. 
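 * The lookup below also covers servers that do not return the new
 * directory's file handle in the MKDIR reply (gotvp == 0): the vnode is
 * fetched with nfs_lookitup(), and EEXIST is only reported if what we
 * find is not a directory after all (or is dvp itself).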
2239 */ 2240 if ((rexmit && error == EEXIST) || (!error && !gotvp)) { 2241 if (newvp) { 2242 vput(newvp); 2243 newvp = (struct vnode *)0; 2244 } 2245 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 2246 curlwp, &np); 2247 if (!error) { 2248 newvp = NFSTOV(np); 2249 if (newvp->v_type != VDIR || newvp == dvp) 2250 error = EEXIST; 2251 } 2252 } 2253 if (error) { 2254 if (newvp) { 2255 if (dvp != newvp) 2256 vput(newvp); 2257 else 2258 vrele(newvp); 2259 } 2260 } else { 2261 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2262 nfs_cache_enter(dvp, newvp, cnp); 2263 *ap->a_vpp = newvp; 2264 VOP_UNLOCK(newvp); 2265 } 2266 return (error); 2267 } 2268 2269 /* 2270 * nfs remove directory call 2271 */ 2272 int 2273 nfs_rmdir(void *v) 2274 { 2275 struct vop_rmdir_args /* { 2276 struct vnode *a_dvp; 2277 struct vnode *a_vp; 2278 struct componentname *a_cnp; 2279 } */ *ap = v; 2280 struct vnode *vp = ap->a_vp; 2281 struct vnode *dvp = ap->a_dvp; 2282 struct componentname *cnp = ap->a_cnp; 2283 u_int32_t *tl; 2284 char *cp; 2285 #ifndef NFS_V2_ONLY 2286 int32_t t1; 2287 char *cp2; 2288 #endif 2289 int32_t t2; 2290 char *bpos, *dpos; 2291 int error = 0, wccflag = NFSV3_WCCRATTR; 2292 int rexmit = 0; 2293 struct mbuf *mreq, *mrep, *md, *mb; 2294 const int v3 = NFS_ISV3(dvp); 2295 struct nfsnode *dnp; 2296 2297 if (dvp == vp) { 2298 vrele(dvp); 2299 vput(dvp); 2300 return (EINVAL); 2301 } 2302 nfsstats.rpccnt[NFSPROC_RMDIR]++; 2303 dnp = VTONFS(dvp); 2304 nfsm_reqhead(dnp, NFSPROC_RMDIR, 2305 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 2306 nfsm_fhtom(dnp, v3); 2307 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 2308 nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit); 2309 #ifndef NFS_V2_ONLY 2310 if (v3) 2311 nfsm_wcc_data(dvp, wccflag, 0, !error); 2312 #endif 2313 nfsm_reqdone; 2314 VTONFS(dvp)->n_flag |= NMODIFIED; 2315 if (!wccflag) 2316 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2317 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2318 VN_KNOTE(vp, NOTE_DELETE); 2319 cache_purge(vp); 2320 vput(vp); 2321 vput(dvp); 2322 /* 2323 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2324 */ 2325 if (rexmit && error == ENOENT) 2326 error = 0; 2327 return (error); 2328 } 2329 2330 /* 2331 * nfs readdir call 2332 */ 2333 int 2334 nfs_readdir(void *v) 2335 { 2336 struct vop_readdir_args /* { 2337 struct vnode *a_vp; 2338 struct uio *a_uio; 2339 kauth_cred_t a_cred; 2340 int *a_eofflag; 2341 off_t **a_cookies; 2342 int *a_ncookies; 2343 } */ *ap = v; 2344 struct vnode *vp = ap->a_vp; 2345 struct uio *uio = ap->a_uio; 2346 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2347 char *base = uio->uio_iov->iov_base; 2348 int tresid, error; 2349 size_t count, lost; 2350 struct dirent *dp; 2351 off_t *cookies = NULL; 2352 int ncookies = 0, nc; 2353 2354 if (vp->v_type != VDIR) 2355 return (EPERM); 2356 2357 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1); 2358 count = uio->uio_resid - lost; 2359 if (count <= 0) 2360 return (EINVAL); 2361 2362 /* 2363 * Call nfs_bioread() to do the real work. 2364 */ 2365 tresid = uio->uio_resid = count; 2366 error = nfs_bioread(vp, uio, 0, ap->a_cred, 2367 ap->a_cookies ? 
NFSBIO_CACHECOOKIES : 0); 2368 2369 if (!error && ap->a_cookies) { 2370 ncookies = count / 16; 2371 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK); 2372 *ap->a_cookies = cookies; 2373 } 2374 2375 if (!error && uio->uio_resid == tresid) { 2376 uio->uio_resid += lost; 2377 nfsstats.direofcache_misses++; 2378 if (ap->a_cookies) 2379 *ap->a_ncookies = 0; 2380 *ap->a_eofflag = 1; 2381 return (0); 2382 } 2383 2384 if (!error && ap->a_cookies) { 2385 /* 2386 * Only the NFS server and emulations use cookies, and they 2387 * load the directory block into system space, so we can 2388 * just look at it directly. 2389 */ 2390 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) || 2391 uio->uio_iovcnt != 1) 2392 panic("nfs_readdir: lost in space"); 2393 for (nc = 0; ncookies-- && 2394 base < (char *)uio->uio_iov->iov_base; nc++){ 2395 dp = (struct dirent *) base; 2396 if (dp->d_reclen == 0) 2397 break; 2398 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) 2399 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp); 2400 else 2401 *(cookies++) = NFS_GETCOOKIE(dp); 2402 base += dp->d_reclen; 2403 } 2404 uio->uio_resid += 2405 ((char *)uio->uio_iov->iov_base - base); 2406 uio->uio_iov->iov_len += 2407 ((char *)uio->uio_iov->iov_base - base); 2408 uio->uio_iov->iov_base = base; 2409 *ap->a_ncookies = nc; 2410 } 2411 2412 uio->uio_resid += lost; 2413 *ap->a_eofflag = 0; 2414 return (error); 2415 } 2416 2417 /* 2418 * Readdir rpc call. 2419 * Called from below the buffer cache by nfs_doio(). 2420 */ 2421 int 2422 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred) 2423 { 2424 int len, left; 2425 struct dirent *dp = NULL; 2426 u_int32_t *tl; 2427 char *cp; 2428 int32_t t1, t2; 2429 char *bpos, *dpos, *cp2; 2430 struct mbuf *mreq, *mrep, *md, *mb; 2431 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2432 struct nfsnode *dnp = VTONFS(vp); 2433 u_quad_t fileno; 2434 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1; 2435 #ifndef NFS_V2_ONLY 2436 int attrflag; 2437 #endif 2438 int nrpcs = 0, reclen; 2439 const int v3 = NFS_ISV3(vp); 2440 2441 #ifdef DIAGNOSTIC 2442 /* 2443 * Should be called from buffer cache, so only amount of 2444 * NFS_DIRBLKSIZ will be requested. 2445 */ 2446 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2447 panic("nfs readdirrpc bad uio"); 2448 #endif 2449 2450 /* 2451 * Loop around doing readdir rpc's of size nm_readdirsize 2452 * truncated to a multiple of NFS_DIRFRAGSIZ. 2453 * The stopping criteria is EOF or buffer full. 2454 */ 2455 while (more_dirs && bigenough) { 2456 /* 2457 * Heuristic: don't bother to do another RPC to further 2458 * fill up this block if there is not much room left. (< 50% 2459 * of the readdir RPC size). This wastes some buffer space 2460 * but can save up to 50% in RPC calls. 
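 * For example (illustrative numbers only): with an nm_readdirsize of
 * 8192 and 3000 bytes of uio_resid left, 3000 < 8192 / 2, so the loop
 * below stops and hands back the partially filled block instead of
 * issuing another READDIR that could return at most 3000 more bytes.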
2461 */ 2462 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2463 bigenough = 0; 2464 break; 2465 } 2466 nfsstats.rpccnt[NFSPROC_READDIR]++; 2467 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) + 2468 NFSX_READDIR(v3)); 2469 nfsm_fhtom(dnp, v3); 2470 #ifndef NFS_V2_ONLY 2471 if (v3) { 2472 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED); 2473 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2474 txdr_swapcookie3(uiop->uio_offset, tl); 2475 } else { 2476 txdr_cookie3(uiop->uio_offset, tl); 2477 } 2478 tl += 2; 2479 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2480 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2481 } else 2482 #endif 2483 { 2484 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 2485 *tl++ = txdr_unsigned(uiop->uio_offset); 2486 } 2487 *tl = txdr_unsigned(nmp->nm_readdirsize); 2488 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred); 2489 nrpcs++; 2490 #ifndef NFS_V2_ONLY 2491 if (v3) { 2492 nfsm_postop_attr(vp, attrflag, 0); 2493 if (!error) { 2494 nfsm_dissect(tl, u_int32_t *, 2495 2 * NFSX_UNSIGNED); 2496 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2497 dnp->n_cookieverf.nfsuquad[1] = *tl; 2498 } else { 2499 m_freem(mrep); 2500 goto nfsmout; 2501 } 2502 } 2503 #endif 2504 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2505 more_dirs = fxdr_unsigned(int, *tl); 2506 2507 /* loop thru the dir entries, doctoring them to 4bsd form */ 2508 while (more_dirs && bigenough) { 2509 #ifndef NFS_V2_ONLY 2510 if (v3) { 2511 nfsm_dissect(tl, u_int32_t *, 2512 3 * NFSX_UNSIGNED); 2513 fileno = fxdr_hyper(tl); 2514 len = fxdr_unsigned(int, *(tl + 2)); 2515 } else 2516 #endif 2517 { 2518 nfsm_dissect(tl, u_int32_t *, 2519 2 * NFSX_UNSIGNED); 2520 fileno = fxdr_unsigned(u_quad_t, *tl++); 2521 len = fxdr_unsigned(int, *tl); 2522 } 2523 if (len <= 0 || len > NFS_MAXNAMLEN) { 2524 error = EBADRPC; 2525 m_freem(mrep); 2526 goto nfsmout; 2527 } 2528 /* for cookie stashing */ 2529 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2530 left = NFS_DIRFRAGSIZ - blksiz; 2531 if (reclen > left) { 2532 memset(uiop->uio_iov->iov_base, 0, left); 2533 dp->d_reclen += left; 2534 UIO_ADVANCE(uiop, left); 2535 blksiz = 0; 2536 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2537 } 2538 if (reclen > uiop->uio_resid) 2539 bigenough = 0; 2540 if (bigenough) { 2541 int tlen; 2542 2543 dp = (struct dirent *)uiop->uio_iov->iov_base; 2544 dp->d_fileno = fileno; 2545 dp->d_namlen = len; 2546 dp->d_reclen = reclen; 2547 dp->d_type = DT_UNKNOWN; 2548 blksiz += reclen; 2549 if (blksiz == NFS_DIRFRAGSIZ) 2550 blksiz = 0; 2551 UIO_ADVANCE(uiop, DIRHDSIZ); 2552 nfsm_mtouio(uiop, len); 2553 tlen = reclen - (DIRHDSIZ + len); 2554 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2555 UIO_ADVANCE(uiop, tlen); 2556 } else 2557 nfsm_adv(nfsm_rndup(len)); 2558 #ifndef NFS_V2_ONLY 2559 if (v3) { 2560 nfsm_dissect(tl, u_int32_t *, 2561 3 * NFSX_UNSIGNED); 2562 } else 2563 #endif 2564 { 2565 nfsm_dissect(tl, u_int32_t *, 2566 2 * NFSX_UNSIGNED); 2567 } 2568 if (bigenough) { 2569 #ifndef NFS_V2_ONLY 2570 if (v3) { 2571 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2572 uiop->uio_offset = 2573 fxdr_swapcookie3(tl); 2574 else 2575 uiop->uio_offset = 2576 fxdr_cookie3(tl); 2577 } 2578 else 2579 #endif 2580 { 2581 uiop->uio_offset = 2582 fxdr_unsigned(off_t, *tl); 2583 } 2584 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2585 } 2586 if (v3) 2587 tl += 2; 2588 else 2589 tl++; 2590 more_dirs = fxdr_unsigned(int, *tl); 2591 } 2592 /* 2593 * If at end of rpc data, get the eof boolean 2594 */ 2595 if (!more_dirs) { 2596 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2597 more_dirs 
= (fxdr_unsigned(int, *tl) == 0); 2598 2599 /* 2600 * kludge: if we got no entries, treat it as EOF. 2601 * some server sometimes send a reply without any 2602 * entries or EOF. 2603 * although it might mean the server has very long name, 2604 * we can't handle such entries anyway. 2605 */ 2606 2607 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2608 more_dirs = 0; 2609 } 2610 m_freem(mrep); 2611 } 2612 /* 2613 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2614 * by increasing d_reclen for the last record. 2615 */ 2616 if (blksiz > 0) { 2617 left = NFS_DIRFRAGSIZ - blksiz; 2618 memset(uiop->uio_iov->iov_base, 0, left); 2619 dp->d_reclen += left; 2620 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2621 UIO_ADVANCE(uiop, left); 2622 } 2623 2624 /* 2625 * We are now either at the end of the directory or have filled the 2626 * block. 2627 */ 2628 if (bigenough) { 2629 dnp->n_direofoffset = uiop->uio_offset; 2630 dnp->n_flag |= NEOFVALID; 2631 } 2632 nfsmout: 2633 return (error); 2634 } 2635 2636 #ifndef NFS_V2_ONLY 2637 /* 2638 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2639 */ 2640 int 2641 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred) 2642 { 2643 int len, left; 2644 struct dirent *dp = NULL; 2645 u_int32_t *tl; 2646 char *cp; 2647 int32_t t1, t2; 2648 struct vnode *newvp; 2649 char *bpos, *dpos, *cp2; 2650 struct mbuf *mreq, *mrep, *md, *mb; 2651 struct nameidata nami, *ndp = &nami; 2652 struct componentname *cnp = &ndp->ni_cnd; 2653 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2654 struct nfsnode *dnp = VTONFS(vp), *np; 2655 nfsfh_t *fhp; 2656 u_quad_t fileno; 2657 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2658 int attrflag, fhsize, nrpcs = 0, reclen; 2659 struct nfs_fattr fattr, *fp; 2660 2661 #ifdef DIAGNOSTIC 2662 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2663 panic("nfs readdirplusrpc bad uio"); 2664 #endif 2665 ndp->ni_dvp = vp; 2666 newvp = NULLVP; 2667 2668 /* 2669 * Loop around doing readdir rpc's of size nm_readdirsize 2670 * truncated to a multiple of NFS_DIRFRAGSIZ. 2671 * The stopping criteria is EOF or buffer full. 
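 * Each READDIRPLUS request below carries the cookie verifier and two
 * size limits: nm_readdirsize bounds the directory entries proper and
 * nm_rsize bounds the whole reply, which unlike plain READDIR also
 * includes per-entry attributes and file handles.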
2672 */ 2673 while (more_dirs && bigenough) { 2674 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2675 bigenough = 0; 2676 break; 2677 } 2678 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2679 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS, 2680 NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2681 nfsm_fhtom(dnp, 1); 2682 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED); 2683 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2684 txdr_swapcookie3(uiop->uio_offset, tl); 2685 } else { 2686 txdr_cookie3(uiop->uio_offset, tl); 2687 } 2688 tl += 2; 2689 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2690 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2691 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2692 *tl = txdr_unsigned(nmp->nm_rsize); 2693 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred); 2694 nfsm_postop_attr(vp, attrflag, 0); 2695 if (error) { 2696 m_freem(mrep); 2697 goto nfsmout; 2698 } 2699 nrpcs++; 2700 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2701 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2702 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2703 more_dirs = fxdr_unsigned(int, *tl); 2704 2705 /* loop thru the dir entries, doctoring them to 4bsd form */ 2706 while (more_dirs && bigenough) { 2707 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2708 fileno = fxdr_hyper(tl); 2709 len = fxdr_unsigned(int, *(tl + 2)); 2710 if (len <= 0 || len > NFS_MAXNAMLEN) { 2711 error = EBADRPC; 2712 m_freem(mrep); 2713 goto nfsmout; 2714 } 2715 /* for cookie stashing */ 2716 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2717 left = NFS_DIRFRAGSIZ - blksiz; 2718 if (reclen > left) { 2719 /* 2720 * DIRFRAGSIZ is aligned, no need to align 2721 * again here. 2722 */ 2723 memset(uiop->uio_iov->iov_base, 0, left); 2724 dp->d_reclen += left; 2725 UIO_ADVANCE(uiop, left); 2726 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2727 blksiz = 0; 2728 } 2729 if (reclen > uiop->uio_resid) 2730 bigenough = 0; 2731 if (bigenough) { 2732 int tlen; 2733 2734 dp = (struct dirent *)uiop->uio_iov->iov_base; 2735 dp->d_fileno = fileno; 2736 dp->d_namlen = len; 2737 dp->d_reclen = reclen; 2738 dp->d_type = DT_UNKNOWN; 2739 blksiz += reclen; 2740 if (blksiz == NFS_DIRFRAGSIZ) 2741 blksiz = 0; 2742 UIO_ADVANCE(uiop, DIRHDSIZ); 2743 nfsm_mtouio(uiop, len); 2744 tlen = reclen - (DIRHDSIZ + len); 2745 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2746 UIO_ADVANCE(uiop, tlen); 2747 cnp->cn_nameptr = dp->d_name; 2748 cnp->cn_namelen = dp->d_namlen; 2749 } else 2750 nfsm_adv(nfsm_rndup(len)); 2751 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2752 if (bigenough) { 2753 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2754 uiop->uio_offset = 2755 fxdr_swapcookie3(tl); 2756 else 2757 uiop->uio_offset = 2758 fxdr_cookie3(tl); 2759 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2760 } 2761 tl += 2; 2762 2763 /* 2764 * Since the attributes are before the file handle 2765 * (sigh), we must skip over the attributes and then 2766 * come back and get them. 
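 * (Concretely, the fattr is copied aside with memcpy() below, the file
 * handle is decoded and used to find or create the vnode, and only then
 * is the saved copy handed to nfs_loadattrcache().)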
2767 */ 2768 attrflag = fxdr_unsigned(int, *tl); 2769 if (attrflag) { 2770 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR); 2771 memcpy(&fattr, fp, NFSX_V3FATTR); 2772 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2773 doit = fxdr_unsigned(int, *tl); 2774 if (doit) { 2775 nfsm_getfh(fhp, fhsize, 1); 2776 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2777 vref(vp); 2778 newvp = vp; 2779 np = dnp; 2780 } else { 2781 error = nfs_nget1(vp->v_mount, fhp, 2782 fhsize, &np, LK_NOWAIT); 2783 if (!error) 2784 newvp = NFSTOV(np); 2785 } 2786 if (!error) { 2787 nfs_loadattrcache(&newvp, &fattr, 0, 0); 2788 if (bigenough) { 2789 dp->d_type = 2790 IFTODT(VTTOIF(np->n_vattr->va_type)); 2791 if (cnp->cn_namelen <= NCHNAMLEN) { 2792 ndp->ni_vp = newvp; 2793 nfs_cache_enter(ndp->ni_dvp, 2794 ndp->ni_vp, cnp); 2795 } 2796 } 2797 } 2798 error = 0; 2799 } 2800 } else { 2801 /* Just skip over the file handle */ 2802 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2803 i = fxdr_unsigned(int, *tl); 2804 nfsm_adv(nfsm_rndup(i)); 2805 } 2806 if (newvp != NULLVP) { 2807 if (newvp == vp) 2808 vrele(newvp); 2809 else 2810 vput(newvp); 2811 newvp = NULLVP; 2812 } 2813 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2814 more_dirs = fxdr_unsigned(int, *tl); 2815 } 2816 /* 2817 * If at end of rpc data, get the eof boolean 2818 */ 2819 if (!more_dirs) { 2820 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2821 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2822 2823 /* 2824 * kludge: see a comment in nfs_readdirrpc. 2825 */ 2826 2827 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2828 more_dirs = 0; 2829 } 2830 m_freem(mrep); 2831 } 2832 /* 2833 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2834 * by increasing d_reclen for the last record. 2835 */ 2836 if (blksiz > 0) { 2837 left = NFS_DIRFRAGSIZ - blksiz; 2838 memset(uiop->uio_iov->iov_base, 0, left); 2839 dp->d_reclen += left; 2840 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2841 UIO_ADVANCE(uiop, left); 2842 } 2843 2844 /* 2845 * We are now either at the end of the directory or have filled the 2846 * block. 2847 */ 2848 if (bigenough) { 2849 dnp->n_direofoffset = uiop->uio_offset; 2850 dnp->n_flag |= NEOFVALID; 2851 } 2852 nfsmout: 2853 if (newvp != NULLVP) { 2854 if(newvp == vp) 2855 vrele(newvp); 2856 else 2857 vput(newvp); 2858 } 2859 return (error); 2860 } 2861 #endif 2862 2863 /* 2864 * Silly rename. To make the NFS filesystem that is stateless look a little 2865 * more like the "ufs" a remove of an active vnode is translated to a rename 2866 * to a funny looking filename that is removed by nfs_inactive on the 2867 * nfsnode. There is the potential for another process on a different client 2868 * to create the same funny name between the nfs_lookitup() fails and the 2869 * nfs_rename() completes, but... 
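 * The generated names look like ".nfsA12af4.4" (pid 0x12af used here as
 * an example): the four hex digits come from the caller's pid and the
 * leading 'A' is bumped towards 'z' until nfs_lookitup() reports that
 * the name is free.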
2870 */ 2871 int 2872 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink) 2873 { 2874 struct sillyrename *sp; 2875 struct nfsnode *np; 2876 int error; 2877 pid_t pid; 2878 2879 cache_purge(dvp); 2880 np = VTONFS(vp); 2881 #ifndef DIAGNOSTIC 2882 if (vp->v_type == VDIR) 2883 panic("nfs: sillyrename dir"); 2884 #endif 2885 sp = kmem_alloc(sizeof(*sp), KM_SLEEP); 2886 sp->s_cred = kauth_cred_dup(cnp->cn_cred); 2887 sp->s_dvp = dvp; 2888 vref(dvp); 2889 2890 /* Fudge together a funny name */ 2891 pid = curlwp->l_proc->p_pid; 2892 memcpy(sp->s_name, ".nfsAxxxx4.4", 13); 2893 sp->s_namlen = 12; 2894 sp->s_name[8] = hexdigits[pid & 0xf]; 2895 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf]; 2896 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf]; 2897 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf]; 2898 2899 /* Try lookitups until we get one that isn't there */ 2900 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2901 curlwp, (struct nfsnode **)0) == 0) { 2902 sp->s_name[4]++; 2903 if (sp->s_name[4] > 'z') { 2904 error = EINVAL; 2905 goto bad; 2906 } 2907 } 2908 if (dolink) { 2909 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen, 2910 sp->s_cred, curlwp); 2911 /* 2912 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP. 2913 */ 2914 if (error == ENOTSUP) { 2915 error = nfs_renameit(dvp, cnp, sp); 2916 } 2917 } else { 2918 error = nfs_renameit(dvp, cnp, sp); 2919 } 2920 if (error) 2921 goto bad; 2922 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2923 curlwp, &np); 2924 np->n_sillyrename = sp; 2925 return (0); 2926 bad: 2927 vrele(sp->s_dvp); 2928 kauth_cred_free(sp->s_cred); 2929 kmem_free(sp, sizeof(*sp)); 2930 return (error); 2931 } 2932 2933 /* 2934 * Look up a file name and optionally either update the file handle or 2935 * allocate an nfsnode, depending on the value of npp. 
2936 * npp == NULL --> just do the lookup 2937 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2938 * handled too 2939 * *npp != NULL --> update the file handle in the vnode 2940 */ 2941 int 2942 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp) 2943 { 2944 u_int32_t *tl; 2945 char *cp; 2946 int32_t t1, t2; 2947 struct vnode *newvp = (struct vnode *)0; 2948 struct nfsnode *np, *dnp = VTONFS(dvp); 2949 char *bpos, *dpos, *cp2; 2950 int error = 0, ofhlen, fhlen; 2951 #ifndef NFS_V2_ONLY 2952 int attrflag; 2953 #endif 2954 struct mbuf *mreq, *mrep, *md, *mb; 2955 nfsfh_t *ofhp, *nfhp; 2956 const int v3 = NFS_ISV3(dvp); 2957 2958 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2959 nfsm_reqhead(dnp, NFSPROC_LOOKUP, 2960 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 2961 nfsm_fhtom(dnp, v3); 2962 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2963 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred); 2964 if (npp && !error) { 2965 nfsm_getfh(nfhp, fhlen, v3); 2966 if (*npp) { 2967 np = *npp; 2968 newvp = NFSTOV(np); 2969 ofhlen = np->n_fhsize; 2970 ofhp = kmem_alloc(ofhlen, KM_SLEEP); 2971 memcpy(ofhp, np->n_fhp, ofhlen); 2972 error = vcache_rekey_enter(newvp->v_mount, newvp, 2973 ofhp, ofhlen, nfhp, fhlen); 2974 if (error) { 2975 kmem_free(ofhp, ofhlen); 2976 m_freem(mrep); 2977 return error; 2978 } 2979 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) { 2980 kmem_free(np->n_fhp, np->n_fhsize); 2981 np->n_fhp = &np->n_fh; 2982 } 2983 #if NFS_SMALLFH < NFSX_V3FHMAX 2984 else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH) 2985 np->n_fhp = kmem_alloc(fhlen, KM_SLEEP); 2986 #endif 2987 memcpy(np->n_fhp, nfhp, fhlen); 2988 np->n_fhsize = fhlen; 2989 vcache_rekey_exit(newvp->v_mount, newvp, 2990 ofhp, ofhlen, np->n_fhp, fhlen); 2991 kmem_free(ofhp, ofhlen); 2992 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2993 vref(dvp); 2994 newvp = dvp; 2995 np = dnp; 2996 } else { 2997 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2998 if (error) { 2999 m_freem(mrep); 3000 return (error); 3001 } 3002 newvp = NFSTOV(np); 3003 } 3004 #ifndef NFS_V2_ONLY 3005 if (v3) { 3006 nfsm_postop_attr(newvp, attrflag, 0); 3007 if (!attrflag && *npp == NULL) { 3008 m_freem(mrep); 3009 vput(newvp); 3010 return (ENOENT); 3011 } 3012 } else 3013 #endif 3014 nfsm_loadattr(newvp, (struct vattr *)0, 0); 3015 } 3016 nfsm_reqdone; 3017 if (npp && *npp == NULL) { 3018 if (error) { 3019 if (newvp) 3020 vput(newvp); 3021 } else 3022 *npp = np; 3023 } 3024 return (error); 3025 } 3026 3027 #ifndef NFS_V2_ONLY 3028 /* 3029 * Nfs Version 3 commit rpc 3030 */ 3031 int 3032 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l) 3033 { 3034 char *cp; 3035 u_int32_t *tl; 3036 int32_t t1, t2; 3037 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 3038 char *bpos, *dpos, *cp2; 3039 int error = 0, wccflag = NFSV3_WCCRATTR; 3040 struct mbuf *mreq, *mrep, *md, *mb; 3041 struct nfsnode *np; 3042 3043 KASSERT(NFS_ISV3(vp)); 3044 3045 #ifdef NFS_DEBUG_COMMIT 3046 printf("commit %lu - %lu\n", (unsigned long)offset, 3047 (unsigned long)(offset + cnt)); 3048 #endif 3049 3050 mutex_enter(&nmp->nm_lock); 3051 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) { 3052 mutex_exit(&nmp->nm_lock); 3053 return (0); 3054 } 3055 mutex_exit(&nmp->nm_lock); 3056 nfsstats.rpccnt[NFSPROC_COMMIT]++; 3057 np = VTONFS(vp); 3058 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1)); 3059 nfsm_fhtom(np, 1); 3060 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3061 txdr_hyper(offset, tl); 3062 tl += 
2;
3063 *tl = txdr_unsigned(cnt);
3064 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred);
3065 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
3066 if (!error) {
3067 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
3068 mutex_enter(&nmp->nm_lock);
3069 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) ||
3070 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) {
3071 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF);
3072 error = NFSERR_STALEWRITEVERF;
3073 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3074 }
3075 mutex_exit(&nmp->nm_lock);
3076 }
3077 nfsm_reqdone;
3078 return (error);
3079 }
3080 #endif
3081 
3082 /*
3083 * Kludge City:
3084 * - make nfs_bmap() essentially a no-op that does no translation
3085 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3086 * (Maybe I could use the process's page mapping, but I was concerned that
3087 * Kernel Write might not be enabled and also figured copyout() would do
3088 * a lot more work than memcpy() and also it currently happens in the
3089 * context of the swapper process (2).)
3090 */
3091 int
3092 nfs_bmap(void *v)
3093 {
3094 struct vop_bmap_args /* {
3095 struct vnode *a_vp;
3096 daddr_t a_bn;
3097 struct vnode **a_vpp;
3098 daddr_t *a_bnp;
3099 int *a_runp;
3100 } */ *ap = v;
3101 struct vnode *vp = ap->a_vp;
3102 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3103 
3104 if (ap->a_vpp != NULL)
3105 *ap->a_vpp = vp;
3106 if (ap->a_bnp != NULL)
3107 *ap->a_bnp = ap->a_bn << bshift;
3108 if (ap->a_runp != NULL)
3109 *ap->a_runp = 1024 * 1024; /* XXX */
3110 return (0);
3111 }
3112 
3113 /*
3114 * Strategy routine.
3115 * For async requests when nfsiod(s) are running, queue the request by
3116 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
3117 * request.
3118 */
3119 int
3120 nfs_strategy(void *v)
3121 {
3122 struct vop_strategy_args *ap = v;
3123 struct buf *bp = ap->a_bp;
3124 int error = 0;
3125 
3126 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3127 panic("nfs physio/async");
3128 
3129 /*
3130 * If the op is asynchronous and an i/o daemon is waiting,
3131 * queue the request, wake it up and wait for completion;
3132 * otherwise just do it ourselves.
3133 */
3134 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3135 error = nfs_doio(bp);
3136 return (error);
3137 }
3138 
3139 /*
3140 * fsync vnode op. Just call nfs_flush() with commit == 1.
3141 */
3142 /* ARGSUSED */
3143 int
3144 nfs_fsync(void *v)
3145 {
3146 struct vop_fsync_args /* {
3147 struct vnodeop_desc *a_desc;
3148 struct vnode * a_vp;
3149 kauth_cred_t a_cred;
3150 int a_flags;
3151 off_t a_offlo;
3152 off_t a_offhi;
3153 struct lwp * a_l;
3154 } */ *ap = v;
3155 
3156 struct vnode *vp = ap->a_vp;
3157 
3158 if (vp->v_type != VREG)
3159 return 0;
3160 
3161 return (nfs_flush(vp, ap->a_cred,
3162 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3163 }
3164 
3165 /*
3166 * Flush all the data associated with a vnode.
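 * This is a synchronous push of every page: VOP_PUTPAGES() is called
 * below with PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO, and any write error
 * recorded earlier by asynchronous I/O (NWRITEERR/n_error) is reported
 * to the caller here and then cleared.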
3167 */ 3168 int 3169 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l, 3170 int commit) 3171 { 3172 struct nfsnode *np = VTONFS(vp); 3173 int error; 3174 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO; 3175 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist); 3176 3177 mutex_enter(vp->v_interlock); 3178 error = VOP_PUTPAGES(vp, 0, 0, flushflags); 3179 if (np->n_flag & NWRITEERR) { 3180 error = np->n_error; 3181 np->n_flag &= ~NWRITEERR; 3182 } 3183 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0); 3184 return (error); 3185 } 3186 3187 /* 3188 * Return POSIX pathconf information applicable to nfs. 3189 * 3190 * N.B. The NFS V2 protocol doesn't support this RPC. 3191 */ 3192 /* ARGSUSED */ 3193 int 3194 nfs_pathconf(void *v) 3195 { 3196 struct vop_pathconf_args /* { 3197 struct vnode *a_vp; 3198 int a_name; 3199 register_t *a_retval; 3200 } */ *ap = v; 3201 struct nfsv3_pathconf *pcp; 3202 struct vnode *vp = ap->a_vp; 3203 struct mbuf *mreq, *mrep, *md, *mb; 3204 int32_t t1, t2; 3205 u_int32_t *tl; 3206 char *bpos, *dpos, *cp, *cp2; 3207 int error = 0, attrflag; 3208 #ifndef NFS_V2_ONLY 3209 struct nfsmount *nmp; 3210 unsigned int l; 3211 u_int64_t maxsize; 3212 #endif 3213 const int v3 = NFS_ISV3(vp); 3214 struct nfsnode *np = VTONFS(vp); 3215 3216 switch (ap->a_name) { 3217 /* Names that can be resolved locally. */ 3218 case _PC_PIPE_BUF: 3219 *ap->a_retval = PIPE_BUF; 3220 break; 3221 case _PC_SYNC_IO: 3222 *ap->a_retval = 1; 3223 break; 3224 /* Names that cannot be resolved locally; do an RPC, if possible. */ 3225 case _PC_LINK_MAX: 3226 case _PC_NAME_MAX: 3227 case _PC_CHOWN_RESTRICTED: 3228 case _PC_NO_TRUNC: 3229 if (!v3) { 3230 error = EINVAL; 3231 break; 3232 } 3233 nfsstats.rpccnt[NFSPROC_PATHCONF]++; 3234 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1)); 3235 nfsm_fhtom(np, 1); 3236 nfsm_request(np, NFSPROC_PATHCONF, 3237 curlwp, curlwp->l_cred); /* XXX */ 3238 nfsm_postop_attr(vp, attrflag, 0); 3239 if (!error) { 3240 nfsm_dissect(pcp, struct nfsv3_pathconf *, 3241 NFSX_V3PATHCONF); 3242 switch (ap->a_name) { 3243 case _PC_LINK_MAX: 3244 *ap->a_retval = 3245 fxdr_unsigned(register_t, pcp->pc_linkmax); 3246 break; 3247 case _PC_NAME_MAX: 3248 *ap->a_retval = 3249 fxdr_unsigned(register_t, pcp->pc_namemax); 3250 break; 3251 case _PC_CHOWN_RESTRICTED: 3252 *ap->a_retval = 3253 (pcp->pc_chownrestricted == nfs_true); 3254 break; 3255 case _PC_NO_TRUNC: 3256 *ap->a_retval = 3257 (pcp->pc_notrunc == nfs_true); 3258 break; 3259 } 3260 } 3261 nfsm_reqdone; 3262 break; 3263 case _PC_FILESIZEBITS: 3264 #ifndef NFS_V2_ONLY 3265 if (v3) { 3266 nmp = VFSTONFS(vp->v_mount); 3267 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0) 3268 if ((error = nfs_fsinfo(nmp, vp, 3269 curlwp->l_cred, curlwp)) != 0) /* XXX */ 3270 break; 3271 for (l = 0, maxsize = nmp->nm_maxfilesize; 3272 (maxsize >> l) > 0; l++) 3273 ; 3274 *ap->a_retval = l + 1; 3275 } else 3276 #endif 3277 { 3278 *ap->a_retval = 32; /* NFS V2 limitation */ 3279 } 3280 break; 3281 default: 3282 error = EINVAL; 3283 break; 3284 } 3285 3286 return (error); 3287 } 3288 3289 /* 3290 * NFS advisory byte-level locks. 3291 */ 3292 int 3293 nfs_advlock(void *v) 3294 { 3295 struct vop_advlock_args /* { 3296 struct vnode *a_vp; 3297 void *a_id; 3298 int a_op; 3299 struct flock *a_fl; 3300 int a_flags; 3301 } */ *ap = v; 3302 struct nfsnode *np = VTONFS(ap->a_vp); 3303 3304 return lf_advlock(ap, &np->n_lockf, np->n_size); 3305 } 3306 3307 /* 3308 * Print out the contents of an nfsnode. 
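 * The output is a single line of the form
 * "tag VT_NFS, fileid 1234 fsid 0x1a2b" (illustrative values), followed
 * by the fifo printout when the vnode is a VFIFO.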
3309 */ 3310 int 3311 nfs_print(void *v) 3312 { 3313 struct vop_print_args /* { 3314 struct vnode *a_vp; 3315 } */ *ap = v; 3316 struct vnode *vp = ap->a_vp; 3317 struct nfsnode *np = VTONFS(vp); 3318 3319 printf("tag VT_NFS, fileid %lld fsid 0x%llx", 3320 (unsigned long long)np->n_vattr->va_fileid, 3321 (unsigned long long)np->n_vattr->va_fsid); 3322 if (vp->v_type == VFIFO) 3323 VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v); 3324 printf("\n"); 3325 return (0); 3326 } 3327 3328 /* 3329 * nfs unlock wrapper. 3330 */ 3331 int 3332 nfs_unlock(void *v) 3333 { 3334 struct vop_unlock_args /* { 3335 struct vnode *a_vp; 3336 int a_flags; 3337 } */ *ap = v; 3338 struct vnode *vp = ap->a_vp; 3339 3340 /* 3341 * VOP_UNLOCK can be called by nfs_loadattrcache 3342 * with v_data == 0. 3343 */ 3344 if (VTONFS(vp)) { 3345 nfs_delayedtruncate(vp); 3346 } 3347 3348 return genfs_unlock(v); 3349 } 3350 3351 /* 3352 * nfs special file access vnode op. 3353 * Essentially just get vattr and then imitate iaccess() since the device is 3354 * local to the client. 3355 */ 3356 int 3357 nfsspec_access(void *v) 3358 { 3359 struct vop_access_args /* { 3360 struct vnode *a_vp; 3361 int a_mode; 3362 kauth_cred_t a_cred; 3363 struct lwp *a_l; 3364 } */ *ap = v; 3365 struct vattr va; 3366 struct vnode *vp = ap->a_vp; 3367 int error; 3368 3369 error = VOP_GETATTR(vp, &va, ap->a_cred); 3370 if (error) 3371 return (error); 3372 3373 /* 3374 * Disallow write attempts on filesystems mounted read-only; 3375 * unless the file is a socket, fifo, or a block or character 3376 * device resident on the filesystem. 3377 */ 3378 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3379 switch (vp->v_type) { 3380 case VREG: 3381 case VDIR: 3382 case VLNK: 3383 return (EROFS); 3384 default: 3385 break; 3386 } 3387 } 3388 3389 return kauth_authorize_vnode(ap->a_cred, KAUTH_ACCESS_ACTION(ap->a_mode, 3390 va.va_type, va.va_mode), vp, NULL, genfs_can_access(va.va_type, 3391 va.va_mode, va.va_uid, va.va_gid, ap->a_mode, ap->a_cred)); 3392 } 3393 3394 /* 3395 * Read wrapper for special devices. 3396 */ 3397 int 3398 nfsspec_read(void *v) 3399 { 3400 struct vop_read_args /* { 3401 struct vnode *a_vp; 3402 struct uio *a_uio; 3403 int a_ioflag; 3404 kauth_cred_t a_cred; 3405 } */ *ap = v; 3406 struct nfsnode *np = VTONFS(ap->a_vp); 3407 3408 /* 3409 * Set access flag. 3410 */ 3411 np->n_flag |= NACC; 3412 getnanotime(&np->n_atim); 3413 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap)); 3414 } 3415 3416 /* 3417 * Write wrapper for special devices. 3418 */ 3419 int 3420 nfsspec_write(void *v) 3421 { 3422 struct vop_write_args /* { 3423 struct vnode *a_vp; 3424 struct uio *a_uio; 3425 int a_ioflag; 3426 kauth_cred_t a_cred; 3427 } */ *ap = v; 3428 struct nfsnode *np = VTONFS(ap->a_vp); 3429 3430 /* 3431 * Set update flag. 3432 */ 3433 np->n_flag |= NUPD; 3434 getnanotime(&np->n_mtim); 3435 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap)); 3436 } 3437 3438 /* 3439 * Close wrapper for special devices. 3440 * 3441 * Update the times on the nfsnode then do device close. 
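 * The access/update times noted by the read and write wrappers above
 * (NACC/NUPD) are pushed back with VOP_SETATTR() only when this is the
 * last reference to the vnode and the mount is not read-only; otherwise
 * they just stay pending on the nfsnode (NCHG).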
3442 */ 3443 int 3444 nfsspec_close(void *v) 3445 { 3446 struct vop_close_args /* { 3447 struct vnode *a_vp; 3448 int a_fflag; 3449 kauth_cred_t a_cred; 3450 struct lwp *a_l; 3451 } */ *ap = v; 3452 struct vnode *vp = ap->a_vp; 3453 struct nfsnode *np = VTONFS(vp); 3454 struct vattr vattr; 3455 3456 if (np->n_flag & (NACC | NUPD)) { 3457 np->n_flag |= NCHG; 3458 if (vp->v_usecount == 1 && 3459 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3460 vattr_null(&vattr); 3461 if (np->n_flag & NACC) 3462 vattr.va_atime = np->n_atim; 3463 if (np->n_flag & NUPD) 3464 vattr.va_mtime = np->n_mtim; 3465 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3466 } 3467 } 3468 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap)); 3469 } 3470 3471 /* 3472 * Read wrapper for fifos. 3473 */ 3474 int 3475 nfsfifo_read(void *v) 3476 { 3477 struct vop_read_args /* { 3478 struct vnode *a_vp; 3479 struct uio *a_uio; 3480 int a_ioflag; 3481 kauth_cred_t a_cred; 3482 } */ *ap = v; 3483 struct nfsnode *np = VTONFS(ap->a_vp); 3484 3485 /* 3486 * Set access flag. 3487 */ 3488 np->n_flag |= NACC; 3489 getnanotime(&np->n_atim); 3490 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap)); 3491 } 3492 3493 /* 3494 * Write wrapper for fifos. 3495 */ 3496 int 3497 nfsfifo_write(void *v) 3498 { 3499 struct vop_write_args /* { 3500 struct vnode *a_vp; 3501 struct uio *a_uio; 3502 int a_ioflag; 3503 kauth_cred_t a_cred; 3504 } */ *ap = v; 3505 struct nfsnode *np = VTONFS(ap->a_vp); 3506 3507 /* 3508 * Set update flag. 3509 */ 3510 np->n_flag |= NUPD; 3511 getnanotime(&np->n_mtim); 3512 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); 3513 } 3514 3515 /* 3516 * Close wrapper for fifos. 3517 * 3518 * Update the times on the nfsnode then do fifo close. 3519 */ 3520 int 3521 nfsfifo_close(void *v) 3522 { 3523 struct vop_close_args /* { 3524 struct vnode *a_vp; 3525 int a_fflag; 3526 kauth_cred_t a_cred; 3527 struct lwp *a_l; 3528 } */ *ap = v; 3529 struct vnode *vp = ap->a_vp; 3530 struct nfsnode *np = VTONFS(vp); 3531 struct vattr vattr; 3532 3533 if (np->n_flag & (NACC | NUPD)) { 3534 struct timespec ts; 3535 3536 getnanotime(&ts); 3537 if (np->n_flag & NACC) 3538 np->n_atim = ts; 3539 if (np->n_flag & NUPD) 3540 np->n_mtim = ts; 3541 np->n_flag |= NCHG; 3542 if (vp->v_usecount == 1 && 3543 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3544 vattr_null(&vattr); 3545 if (np->n_flag & NACC) 3546 vattr.va_atime = np->n_atim; 3547 if (np->n_flag & NUPD) 3548 vattr.va_mtime = np->n_mtim; 3549 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3550 } 3551 } 3552 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap)); 3553 } 3554