1 /* $NetBSD: nfs_vnops.c,v 1.301 2013/11/15 14:39:53 nisimura Exp $ */ 2 3 /* 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Rick Macklem at The University of Guelph. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)nfs_vnops.c 8.19 (Berkeley) 7/31/95 35 */ 36 37 /* 38 * vnode op calls for Sun NFS version 2 and 3 39 */ 40 41 #include <sys/cdefs.h> 42 __KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.301 2013/11/15 14:39:53 nisimura Exp $"); 43 44 #ifdef _KERNEL_OPT 45 #include "opt_nfs.h" 46 #include "opt_uvmhist.h" 47 #endif 48 49 #include <sys/param.h> 50 #include <sys/proc.h> 51 #include <sys/kernel.h> 52 #include <sys/systm.h> 53 #include <sys/resourcevar.h> 54 #include <sys/mount.h> 55 #include <sys/buf.h> 56 #include <sys/condvar.h> 57 #include <sys/disk.h> 58 #include <sys/malloc.h> 59 #include <sys/kmem.h> 60 #include <sys/mbuf.h> 61 #include <sys/mutex.h> 62 #include <sys/namei.h> 63 #include <sys/vnode.h> 64 #include <sys/dirent.h> 65 #include <sys/fcntl.h> 66 #include <sys/hash.h> 67 #include <sys/lockf.h> 68 #include <sys/stat.h> 69 #include <sys/unistd.h> 70 #include <sys/kauth.h> 71 #include <sys/cprng.h> 72 73 #include <uvm/uvm_extern.h> 74 #include <uvm/uvm.h> 75 76 #include <miscfs/fifofs/fifo.h> 77 #include <miscfs/genfs/genfs.h> 78 #include <miscfs/genfs/genfs_node.h> 79 #include <miscfs/specfs/specdev.h> 80 81 #include <nfs/rpcv2.h> 82 #include <nfs/nfsproto.h> 83 #include <nfs/nfs.h> 84 #include <nfs/nfsnode.h> 85 #include <nfs/nfsmount.h> 86 #include <nfs/xdr_subs.h> 87 #include <nfs/nfsm_subs.h> 88 #include <nfs/nfs_var.h> 89 90 #include <net/if.h> 91 #include <netinet/in.h> 92 #include <netinet/in_var.h> 93 94 /* 95 * Global vfs data structures for nfs 96 */ 97 int (**nfsv2_vnodeop_p)(void *); 98 const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = { 99 { &vop_default_desc, vn_default_error }, 100 { &vop_lookup_desc, nfs_lookup }, /* lookup */ 101 { &vop_create_desc, nfs_create }, /* create */ 102 { &vop_mknod_desc, nfs_mknod }, /* mknod */ 103 { &vop_open_desc, nfs_open }, /* open */ 104 { &vop_close_desc, nfs_close }, /* close */ 105 { &vop_access_desc, nfs_access }, /* access */ 106 { &vop_getattr_desc, nfs_getattr }, /* getattr */ 107 { &vop_setattr_desc, nfs_setattr }, /* setattr */ 108 { &vop_read_desc, nfs_read }, /* read */ 109 { &vop_write_desc, nfs_write }, /* write */ 110 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 111 { &vop_ioctl_desc, nfs_ioctl }, /* ioctl */ 112 { &vop_poll_desc, nfs_poll }, /* poll */ 113 { &vop_kqfilter_desc, nfs_kqfilter }, /* kqfilter */ 114 { &vop_revoke_desc, nfs_revoke }, /* revoke */ 115 { &vop_mmap_desc, nfs_mmap }, /* mmap */ 116 { &vop_fsync_desc, nfs_fsync }, /* fsync */ 117 { &vop_seek_desc, nfs_seek }, /* seek */ 118 { &vop_remove_desc, nfs_remove }, /* remove */ 119 { &vop_link_desc, nfs_link }, /* link */ 120 { &vop_rename_desc, nfs_rename }, /* rename */ 121 { &vop_mkdir_desc, nfs_mkdir }, /* mkdir */ 122 { &vop_rmdir_desc, nfs_rmdir }, /* rmdir */ 123 { &vop_symlink_desc, nfs_symlink }, /* symlink */ 124 { &vop_readdir_desc, nfs_readdir }, /* readdir */ 125 { &vop_readlink_desc, nfs_readlink }, /* readlink */ 126 { &vop_abortop_desc, nfs_abortop }, /* abortop */ 127 { &vop_inactive_desc, nfs_inactive }, /* inactive */ 128 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */ 129 { &vop_lock_desc, nfs_lock }, /* lock */ 130 { &vop_unlock_desc, nfs_unlock }, /* unlock */ 131 { &vop_bmap_desc, nfs_bmap }, /* bmap */ 132 { &vop_strategy_desc, nfs_strategy }, /* strategy */ 133 { &vop_print_desc, nfs_print }, /* print */ 134 { &vop_islocked_desc, nfs_islocked }, /* islocked */ 135 { &vop_pathconf_desc, nfs_pathconf }, /* pathconf */ 136 { &vop_advlock_desc, nfs_advlock }, /* advlock */ 137 { &vop_bwrite_desc, genfs_badop 
}, /* bwrite */ 138 { &vop_getpages_desc, nfs_getpages }, /* getpages */ 139 { &vop_putpages_desc, genfs_putpages }, /* putpages */ 140 { NULL, NULL } 141 }; 142 const struct vnodeopv_desc nfsv2_vnodeop_opv_desc = 143 { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries }; 144 145 /* 146 * Special device vnode ops 147 */ 148 int (**spec_nfsv2nodeop_p)(void *); 149 const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = { 150 { &vop_default_desc, vn_default_error }, 151 { &vop_lookup_desc, spec_lookup }, /* lookup */ 152 { &vop_create_desc, spec_create }, /* create */ 153 { &vop_mknod_desc, spec_mknod }, /* mknod */ 154 { &vop_open_desc, spec_open }, /* open */ 155 { &vop_close_desc, nfsspec_close }, /* close */ 156 { &vop_access_desc, nfsspec_access }, /* access */ 157 { &vop_getattr_desc, nfs_getattr }, /* getattr */ 158 { &vop_setattr_desc, nfs_setattr }, /* setattr */ 159 { &vop_read_desc, nfsspec_read }, /* read */ 160 { &vop_write_desc, nfsspec_write }, /* write */ 161 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 162 { &vop_ioctl_desc, spec_ioctl }, /* ioctl */ 163 { &vop_poll_desc, spec_poll }, /* poll */ 164 { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */ 165 { &vop_revoke_desc, spec_revoke }, /* revoke */ 166 { &vop_mmap_desc, spec_mmap }, /* mmap */ 167 { &vop_fsync_desc, spec_fsync }, /* fsync */ 168 { &vop_seek_desc, spec_seek }, /* seek */ 169 { &vop_remove_desc, spec_remove }, /* remove */ 170 { &vop_link_desc, spec_link }, /* link */ 171 { &vop_rename_desc, spec_rename }, /* rename */ 172 { &vop_mkdir_desc, spec_mkdir }, /* mkdir */ 173 { &vop_rmdir_desc, spec_rmdir }, /* rmdir */ 174 { &vop_symlink_desc, spec_symlink }, /* symlink */ 175 { &vop_readdir_desc, spec_readdir }, /* readdir */ 176 { &vop_readlink_desc, spec_readlink }, /* readlink */ 177 { &vop_abortop_desc, spec_abortop }, /* abortop */ 178 { &vop_inactive_desc, nfs_inactive }, /* inactive */ 179 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */ 180 { &vop_lock_desc, nfs_lock }, /* lock */ 181 { &vop_unlock_desc, nfs_unlock }, /* unlock */ 182 { &vop_bmap_desc, spec_bmap }, /* bmap */ 183 { &vop_strategy_desc, spec_strategy }, /* strategy */ 184 { &vop_print_desc, nfs_print }, /* print */ 185 { &vop_islocked_desc, nfs_islocked }, /* islocked */ 186 { &vop_pathconf_desc, spec_pathconf }, /* pathconf */ 187 { &vop_advlock_desc, spec_advlock }, /* advlock */ 188 { &vop_bwrite_desc, spec_bwrite }, /* bwrite */ 189 { &vop_getpages_desc, spec_getpages }, /* getpages */ 190 { &vop_putpages_desc, spec_putpages }, /* putpages */ 191 { NULL, NULL } 192 }; 193 const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc = 194 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries }; 195 196 int (**fifo_nfsv2nodeop_p)(void *); 197 const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = { 198 { &vop_default_desc, vn_default_error }, 199 { &vop_lookup_desc, vn_fifo_bypass }, /* lookup */ 200 { &vop_create_desc, vn_fifo_bypass }, /* create */ 201 { &vop_mknod_desc, vn_fifo_bypass }, /* mknod */ 202 { &vop_open_desc, vn_fifo_bypass }, /* open */ 203 { &vop_close_desc, nfsfifo_close }, /* close */ 204 { &vop_access_desc, nfsspec_access }, /* access */ 205 { &vop_getattr_desc, nfs_getattr }, /* getattr */ 206 { &vop_setattr_desc, nfs_setattr }, /* setattr */ 207 { &vop_read_desc, nfsfifo_read }, /* read */ 208 { &vop_write_desc, nfsfifo_write }, /* write */ 209 { &vop_fcntl_desc, genfs_fcntl }, /* fcntl */ 210 { &vop_ioctl_desc, vn_fifo_bypass }, /* ioctl */ 211 { &vop_poll_desc, vn_fifo_bypass }, /* poll */ 212 { &vop_kqfilter_desc, 
vn_fifo_bypass }, /* kqfilter */ 213 { &vop_revoke_desc, vn_fifo_bypass }, /* revoke */ 214 { &vop_mmap_desc, vn_fifo_bypass }, /* mmap */ 215 { &vop_fsync_desc, nfs_fsync }, /* fsync */ 216 { &vop_seek_desc, vn_fifo_bypass }, /* seek */ 217 { &vop_remove_desc, vn_fifo_bypass }, /* remove */ 218 { &vop_link_desc, vn_fifo_bypass }, /* link */ 219 { &vop_rename_desc, vn_fifo_bypass }, /* rename */ 220 { &vop_mkdir_desc, vn_fifo_bypass }, /* mkdir */ 221 { &vop_rmdir_desc, vn_fifo_bypass }, /* rmdir */ 222 { &vop_symlink_desc, vn_fifo_bypass }, /* symlink */ 223 { &vop_readdir_desc, vn_fifo_bypass }, /* readdir */ 224 { &vop_readlink_desc, vn_fifo_bypass }, /* readlink */ 225 { &vop_abortop_desc, vn_fifo_bypass }, /* abortop */ 226 { &vop_inactive_desc, nfs_inactive }, /* inactive */ 227 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */ 228 { &vop_lock_desc, nfs_lock }, /* lock */ 229 { &vop_unlock_desc, nfs_unlock }, /* unlock */ 230 { &vop_bmap_desc, vn_fifo_bypass }, /* bmap */ 231 { &vop_strategy_desc, genfs_badop }, /* strategy */ 232 { &vop_print_desc, nfs_print }, /* print */ 233 { &vop_islocked_desc, nfs_islocked }, /* islocked */ 234 { &vop_pathconf_desc, vn_fifo_bypass }, /* pathconf */ 235 { &vop_advlock_desc, vn_fifo_bypass }, /* advlock */ 236 { &vop_bwrite_desc, genfs_badop }, /* bwrite */ 237 { &vop_putpages_desc, vn_fifo_bypass }, /* putpages */ 238 { NULL, NULL } 239 }; 240 const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc = 241 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries }; 242 243 static int nfs_linkrpc(struct vnode *, struct vnode *, const char *, 244 size_t, kauth_cred_t, struct lwp *); 245 static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *); 246 247 /* 248 * Global variables 249 */ 250 extern u_int32_t nfs_true, nfs_false; 251 extern u_int32_t nfs_xdrneg1; 252 extern const nfstype nfsv3_type[9]; 253 254 int nfs_numasync = 0; 255 #define DIRHDSIZ _DIRENT_NAMEOFF(dp) 256 #define UIO_ADVANCE(uio, siz) \ 257 (void)((uio)->uio_resid -= (siz), \ 258 (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \ 259 (uio)->uio_iov->iov_len -= (siz)) 260 261 static void nfs_cache_enter(struct vnode *, struct vnode *, 262 struct componentname *); 263 264 static void 265 nfs_cache_enter(struct vnode *dvp, struct vnode *vp, 266 struct componentname *cnp) 267 { 268 struct nfsnode *dnp = VTONFS(dvp); 269 270 if ((cnp->cn_flags & MAKEENTRY) == 0) { 271 return; 272 } 273 if (vp != NULL) { 274 struct nfsnode *np = VTONFS(vp); 275 276 np->n_ctime = np->n_vattr->va_ctime.tv_sec; 277 } 278 279 if (!timespecisset(&dnp->n_nctime)) 280 dnp->n_nctime = dnp->n_vattr->va_mtime; 281 282 cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags); 283 } 284 285 /* 286 * nfs null call from vfs. 287 */ 288 int 289 nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l) 290 { 291 char *bpos, *dpos; 292 int error = 0; 293 struct mbuf *mreq, *mrep, *md, *mb __unused; 294 struct nfsnode *np = VTONFS(vp); 295 296 nfsm_reqhead(np, NFSPROC_NULL, 0); 297 nfsm_request(np, NFSPROC_NULL, l, cred); 298 nfsm_reqdone; 299 return (error); 300 } 301 302 /* 303 * nfs access vnode op. 304 * For nfs version 2, just return ok. File accesses may fail later. 305 * For nfs version 3, use the access rpc to check accessibility. If file modes 306 * are changed on the server, accesses might still fail later. 
307 */ 308 int 309 nfs_access(void *v) 310 { 311 struct vop_access_args /* { 312 struct vnode *a_vp; 313 int a_mode; 314 kauth_cred_t a_cred; 315 } */ *ap = v; 316 struct vnode *vp = ap->a_vp; 317 #ifndef NFS_V2_ONLY 318 u_int32_t *tl; 319 char *cp; 320 int32_t t1, t2; 321 char *bpos, *dpos, *cp2; 322 int error = 0, attrflag; 323 struct mbuf *mreq, *mrep, *md, *mb; 324 u_int32_t mode, rmode; 325 const int v3 = NFS_ISV3(vp); 326 #endif 327 int cachevalid; 328 struct nfsnode *np = VTONFS(vp); 329 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 330 331 cachevalid = (np->n_accstamp != -1 && 332 (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) && 333 np->n_accuid == kauth_cred_geteuid(ap->a_cred)); 334 335 /* 336 * Check access cache first. If this request has been made for this 337 * uid shortly before, use the cached result. 338 */ 339 if (cachevalid) { 340 if (!np->n_accerror) { 341 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 342 return np->n_accerror; 343 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode) 344 return np->n_accerror; 345 } 346 347 #ifndef NFS_V2_ONLY 348 /* 349 * For nfs v3, do an access rpc, otherwise you are stuck emulating 350 * ufs_access() locally using the vattr. This may not be correct, 351 * since the server may apply other access criteria such as 352 * client uid-->server uid mapping that we do not know about, but 353 * this is better than just returning anything that is lying about 354 * in the cache. 355 */ 356 if (v3) { 357 nfsstats.rpccnt[NFSPROC_ACCESS]++; 358 nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED); 359 nfsm_fhtom(np, v3); 360 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 361 if (ap->a_mode & VREAD) 362 mode = NFSV3ACCESS_READ; 363 else 364 mode = 0; 365 if (vp->v_type != VDIR) { 366 if (ap->a_mode & VWRITE) 367 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 368 if (ap->a_mode & VEXEC) 369 mode |= NFSV3ACCESS_EXECUTE; 370 } else { 371 if (ap->a_mode & VWRITE) 372 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 373 NFSV3ACCESS_DELETE); 374 if (ap->a_mode & VEXEC) 375 mode |= NFSV3ACCESS_LOOKUP; 376 } 377 *tl = txdr_unsigned(mode); 378 nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred); 379 nfsm_postop_attr(vp, attrflag, 0); 380 if (!error) { 381 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 382 rmode = fxdr_unsigned(u_int32_t, *tl); 383 /* 384 * The NFS V3 spec does not clarify whether or not 385 * the returned access bits can be a superset of 386 * the ones requested, so... 387 */ 388 if ((rmode & mode) != mode) 389 error = EACCES; 390 } 391 nfsm_reqdone; 392 } else 393 #endif 394 return (nfsspec_access(ap)); 395 #ifndef NFS_V2_ONLY 396 /* 397 * Disallow write attempts on filesystems mounted read-only; 398 * unless the file is a socket, fifo, or a block or character 399 * device resident on the filesystem. 400 */ 401 if (!error && (ap->a_mode & VWRITE) && 402 (vp->v_mount->mnt_flag & MNT_RDONLY)) { 403 switch (vp->v_type) { 404 case VREG: 405 case VDIR: 406 case VLNK: 407 error = EROFS; 408 default: 409 break; 410 } 411 } 412 413 if (!error || error == EACCES) { 414 /* 415 * If we got the same result as for a previous, 416 * different request, OR it in. Don't update 417 * the timestamp in that case. 
418 */ 419 if (cachevalid && np->n_accstamp != -1 && 420 error == np->n_accerror) { 421 if (!error) 422 np->n_accmode |= ap->a_mode; 423 else if ((np->n_accmode & ap->a_mode) == ap->a_mode) 424 np->n_accmode = ap->a_mode; 425 } else { 426 np->n_accstamp = time_uptime; 427 np->n_accuid = kauth_cred_geteuid(ap->a_cred); 428 np->n_accmode = ap->a_mode; 429 np->n_accerror = error; 430 } 431 } 432 433 return (error); 434 #endif 435 } 436 437 /* 438 * nfs open vnode op 439 * Check to see if the type is ok 440 * and that deletion is not in progress. 441 * For paged in text files, you will need to flush the page cache 442 * if consistency is lost. 443 */ 444 /* ARGSUSED */ 445 int 446 nfs_open(void *v) 447 { 448 struct vop_open_args /* { 449 struct vnode *a_vp; 450 int a_mode; 451 kauth_cred_t a_cred; 452 } */ *ap = v; 453 struct vnode *vp = ap->a_vp; 454 struct nfsnode *np = VTONFS(vp); 455 int error; 456 457 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 458 return (EACCES); 459 } 460 461 if (ap->a_mode & FREAD) { 462 if (np->n_rcred != NULL) 463 kauth_cred_free(np->n_rcred); 464 np->n_rcred = ap->a_cred; 465 kauth_cred_hold(np->n_rcred); 466 } 467 if (ap->a_mode & FWRITE) { 468 if (np->n_wcred != NULL) 469 kauth_cred_free(np->n_wcred); 470 np->n_wcred = ap->a_cred; 471 kauth_cred_hold(np->n_wcred); 472 } 473 474 error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0); 475 if (error) 476 return error; 477 478 NFS_INVALIDATE_ATTRCACHE(np); /* For Open/Close consistency */ 479 480 return (0); 481 } 482 483 /* 484 * nfs close vnode op 485 * What an NFS client should do upon close after writing is a debatable issue. 486 * Most NFS clients push delayed writes to the server upon close, basically for 487 * two reasons: 488 * 1 - So that any write errors may be reported back to the client process 489 * doing the close system call. By far the two most likely errors are 490 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 491 * 2 - To put a worst case upper bound on cache inconsistency between 492 * multiple clients for the file. 493 * There is also a consistency problem for Version 2 of the protocol w.r.t. 494 * not being able to tell if other clients are writing a file concurrently, 495 * since there is no way of knowing if the changed modify time in the reply 496 * is only due to the write for this client. 497 * (NFS Version 3 provides weak cache consistency data in the reply that 498 * should be sufficient to detect and handle this case.) 499 * 500 * The current code does the following: 501 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 502 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 503 * or commit them (this satisfies 1 and 2 except for the 504 * case where the server crashes after this close but 505 * before the commit RPC, which is felt to be "good 506 * enough". Changing the last argument to nfs_flush() to 507 * a 1 would force a commit operation, if it is felt a 508 * commit is necessary now. 
509 */ 510 /* ARGSUSED */ 511 int 512 nfs_close(void *v) 513 { 514 struct vop_close_args /* { 515 struct vnodeop_desc *a_desc; 516 struct vnode *a_vp; 517 int a_fflag; 518 kauth_cred_t a_cred; 519 } */ *ap = v; 520 struct vnode *vp = ap->a_vp; 521 struct nfsnode *np = VTONFS(vp); 522 int error = 0; 523 UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist); 524 525 if (vp->v_type == VREG) { 526 if (np->n_flag & NMODIFIED) { 527 #ifndef NFS_V2_ONLY 528 if (NFS_ISV3(vp)) { 529 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0); 530 np->n_flag &= ~NMODIFIED; 531 } else 532 #endif 533 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1); 534 NFS_INVALIDATE_ATTRCACHE(np); 535 } 536 if (np->n_flag & NWRITEERR) { 537 np->n_flag &= ~NWRITEERR; 538 error = np->n_error; 539 } 540 } 541 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0); 542 return (error); 543 } 544 545 /* 546 * nfs getattr call from vfs. 547 */ 548 int 549 nfs_getattr(void *v) 550 { 551 struct vop_getattr_args /* { 552 struct vnode *a_vp; 553 struct vattr *a_vap; 554 kauth_cred_t a_cred; 555 } */ *ap = v; 556 struct vnode *vp = ap->a_vp; 557 struct nfsnode *np = VTONFS(vp); 558 char *cp; 559 u_int32_t *tl; 560 int32_t t1, t2; 561 char *bpos, *dpos; 562 int error = 0; 563 struct mbuf *mreq, *mrep, *md, *mb; 564 const int v3 = NFS_ISV3(vp); 565 566 /* 567 * Update local times for special files. 568 */ 569 if (np->n_flag & (NACC | NUPD)) 570 np->n_flag |= NCHG; 571 572 /* 573 * if we have delayed truncation, do it now. 574 */ 575 nfs_delayedtruncate(vp); 576 577 /* 578 * First look in the cache. 579 */ 580 if (nfs_getattrcache(vp, ap->a_vap) == 0) 581 return (0); 582 nfsstats.rpccnt[NFSPROC_GETATTR]++; 583 nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3)); 584 nfsm_fhtom(np, v3); 585 nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred); 586 if (!error) { 587 nfsm_loadattr(vp, ap->a_vap, 0); 588 if (vp->v_type == VDIR && 589 ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ) 590 ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ; 591 } 592 nfsm_reqdone; 593 return (error); 594 } 595 596 /* 597 * nfs setattr call. 598 */ 599 int 600 nfs_setattr(void *v) 601 { 602 struct vop_setattr_args /* { 603 struct vnodeop_desc *a_desc; 604 struct vnode *a_vp; 605 struct vattr *a_vap; 606 kauth_cred_t a_cred; 607 } */ *ap = v; 608 struct vnode *vp = ap->a_vp; 609 struct nfsnode *np = VTONFS(vp); 610 struct vattr *vap = ap->a_vap; 611 int error = 0; 612 u_quad_t tsize = 0; 613 614 /* 615 * Setting of flags is not supported. 616 */ 617 if (vap->va_flags != VNOVAL) 618 return (EOPNOTSUPP); 619 620 /* 621 * Disallow write attempts if the filesystem is mounted read-only. 622 */ 623 if ((vap->va_uid != (uid_t)VNOVAL || 624 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 625 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 626 (vp->v_mount->mnt_flag & MNT_RDONLY)) 627 return (EROFS); 628 if (vap->va_size != VNOVAL) { 629 if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) { 630 return EFBIG; 631 } 632 switch (vp->v_type) { 633 case VDIR: 634 return (EISDIR); 635 case VCHR: 636 case VBLK: 637 case VSOCK: 638 case VFIFO: 639 if (vap->va_mtime.tv_sec == VNOVAL && 640 vap->va_atime.tv_sec == VNOVAL && 641 vap->va_mode == (mode_t)VNOVAL && 642 vap->va_uid == (uid_t)VNOVAL && 643 vap->va_gid == (gid_t)VNOVAL) 644 return (0); 645 vap->va_size = VNOVAL; 646 break; 647 default: 648 /* 649 * Disallow write attempts if the filesystem is 650 * mounted read-only. 
651 */ 652 if (vp->v_mount->mnt_flag & MNT_RDONLY) 653 return (EROFS); 654 genfs_node_wrlock(vp); 655 uvm_vnp_setsize(vp, vap->va_size); 656 tsize = np->n_size; 657 np->n_size = vap->va_size; 658 if (vap->va_size == 0) 659 error = nfs_vinvalbuf(vp, 0, 660 ap->a_cred, curlwp, 1); 661 else 662 error = nfs_vinvalbuf(vp, V_SAVE, 663 ap->a_cred, curlwp, 1); 664 if (error) { 665 uvm_vnp_setsize(vp, tsize); 666 genfs_node_unlock(vp); 667 return (error); 668 } 669 np->n_vattr->va_size = vap->va_size; 670 } 671 } else { 672 /* 673 * flush files before setattr because a later write of 674 * cached data might change timestamps or reset sugid bits 675 */ 676 if ((vap->va_mtime.tv_sec != VNOVAL || 677 vap->va_atime.tv_sec != VNOVAL || 678 vap->va_mode != VNOVAL) && 679 vp->v_type == VREG && 680 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 681 curlwp, 1)) == EINTR) 682 return (error); 683 } 684 error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp); 685 if (vap->va_size != VNOVAL) { 686 if (error) { 687 np->n_size = np->n_vattr->va_size = tsize; 688 uvm_vnp_setsize(vp, np->n_size); 689 } 690 genfs_node_unlock(vp); 691 } 692 VN_KNOTE(vp, NOTE_ATTRIB); 693 return (error); 694 } 695 696 /* 697 * Do an nfs setattr rpc. 698 */ 699 int 700 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l) 701 { 702 struct nfsv2_sattr *sp; 703 char *cp; 704 int32_t t1, t2; 705 char *bpos, *dpos; 706 u_int32_t *tl; 707 int error = 0; 708 struct mbuf *mreq, *mrep, *md, *mb; 709 const int v3 = NFS_ISV3(vp); 710 struct nfsnode *np = VTONFS(vp); 711 #ifndef NFS_V2_ONLY 712 int wccflag = NFSV3_WCCRATTR; 713 char *cp2; 714 #endif 715 716 nfsstats.rpccnt[NFSPROC_SETATTR]++; 717 nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3)); 718 nfsm_fhtom(np, v3); 719 #ifndef NFS_V2_ONLY 720 if (v3) { 721 nfsm_v3attrbuild(vap, true); 722 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 723 *tl = nfs_false; 724 } else { 725 #endif 726 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 727 if (vap->va_mode == (mode_t)VNOVAL) 728 sp->sa_mode = nfs_xdrneg1; 729 else 730 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 731 if (vap->va_uid == (uid_t)VNOVAL) 732 sp->sa_uid = nfs_xdrneg1; 733 else 734 sp->sa_uid = txdr_unsigned(vap->va_uid); 735 if (vap->va_gid == (gid_t)VNOVAL) 736 sp->sa_gid = nfs_xdrneg1; 737 else 738 sp->sa_gid = txdr_unsigned(vap->va_gid); 739 sp->sa_size = txdr_unsigned(vap->va_size); 740 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 741 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 742 #ifndef NFS_V2_ONLY 743 } 744 #endif 745 nfsm_request(np, NFSPROC_SETATTR, l, cred); 746 #ifndef NFS_V2_ONLY 747 if (v3) { 748 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false); 749 } else 750 #endif 751 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC); 752 nfsm_reqdone; 753 return (error); 754 } 755 756 /* 757 * nfs lookup call, one step at a time... 758 * First look in cache 759 * If not found, do the rpc. 
760 */ 761 int 762 nfs_lookup(void *v) 763 { 764 struct vop_lookup_args /* { 765 struct vnodeop_desc *a_desc; 766 struct vnode *a_dvp; 767 struct vnode **a_vpp; 768 struct componentname *a_cnp; 769 } */ *ap = v; 770 struct componentname *cnp = ap->a_cnp; 771 struct vnode *dvp = ap->a_dvp; 772 struct vnode **vpp = ap->a_vpp; 773 int flags; 774 struct vnode *newvp; 775 u_int32_t *tl; 776 char *cp; 777 int32_t t1, t2; 778 char *bpos, *dpos, *cp2; 779 struct mbuf *mreq, *mrep, *md, *mb; 780 long len; 781 nfsfh_t *fhp; 782 struct nfsnode *np; 783 int cachefound; 784 int error = 0, attrflag, fhsize; 785 const int v3 = NFS_ISV3(dvp); 786 787 flags = cnp->cn_flags; 788 789 *vpp = NULLVP; 790 newvp = NULLVP; 791 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 792 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 793 return (EROFS); 794 if (dvp->v_type != VDIR) 795 return (ENOTDIR); 796 797 /* 798 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves. 799 */ 800 if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') { 801 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred); 802 if (error) 803 return error; 804 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) 805 return EISDIR; 806 vref(dvp); 807 *vpp = dvp; 808 return 0; 809 } 810 811 np = VTONFS(dvp); 812 813 /* 814 * Before performing an RPC, check the name cache to see if 815 * the directory/name pair we are looking for is known already. 816 * If the directory/name pair is found in the name cache, 817 * we have to ensure the directory has not changed from 818 * the time the cache entry has been created. If it has, 819 * the cache entry has to be ignored. 820 */ 821 cachefound = cache_lookup_raw(dvp, cnp->cn_nameptr, cnp->cn_namelen, 822 cnp->cn_flags, NULL, vpp); 823 KASSERT(dvp != *vpp); 824 KASSERT((cnp->cn_flags & ISWHITEOUT) == 0); 825 if (cachefound) { 826 struct vattr vattr; 827 828 error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred); 829 if (error != 0) { 830 if (*vpp != NULLVP) 831 vrele(*vpp); 832 *vpp = NULLVP; 833 return error; 834 } 835 836 if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred) 837 || timespeccmp(&vattr.va_mtime, 838 &VTONFS(dvp)->n_nctime, !=)) { 839 if (*vpp != NULLVP) { 840 vrele(*vpp); 841 *vpp = NULLVP; 842 } 843 cache_purge1(dvp, NULL, 0, PURGE_CHILDREN); 844 timespecclear(&np->n_nctime); 845 goto dorpc; 846 } 847 848 if (*vpp == NULLVP) { 849 /* namecache gave us a negative result */ 850 error = ENOENT; 851 goto noentry; 852 } 853 854 /* 855 * investigate the vnode returned by cache_lookup_raw. 856 * if it isn't appropriate, do an rpc. 857 */ 858 newvp = *vpp; 859 if ((flags & ISDOTDOT) != 0) { 860 VOP_UNLOCK(dvp); 861 } 862 error = vn_lock(newvp, LK_EXCLUSIVE); 863 if ((flags & ISDOTDOT) != 0) { 864 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); 865 } 866 if (error != 0) { 867 /* newvp has been reclaimed. */ 868 vrele(newvp); 869 *vpp = NULLVP; 870 goto dorpc; 871 } 872 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred) 873 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) { 874 nfsstats.lookupcache_hits++; 875 KASSERT(newvp->v_type != VNON); 876 return (0); 877 } 878 cache_purge1(newvp, NULL, 0, PURGE_PARENTS); 879 vput(newvp); 880 *vpp = NULLVP; 881 } 882 dorpc: 883 #if 0 884 /* 885 * because nfsv3 has the same CREATE semantics as ours, 886 * we don't have to perform LOOKUPs beforehand. 887 * 888 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL. 889 * XXX although we have no way to know if O_EXCL is requested or not. 
890 */ 891 892 if (v3 && cnp->cn_nameiop == CREATE && 893 (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN && 894 (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 895 return (EJUSTRETURN); 896 } 897 #endif /* 0 */ 898 899 error = 0; 900 newvp = NULLVP; 901 nfsstats.lookupcache_misses++; 902 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 903 len = cnp->cn_namelen; 904 nfsm_reqhead(np, NFSPROC_LOOKUP, 905 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 906 nfsm_fhtom(np, v3); 907 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 908 nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred); 909 if (error) { 910 nfsm_postop_attr(dvp, attrflag, 0); 911 m_freem(mrep); 912 goto nfsmout; 913 } 914 nfsm_getfh(fhp, fhsize, v3); 915 916 /* 917 * Handle RENAME case... 918 */ 919 if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) { 920 if (NFS_CMPFH(np, fhp, fhsize)) { 921 m_freem(mrep); 922 return (EISDIR); 923 } 924 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 925 if (error) { 926 m_freem(mrep); 927 return error; 928 } 929 newvp = NFSTOV(np); 930 #ifndef NFS_V2_ONLY 931 if (v3) { 932 nfsm_postop_attr(newvp, attrflag, 0); 933 nfsm_postop_attr(dvp, attrflag, 0); 934 } else 935 #endif 936 nfsm_loadattr(newvp, (struct vattr *)0, 0); 937 *vpp = newvp; 938 m_freem(mrep); 939 goto validate; 940 } 941 942 /* 943 * The postop attr handling is duplicated for each if case, 944 * because it should be done while dvp is locked (unlocking 945 * dvp is different for each case). 946 */ 947 948 if (NFS_CMPFH(np, fhp, fhsize)) { 949 /* 950 * as we handle "." lookup locally, this should be 951 * a broken server. 952 */ 953 vref(dvp); 954 newvp = dvp; 955 #ifndef NFS_V2_ONLY 956 if (v3) { 957 nfsm_postop_attr(newvp, attrflag, 0); 958 nfsm_postop_attr(dvp, attrflag, 0); 959 } else 960 #endif 961 nfsm_loadattr(newvp, (struct vattr *)0, 0); 962 } else if (flags & ISDOTDOT) { 963 /* 964 * ".." lookup 965 */ 966 VOP_UNLOCK(dvp); 967 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 968 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); 969 if (error) { 970 m_freem(mrep); 971 return error; 972 } 973 newvp = NFSTOV(np); 974 975 #ifndef NFS_V2_ONLY 976 if (v3) { 977 nfsm_postop_attr(newvp, attrflag, 0); 978 nfsm_postop_attr(dvp, attrflag, 0); 979 } else 980 #endif 981 nfsm_loadattr(newvp, (struct vattr *)0, 0); 982 } else { 983 /* 984 * Other lookups. 985 */ 986 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 987 if (error) { 988 m_freem(mrep); 989 return error; 990 } 991 newvp = NFSTOV(np); 992 #ifndef NFS_V2_ONLY 993 if (v3) { 994 nfsm_postop_attr(newvp, attrflag, 0); 995 nfsm_postop_attr(dvp, attrflag, 0); 996 } else 997 #endif 998 nfsm_loadattr(newvp, (struct vattr *)0, 0); 999 } 1000 if (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN)) { 1001 nfs_cache_enter(dvp, newvp, cnp); 1002 } 1003 *vpp = newvp; 1004 nfsm_reqdone; 1005 if (error) { 1006 /* 1007 * We get here only because of errors returned by 1008 * the RPC. Otherwise we'll have returned above 1009 * (the nfsm_* macros will jump to nfsm_reqdone 1010 * on error). 
1011 */ 1012 if (error == ENOENT && cnp->cn_nameiop != CREATE) { 1013 nfs_cache_enter(dvp, NULL, cnp); 1014 } 1015 if (newvp != NULLVP) { 1016 if (newvp == dvp) { 1017 vrele(newvp); 1018 } else { 1019 vput(newvp); 1020 } 1021 } 1022 noentry: 1023 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 1024 (flags & ISLASTCN) && error == ENOENT) { 1025 if (dvp->v_mount->mnt_flag & MNT_RDONLY) { 1026 error = EROFS; 1027 } else { 1028 error = EJUSTRETURN; 1029 } 1030 } 1031 *vpp = NULL; 1032 return error; 1033 } 1034 1035 validate: 1036 /* 1037 * make sure we have valid type and size. 1038 */ 1039 1040 newvp = *vpp; 1041 if (newvp->v_type == VNON) { 1042 struct vattr vattr; /* dummy */ 1043 1044 KASSERT(VTONFS(newvp)->n_attrstamp == 0); 1045 error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred); 1046 if (error) { 1047 vput(newvp); 1048 *vpp = NULL; 1049 } 1050 } 1051 1052 return error; 1053 } 1054 1055 /* 1056 * nfs read call. 1057 * Just call nfs_bioread() to do the work. 1058 */ 1059 int 1060 nfs_read(void *v) 1061 { 1062 struct vop_read_args /* { 1063 struct vnode *a_vp; 1064 struct uio *a_uio; 1065 int a_ioflag; 1066 kauth_cred_t a_cred; 1067 } */ *ap = v; 1068 struct vnode *vp = ap->a_vp; 1069 1070 if (vp->v_type != VREG) 1071 return EISDIR; 1072 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0)); 1073 } 1074 1075 /* 1076 * nfs readlink call 1077 */ 1078 int 1079 nfs_readlink(void *v) 1080 { 1081 struct vop_readlink_args /* { 1082 struct vnode *a_vp; 1083 struct uio *a_uio; 1084 kauth_cred_t a_cred; 1085 } */ *ap = v; 1086 struct vnode *vp = ap->a_vp; 1087 struct nfsnode *np = VTONFS(vp); 1088 1089 if (vp->v_type != VLNK) 1090 return (EPERM); 1091 1092 if (np->n_rcred != NULL) { 1093 kauth_cred_free(np->n_rcred); 1094 } 1095 np->n_rcred = ap->a_cred; 1096 kauth_cred_hold(np->n_rcred); 1097 1098 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0)); 1099 } 1100 1101 /* 1102 * Do a readlink rpc. 1103 * Called by nfs_doio() from below the buffer cache. 1104 */ 1105 int 1106 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred) 1107 { 1108 u_int32_t *tl; 1109 char *cp; 1110 int32_t t1, t2; 1111 char *bpos, *dpos, *cp2; 1112 int error = 0; 1113 uint32_t len; 1114 struct mbuf *mreq, *mrep, *md, *mb; 1115 const int v3 = NFS_ISV3(vp); 1116 struct nfsnode *np = VTONFS(vp); 1117 #ifndef NFS_V2_ONLY 1118 int attrflag; 1119 #endif 1120 1121 nfsstats.rpccnt[NFSPROC_READLINK]++; 1122 nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3)); 1123 nfsm_fhtom(np, v3); 1124 nfsm_request(np, NFSPROC_READLINK, curlwp, cred); 1125 #ifndef NFS_V2_ONLY 1126 if (v3) 1127 nfsm_postop_attr(vp, attrflag, 0); 1128 #endif 1129 if (!error) { 1130 #ifndef NFS_V2_ONLY 1131 if (v3) { 1132 nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED); 1133 len = fxdr_unsigned(uint32_t, *tl); 1134 if (len > NFS_MAXPATHLEN) { 1135 /* 1136 * this pathname is too long for us. 1137 */ 1138 m_freem(mrep); 1139 /* Solaris returns EINVAL. should we follow? 
*/ 1140 error = ENAMETOOLONG; 1141 goto nfsmout; 1142 } 1143 } else 1144 #endif 1145 { 1146 nfsm_strsiz(len, NFS_MAXPATHLEN); 1147 } 1148 nfsm_mtouio(uiop, len); 1149 } 1150 nfsm_reqdone; 1151 return (error); 1152 } 1153 1154 /* 1155 * nfs read rpc call 1156 * Ditto above 1157 */ 1158 int 1159 nfs_readrpc(struct vnode *vp, struct uio *uiop) 1160 { 1161 u_int32_t *tl; 1162 char *cp; 1163 int32_t t1, t2; 1164 char *bpos, *dpos, *cp2; 1165 struct mbuf *mreq, *mrep, *md, *mb; 1166 struct nfsmount *nmp; 1167 int error = 0, len, retlen, tsiz, eof __unused, byte_count; 1168 const int v3 = NFS_ISV3(vp); 1169 struct nfsnode *np = VTONFS(vp); 1170 #ifndef NFS_V2_ONLY 1171 int attrflag; 1172 #endif 1173 1174 #ifndef nolint 1175 eof = 0; 1176 #endif 1177 nmp = VFSTONFS(vp->v_mount); 1178 tsiz = uiop->uio_resid; 1179 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) 1180 return (EFBIG); 1181 iostat_busy(nmp->nm_stats); 1182 byte_count = 0; /* count bytes actually transferred */ 1183 while (tsiz > 0) { 1184 nfsstats.rpccnt[NFSPROC_READ]++; 1185 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz; 1186 nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3); 1187 nfsm_fhtom(np, v3); 1188 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3); 1189 #ifndef NFS_V2_ONLY 1190 if (v3) { 1191 txdr_hyper(uiop->uio_offset, tl); 1192 *(tl + 2) = txdr_unsigned(len); 1193 } else 1194 #endif 1195 { 1196 *tl++ = txdr_unsigned(uiop->uio_offset); 1197 *tl++ = txdr_unsigned(len); 1198 *tl = 0; 1199 } 1200 nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred); 1201 #ifndef NFS_V2_ONLY 1202 if (v3) { 1203 nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC); 1204 if (error) { 1205 m_freem(mrep); 1206 goto nfsmout; 1207 } 1208 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1209 eof = fxdr_unsigned(int, *(tl + 1)); 1210 } else 1211 #endif 1212 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC); 1213 nfsm_strsiz(retlen, nmp->nm_rsize); 1214 nfsm_mtouio(uiop, retlen); 1215 m_freem(mrep); 1216 tsiz -= retlen; 1217 byte_count += retlen; 1218 #ifndef NFS_V2_ONLY 1219 if (v3) { 1220 if (eof || retlen == 0) 1221 tsiz = 0; 1222 } else 1223 #endif 1224 if (retlen < len) 1225 tsiz = 0; 1226 } 1227 nfsmout: 1228 iostat_unbusy(nmp->nm_stats, byte_count, 1); 1229 return (error); 1230 } 1231 1232 struct nfs_writerpc_context { 1233 kmutex_t nwc_lock; 1234 kcondvar_t nwc_cv; 1235 int nwc_mbufcount; 1236 }; 1237 1238 /* 1239 * free mbuf used to refer protected pages while write rpc call. 1240 * called at splvm. 
1241 */ 1242 static void 1243 nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg) 1244 { 1245 struct nfs_writerpc_context *ctx = arg; 1246 1247 KASSERT(m != NULL); 1248 KASSERT(ctx != NULL); 1249 pool_cache_put(mb_cache, m); 1250 mutex_enter(&ctx->nwc_lock); 1251 if (--ctx->nwc_mbufcount == 0) { 1252 cv_signal(&ctx->nwc_cv); 1253 } 1254 mutex_exit(&ctx->nwc_lock); 1255 } 1256 1257 /* 1258 * nfs write call 1259 */ 1260 int 1261 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp) 1262 { 1263 u_int32_t *tl; 1264 char *cp; 1265 int32_t t1, t2; 1266 char *bpos, *dpos; 1267 struct mbuf *mreq, *mrep, *md, *mb; 1268 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1269 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR; 1270 const int v3 = NFS_ISV3(vp); 1271 int committed = NFSV3WRITE_FILESYNC; 1272 struct nfsnode *np = VTONFS(vp); 1273 struct nfs_writerpc_context ctx; 1274 int byte_count; 1275 size_t origresid; 1276 #ifndef NFS_V2_ONLY 1277 char *cp2; 1278 int rlen, commit; 1279 #endif 1280 1281 if (vp->v_mount->mnt_flag & MNT_RDONLY) { 1282 panic("writerpc readonly vp %p", vp); 1283 } 1284 1285 #ifdef DIAGNOSTIC 1286 if (uiop->uio_iovcnt != 1) 1287 panic("nfs: writerpc iovcnt > 1"); 1288 #endif 1289 tsiz = uiop->uio_resid; 1290 if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize) 1291 return EFBIG; 1292 1293 mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM); 1294 cv_init(&ctx.nwc_cv, "nfsmblk"); 1295 ctx.nwc_mbufcount = 1; 1296 1297 retry: 1298 origresid = uiop->uio_resid; 1299 KASSERT(origresid == uiop->uio_iov->iov_len); 1300 iostat_busy(nmp->nm_stats); 1301 byte_count = 0; /* count of bytes actually written */ 1302 while (tsiz > 0) { 1303 uint32_t datalen; /* data bytes need to be allocated in mbuf */ 1304 uint32_t backup; 1305 bool stalewriteverf = false; 1306 1307 nfsstats.rpccnt[NFSPROC_WRITE]++; 1308 len = min(tsiz, nmp->nm_wsize); 1309 datalen = pageprotected ? 0 : nfsm_rndup(len); 1310 nfsm_reqhead(np, NFSPROC_WRITE, 1311 NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen); 1312 nfsm_fhtom(np, v3); 1313 #ifndef NFS_V2_ONLY 1314 if (v3) { 1315 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED); 1316 txdr_hyper(uiop->uio_offset, tl); 1317 tl += 2; 1318 *tl++ = txdr_unsigned(len); 1319 *tl++ = txdr_unsigned(*iomode); 1320 *tl = txdr_unsigned(len); 1321 } else 1322 #endif 1323 { 1324 u_int32_t x; 1325 1326 nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED); 1327 /* Set both "begin" and "current" to non-garbage. */ 1328 x = txdr_unsigned((u_int32_t)uiop->uio_offset); 1329 *tl++ = x; /* "begin offset" */ 1330 *tl++ = x; /* "current offset" */ 1331 x = txdr_unsigned(len); 1332 *tl++ = x; /* total to this offset */ 1333 *tl = x; /* size of this write */ 1334 1335 } 1336 if (pageprotected) { 1337 /* 1338 * since we know pages can't be modified during i/o, 1339 * no need to copy them for us. 1340 */ 1341 struct mbuf *m; 1342 struct iovec *iovp = uiop->uio_iov; 1343 1344 m = m_get(M_WAIT, MT_DATA); 1345 MCLAIM(m, &nfs_mowner); 1346 MEXTADD(m, iovp->iov_base, len, M_MBUF, 1347 nfs_writerpc_extfree, &ctx); 1348 m->m_flags |= M_EXT_ROMAP; 1349 m->m_len = len; 1350 mb->m_next = m; 1351 /* 1352 * no need to maintain mb and bpos here 1353 * because no one care them later. 
1354 */ 1355 #if 0 1356 mb = m; 1357 bpos = mtod(void *, mb) + mb->m_len; 1358 #endif 1359 UIO_ADVANCE(uiop, len); 1360 uiop->uio_offset += len; 1361 mutex_enter(&ctx.nwc_lock); 1362 ctx.nwc_mbufcount++; 1363 mutex_exit(&ctx.nwc_lock); 1364 nfs_zeropad(mb, 0, nfsm_padlen(len)); 1365 } else { 1366 nfsm_uiotom(uiop, len); 1367 } 1368 nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred); 1369 #ifndef NFS_V2_ONLY 1370 if (v3) { 1371 wccflag = NFSV3_WCCCHK; 1372 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error); 1373 if (!error) { 1374 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED 1375 + NFSX_V3WRITEVERF); 1376 rlen = fxdr_unsigned(int, *tl++); 1377 if (rlen == 0) { 1378 error = NFSERR_IO; 1379 m_freem(mrep); 1380 break; 1381 } else if (rlen < len) { 1382 backup = len - rlen; 1383 UIO_ADVANCE(uiop, -backup); 1384 uiop->uio_offset -= backup; 1385 len = rlen; 1386 } 1387 commit = fxdr_unsigned(int, *tl++); 1388 1389 /* 1390 * Return the lowest committment level 1391 * obtained by any of the RPCs. 1392 */ 1393 if (committed == NFSV3WRITE_FILESYNC) 1394 committed = commit; 1395 else if (committed == NFSV3WRITE_DATASYNC && 1396 commit == NFSV3WRITE_UNSTABLE) 1397 committed = commit; 1398 mutex_enter(&nmp->nm_lock); 1399 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){ 1400 memcpy(nmp->nm_writeverf, tl, 1401 NFSX_V3WRITEVERF); 1402 nmp->nm_iflag |= NFSMNT_HASWRITEVERF; 1403 } else if ((nmp->nm_iflag & 1404 NFSMNT_STALEWRITEVERF) || 1405 memcmp(tl, nmp->nm_writeverf, 1406 NFSX_V3WRITEVERF)) { 1407 memcpy(nmp->nm_writeverf, tl, 1408 NFSX_V3WRITEVERF); 1409 /* 1410 * note NFSMNT_STALEWRITEVERF 1411 * if we're the first thread to 1412 * notice it. 1413 */ 1414 if ((nmp->nm_iflag & 1415 NFSMNT_STALEWRITEVERF) == 0) { 1416 stalewriteverf = true; 1417 nmp->nm_iflag |= 1418 NFSMNT_STALEWRITEVERF; 1419 } 1420 } 1421 mutex_exit(&nmp->nm_lock); 1422 } 1423 } else 1424 #endif 1425 nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC); 1426 if (wccflag) 1427 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime; 1428 m_freem(mrep); 1429 if (error) 1430 break; 1431 tsiz -= len; 1432 byte_count += len; 1433 if (stalewriteverf) { 1434 *stalewriteverfp = true; 1435 stalewriteverf = false; 1436 if (committed == NFSV3WRITE_UNSTABLE && 1437 len != origresid) { 1438 /* 1439 * if our write requests weren't atomic but 1440 * unstable, datas in previous iterations 1441 * might have already been lost now. 1442 * then, we should resend them to nfsd. 1443 */ 1444 backup = origresid - tsiz; 1445 UIO_ADVANCE(uiop, -backup); 1446 uiop->uio_offset -= backup; 1447 tsiz = origresid; 1448 goto retry; 1449 } 1450 } 1451 } 1452 nfsmout: 1453 iostat_unbusy(nmp->nm_stats, byte_count, 0); 1454 if (pageprotected) { 1455 /* 1456 * wait until mbufs go away. 1457 * retransmitted mbufs can survive longer than rpc requests 1458 * themselves. 1459 */ 1460 mutex_enter(&ctx.nwc_lock); 1461 ctx.nwc_mbufcount--; 1462 while (ctx.nwc_mbufcount > 0) { 1463 cv_wait(&ctx.nwc_cv, &ctx.nwc_lock); 1464 } 1465 mutex_exit(&ctx.nwc_lock); 1466 } 1467 mutex_destroy(&ctx.nwc_lock); 1468 cv_destroy(&ctx.nwc_cv); 1469 *iomode = committed; 1470 if (error) 1471 uiop->uio_resid = tsiz; 1472 return (error); 1473 } 1474 1475 /* 1476 * nfs mknod rpc 1477 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1478 * mode set to specify the file type and the size field for rdev. 
1479 */ 1480 int 1481 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap) 1482 { 1483 struct nfsv2_sattr *sp; 1484 u_int32_t *tl; 1485 char *cp; 1486 int32_t t1, t2; 1487 struct vnode *newvp = (struct vnode *)0; 1488 struct nfsnode *dnp, *np; 1489 char *cp2; 1490 char *bpos, *dpos; 1491 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1492 struct mbuf *mreq, *mrep, *md, *mb; 1493 u_int32_t rdev; 1494 const int v3 = NFS_ISV3(dvp); 1495 1496 if (vap->va_type == VCHR || vap->va_type == VBLK) 1497 rdev = txdr_unsigned(vap->va_rdev); 1498 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1499 rdev = nfs_xdrneg1; 1500 else { 1501 VOP_ABORTOP(dvp, cnp); 1502 vput(dvp); 1503 return (EOPNOTSUPP); 1504 } 1505 nfsstats.rpccnt[NFSPROC_MKNOD]++; 1506 dnp = VTONFS(dvp); 1507 nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED + 1508 + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3)); 1509 nfsm_fhtom(dnp, v3); 1510 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1511 #ifndef NFS_V2_ONLY 1512 if (v3) { 1513 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 1514 *tl++ = vtonfsv3_type(vap->va_type); 1515 nfsm_v3attrbuild(vap, false); 1516 if (vap->va_type == VCHR || vap->va_type == VBLK) { 1517 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1518 *tl++ = txdr_unsigned(major(vap->va_rdev)); 1519 *tl = txdr_unsigned(minor(vap->va_rdev)); 1520 } 1521 } else 1522 #endif 1523 { 1524 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 1525 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1526 sp->sa_uid = nfs_xdrneg1; 1527 sp->sa_gid = nfs_xdrneg1; 1528 sp->sa_size = rdev; 1529 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1530 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1531 } 1532 nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred); 1533 if (!error) { 1534 nfsm_mtofh(dvp, newvp, v3, gotvp); 1535 if (!gotvp) { 1536 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1537 cnp->cn_namelen, cnp->cn_cred, curlwp, &np); 1538 if (!error) 1539 newvp = NFSTOV(np); 1540 } 1541 } 1542 #ifndef NFS_V2_ONLY 1543 if (v3) 1544 nfsm_wcc_data(dvp, wccflag, 0, !error); 1545 #endif 1546 nfsm_reqdone; 1547 if (error) { 1548 if (newvp) 1549 vput(newvp); 1550 } else { 1551 nfs_cache_enter(dvp, newvp, cnp); 1552 *vpp = newvp; 1553 } 1554 VTONFS(dvp)->n_flag |= NMODIFIED; 1555 if (!wccflag) 1556 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1557 vput(dvp); 1558 return (error); 1559 } 1560 1561 /* 1562 * nfs mknod vop 1563 * just call nfs_mknodrpc() to do the work. 
1564 */ 1565 /* ARGSUSED */ 1566 int 1567 nfs_mknod(void *v) 1568 { 1569 struct vop_mknod_args /* { 1570 struct vnode *a_dvp; 1571 struct vnode **a_vpp; 1572 struct componentname *a_cnp; 1573 struct vattr *a_vap; 1574 } */ *ap = v; 1575 struct vnode *dvp = ap->a_dvp; 1576 struct componentname *cnp = ap->a_cnp; 1577 int error; 1578 1579 error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap); 1580 VN_KNOTE(dvp, NOTE_WRITE); 1581 if (error == 0 || error == EEXIST) 1582 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0); 1583 return (error); 1584 } 1585 1586 /* 1587 * nfs file create call 1588 */ 1589 int 1590 nfs_create(void *v) 1591 { 1592 struct vop_create_args /* { 1593 struct vnode *a_dvp; 1594 struct vnode **a_vpp; 1595 struct componentname *a_cnp; 1596 struct vattr *a_vap; 1597 } */ *ap = v; 1598 struct vnode *dvp = ap->a_dvp; 1599 struct vattr *vap = ap->a_vap; 1600 struct componentname *cnp = ap->a_cnp; 1601 struct nfsv2_sattr *sp; 1602 u_int32_t *tl; 1603 char *cp; 1604 int32_t t1, t2; 1605 struct nfsnode *dnp, *np = (struct nfsnode *)0; 1606 struct vnode *newvp = (struct vnode *)0; 1607 char *bpos, *dpos, *cp2; 1608 int error, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1609 struct mbuf *mreq, *mrep, *md, *mb; 1610 const int v3 = NFS_ISV3(dvp); 1611 u_int32_t excl_mode = NFSV3CREATE_UNCHECKED; 1612 1613 /* 1614 * Oops, not for me.. 1615 */ 1616 if (vap->va_type == VSOCK) 1617 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1618 1619 KASSERT(vap->va_type == VREG); 1620 1621 #ifdef VA_EXCLUSIVE 1622 if (vap->va_vaflags & VA_EXCLUSIVE) { 1623 excl_mode = NFSV3CREATE_EXCLUSIVE; 1624 } 1625 #endif 1626 again: 1627 error = 0; 1628 nfsstats.rpccnt[NFSPROC_CREATE]++; 1629 dnp = VTONFS(dvp); 1630 nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED + 1631 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3)); 1632 nfsm_fhtom(dnp, v3); 1633 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1634 #ifndef NFS_V2_ONLY 1635 if (v3) { 1636 nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); 1637 if (excl_mode == NFSV3CREATE_EXCLUSIVE) { 1638 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE); 1639 nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF); 1640 *tl++ = cprng_fast32(); 1641 *tl = cprng_fast32(); 1642 } else { 1643 *tl = txdr_unsigned(excl_mode); 1644 nfsm_v3attrbuild(vap, false); 1645 } 1646 } else 1647 #endif 1648 { 1649 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 1650 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1651 sp->sa_uid = nfs_xdrneg1; 1652 sp->sa_gid = nfs_xdrneg1; 1653 sp->sa_size = 0; 1654 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1655 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1656 } 1657 nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred); 1658 if (!error) { 1659 nfsm_mtofh(dvp, newvp, v3, gotvp); 1660 if (!gotvp) { 1661 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1662 cnp->cn_namelen, cnp->cn_cred, curlwp, &np); 1663 if (!error) 1664 newvp = NFSTOV(np); 1665 } 1666 } 1667 #ifndef NFS_V2_ONLY 1668 if (v3) 1669 nfsm_wcc_data(dvp, wccflag, 0, !error); 1670 #endif 1671 nfsm_reqdone; 1672 if (error) { 1673 /* 1674 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP. 
1675 */ 1676 if (v3 && error == ENOTSUP) { 1677 if (excl_mode == NFSV3CREATE_EXCLUSIVE) { 1678 excl_mode = NFSV3CREATE_GUARDED; 1679 goto again; 1680 } else if (excl_mode == NFSV3CREATE_GUARDED) { 1681 excl_mode = NFSV3CREATE_UNCHECKED; 1682 goto again; 1683 } 1684 } 1685 } else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) { 1686 struct timespec ts; 1687 1688 getnanotime(&ts); 1689 1690 /* 1691 * make sure that we'll update timestamps as 1692 * most server implementations use them to store 1693 * the create verifier. 1694 * 1695 * XXX it's better to use TOSERVER always. 1696 */ 1697 1698 if (vap->va_atime.tv_sec == VNOVAL) 1699 vap->va_atime = ts; 1700 if (vap->va_mtime.tv_sec == VNOVAL) 1701 vap->va_mtime = ts; 1702 1703 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp); 1704 } 1705 if (error == 0) { 1706 if (cnp->cn_flags & MAKEENTRY) 1707 nfs_cache_enter(dvp, newvp, cnp); 1708 else 1709 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0); 1710 *ap->a_vpp = newvp; 1711 } else { 1712 if (newvp) 1713 vput(newvp); 1714 if (error == EEXIST) 1715 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0); 1716 } 1717 VTONFS(dvp)->n_flag |= NMODIFIED; 1718 if (!wccflag) 1719 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1720 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1721 vput(dvp); 1722 return (error); 1723 } 1724 1725 /* 1726 * nfs file remove call 1727 * To try and make nfs semantics closer to ufs semantics, a file that has 1728 * other processes using the vnode is renamed instead of removed and then 1729 * removed later on the last close. 1730 * - If v_usecount > 1 1731 * If a rename is not already in the works 1732 * call nfs_sillyrename() to set it up 1733 * else 1734 * do the remove rpc 1735 */ 1736 int 1737 nfs_remove(void *v) 1738 { 1739 struct vop_remove_args /* { 1740 struct vnodeop_desc *a_desc; 1741 struct vnode * a_dvp; 1742 struct vnode * a_vp; 1743 struct componentname * a_cnp; 1744 } */ *ap = v; 1745 struct vnode *vp = ap->a_vp; 1746 struct vnode *dvp = ap->a_dvp; 1747 struct componentname *cnp = ap->a_cnp; 1748 struct nfsnode *np = VTONFS(vp); 1749 int error = 0; 1750 struct vattr vattr; 1751 1752 #ifndef DIAGNOSTIC 1753 if (vp->v_usecount < 1) 1754 panic("nfs_remove: bad v_usecount"); 1755 #endif 1756 if (vp->v_type == VDIR) 1757 error = EPERM; 1758 else if (vp->v_usecount == 1 || (np->n_sillyrename && 1759 VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 && 1760 vattr.va_nlink > 1)) { 1761 /* 1762 * Purge the name cache so that the chance of a lookup for 1763 * the name succeeding while the remove is in progress is 1764 * minimized. Without node locking it can still happen, such 1765 * that an I/O op returns ESTALE, but since you get this if 1766 * another host removes the file.. 1767 */ 1768 cache_purge(vp); 1769 /* 1770 * throw away biocache buffers, mainly to avoid 1771 * unnecessary delayed writes later. 
1772 */ 1773 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1); 1774 /* Do the rpc */ 1775 if (error != EINTR) 1776 error = nfs_removerpc(dvp, cnp->cn_nameptr, 1777 cnp->cn_namelen, cnp->cn_cred, curlwp); 1778 } else if (!np->n_sillyrename) 1779 error = nfs_sillyrename(dvp, vp, cnp, false); 1780 if (!error && nfs_getattrcache(vp, &vattr) == 0 && 1781 vattr.va_nlink == 1) { 1782 np->n_flag |= NREMOVED; 1783 } 1784 NFS_INVALIDATE_ATTRCACHE(np); 1785 VN_KNOTE(vp, NOTE_DELETE); 1786 VN_KNOTE(dvp, NOTE_WRITE); 1787 if (dvp == vp) 1788 vrele(vp); 1789 else 1790 vput(vp); 1791 vput(dvp); 1792 return (error); 1793 } 1794 1795 /* 1796 * nfs file remove rpc called from nfs_inactive 1797 */ 1798 int 1799 nfs_removeit(struct sillyrename *sp) 1800 { 1801 1802 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred, 1803 (struct lwp *)0)); 1804 } 1805 1806 /* 1807 * Nfs remove rpc, called from nfs_remove() and nfs_removeit(). 1808 */ 1809 int 1810 nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l) 1811 { 1812 u_int32_t *tl; 1813 char *cp; 1814 #ifndef NFS_V2_ONLY 1815 int32_t t1; 1816 char *cp2; 1817 #endif 1818 int32_t t2; 1819 char *bpos, *dpos; 1820 int error = 0, wccflag = NFSV3_WCCRATTR; 1821 struct mbuf *mreq, *mrep, *md, *mb; 1822 const int v3 = NFS_ISV3(dvp); 1823 int rexmit = 0; 1824 struct nfsnode *dnp = VTONFS(dvp); 1825 1826 nfsstats.rpccnt[NFSPROC_REMOVE]++; 1827 nfsm_reqhead(dnp, NFSPROC_REMOVE, 1828 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen)); 1829 nfsm_fhtom(dnp, v3); 1830 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 1831 nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit); 1832 #ifndef NFS_V2_ONLY 1833 if (v3) 1834 nfsm_wcc_data(dvp, wccflag, 0, !error); 1835 #endif 1836 nfsm_reqdone; 1837 VTONFS(dvp)->n_flag |= NMODIFIED; 1838 if (!wccflag) 1839 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1840 /* 1841 * Kludge City: If the first reply to the remove rpc is lost.. 1842 * the reply to the retransmitted request will be ENOENT 1843 * since the file was in fact removed 1844 * Therefore, we cheat and return success. 1845 */ 1846 if (rexmit && error == ENOENT) 1847 error = 0; 1848 return (error); 1849 } 1850 1851 /* 1852 * nfs file rename call 1853 */ 1854 int 1855 nfs_rename(void *v) 1856 { 1857 struct vop_rename_args /* { 1858 struct vnode *a_fdvp; 1859 struct vnode *a_fvp; 1860 struct componentname *a_fcnp; 1861 struct vnode *a_tdvp; 1862 struct vnode *a_tvp; 1863 struct componentname *a_tcnp; 1864 } */ *ap = v; 1865 struct vnode *fvp = ap->a_fvp; 1866 struct vnode *tvp = ap->a_tvp; 1867 struct vnode *fdvp = ap->a_fdvp; 1868 struct vnode *tdvp = ap->a_tdvp; 1869 struct componentname *tcnp = ap->a_tcnp; 1870 struct componentname *fcnp = ap->a_fcnp; 1871 int error; 1872 1873 /* Check for cross-device rename */ 1874 if ((fvp->v_mount != tdvp->v_mount) || 1875 (tvp && (fvp->v_mount != tvp->v_mount))) { 1876 error = EXDEV; 1877 goto out; 1878 } 1879 1880 /* 1881 * If the tvp exists and is in use, sillyrename it before doing the 1882 * rename of the new file over it. 1883 * 1884 * Have sillyrename use link instead of rename if possible, 1885 * so that we don't lose the file if the rename fails, and so 1886 * that there's no window when the "to" file doesn't exist. 
1887 */ 1888 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename && 1889 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) { 1890 VN_KNOTE(tvp, NOTE_DELETE); 1891 vput(tvp); 1892 tvp = NULL; 1893 } 1894 1895 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1896 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1897 curlwp); 1898 1899 VN_KNOTE(fdvp, NOTE_WRITE); 1900 VN_KNOTE(tdvp, NOTE_WRITE); 1901 if (error == 0 || error == EEXIST) { 1902 if (fvp->v_type == VDIR) 1903 cache_purge(fvp); 1904 else 1905 cache_purge1(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1906 0); 1907 if (tvp != NULL && tvp->v_type == VDIR) 1908 cache_purge(tvp); 1909 else 1910 cache_purge1(tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, 1911 0); 1912 } 1913 out: 1914 if (tdvp == tvp) 1915 vrele(tdvp); 1916 else 1917 vput(tdvp); 1918 if (tvp) 1919 vput(tvp); 1920 vrele(fdvp); 1921 vrele(fvp); 1922 return (error); 1923 } 1924 1925 /* 1926 * nfs file rename rpc called from nfs_remove() above 1927 */ 1928 int 1929 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp) 1930 { 1931 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, 1932 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp)); 1933 } 1934 1935 /* 1936 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 1937 */ 1938 int 1939 nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l) 1940 { 1941 u_int32_t *tl; 1942 char *cp; 1943 #ifndef NFS_V2_ONLY 1944 int32_t t1; 1945 char *cp2; 1946 #endif 1947 int32_t t2; 1948 char *bpos, *dpos; 1949 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 1950 struct mbuf *mreq, *mrep, *md, *mb; 1951 const int v3 = NFS_ISV3(fdvp); 1952 int rexmit = 0; 1953 struct nfsnode *fdnp = VTONFS(fdvp); 1954 1955 nfsstats.rpccnt[NFSPROC_RENAME]++; 1956 nfsm_reqhead(fdnp, NFSPROC_RENAME, 1957 (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) + 1958 nfsm_rndup(tnamelen)); 1959 nfsm_fhtom(fdnp, v3); 1960 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 1961 nfsm_fhtom(VTONFS(tdvp), v3); 1962 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 1963 nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit); 1964 #ifndef NFS_V2_ONLY 1965 if (v3) { 1966 nfsm_wcc_data(fdvp, fwccflag, 0, !error); 1967 nfsm_wcc_data(tdvp, twccflag, 0, !error); 1968 } 1969 #endif 1970 nfsm_reqdone; 1971 VTONFS(fdvp)->n_flag |= NMODIFIED; 1972 VTONFS(tdvp)->n_flag |= NMODIFIED; 1973 if (!fwccflag) 1974 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp)); 1975 if (!twccflag) 1976 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp)); 1977 /* 1978 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1979 */ 1980 if (rexmit && error == ENOENT) 1981 error = 0; 1982 return (error); 1983 } 1984 1985 /* 1986 * NFS link RPC, called from nfs_link. 1987 * Assumes dvp and vp locked, and leaves them that way. 
1988 */ 1989 1990 static int 1991 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name, 1992 size_t namelen, kauth_cred_t cred, struct lwp *l) 1993 { 1994 u_int32_t *tl; 1995 char *cp; 1996 #ifndef NFS_V2_ONLY 1997 int32_t t1; 1998 char *cp2; 1999 #endif 2000 int32_t t2; 2001 char *bpos, *dpos; 2002 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 2003 struct mbuf *mreq, *mrep, *md, *mb; 2004 const int v3 = NFS_ISV3(dvp); 2005 int rexmit = 0; 2006 struct nfsnode *np = VTONFS(vp); 2007 2008 nfsstats.rpccnt[NFSPROC_LINK]++; 2009 nfsm_reqhead(np, NFSPROC_LINK, 2010 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen)); 2011 nfsm_fhtom(np, v3); 2012 nfsm_fhtom(VTONFS(dvp), v3); 2013 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 2014 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit); 2015 #ifndef NFS_V2_ONLY 2016 if (v3) { 2017 nfsm_postop_attr(vp, attrflag, 0); 2018 nfsm_wcc_data(dvp, wccflag, 0, !error); 2019 } 2020 #endif 2021 nfsm_reqdone; 2022 2023 VTONFS(dvp)->n_flag |= NMODIFIED; 2024 if (!attrflag) 2025 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 2026 if (!wccflag) 2027 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2028 2029 /* 2030 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 2031 */ 2032 if (rexmit && error == EEXIST) 2033 error = 0; 2034 2035 return error; 2036 } 2037 2038 /* 2039 * nfs hard link create call 2040 */ 2041 int 2042 nfs_link(void *v) 2043 { 2044 struct vop_link_args /* { 2045 struct vnode *a_dvp; 2046 struct vnode *a_vp; 2047 struct componentname *a_cnp; 2048 } */ *ap = v; 2049 struct vnode *vp = ap->a_vp; 2050 struct vnode *dvp = ap->a_dvp; 2051 struct componentname *cnp = ap->a_cnp; 2052 int error = 0; 2053 2054 error = vn_lock(vp, LK_EXCLUSIVE); 2055 if (error != 0) { 2056 VOP_ABORTOP(dvp, cnp); 2057 vput(dvp); 2058 return error; 2059 } 2060 2061 /* 2062 * Push all writes to the server, so that the attribute cache 2063 * doesn't get "out of sync" with the server. 2064 * XXX There should be a better way! 
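 * Note that the VOP_FSYNC() below is synchronous (FSYNC_WAIT) and its return value is not checked; the error handed back to the caller comes from the LINK RPC.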
2065 */ 2066 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0); 2067 2068 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, 2069 cnp->cn_cred, curlwp); 2070 2071 if (error == 0) { 2072 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0); 2073 } 2074 VOP_UNLOCK(vp); 2075 VN_KNOTE(vp, NOTE_LINK); 2076 VN_KNOTE(dvp, NOTE_WRITE); 2077 vput(dvp); 2078 return (error); 2079 } 2080 2081 /* 2082 * nfs symbolic link create call 2083 */ 2084 int 2085 nfs_symlink(void *v) 2086 { 2087 struct vop_symlink_args /* { 2088 struct vnode *a_dvp; 2089 struct vnode **a_vpp; 2090 struct componentname *a_cnp; 2091 struct vattr *a_vap; 2092 char *a_target; 2093 } */ *ap = v; 2094 struct vnode *dvp = ap->a_dvp; 2095 struct vattr *vap = ap->a_vap; 2096 struct componentname *cnp = ap->a_cnp; 2097 struct nfsv2_sattr *sp; 2098 u_int32_t *tl; 2099 char *cp; 2100 int32_t t1, t2; 2101 char *bpos, *dpos, *cp2; 2102 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 2103 struct mbuf *mreq, *mrep, *md, *mb; 2104 struct vnode *newvp = (struct vnode *)0; 2105 const int v3 = NFS_ISV3(dvp); 2106 int rexmit = 0; 2107 struct nfsnode *dnp = VTONFS(dvp); 2108 2109 *ap->a_vpp = NULL; 2110 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 2111 slen = strlen(ap->a_target); 2112 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED + 2113 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3)); 2114 nfsm_fhtom(dnp, v3); 2115 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 2116 #ifndef NFS_V2_ONLY 2117 if (v3) 2118 nfsm_v3attrbuild(vap, false); 2119 #endif 2120 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 2122 if (!v3) { 2123 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 2124 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 2125 sp->sa_uid = nfs_xdrneg1; 2126 sp->sa_gid = nfs_xdrneg1; 2127 sp->sa_size = nfs_xdrneg1; 2128 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 2129 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 2130 } 2132 nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred, 2133 &rexmit); 2134 #ifndef NFS_V2_ONLY 2135 if (v3) { 2136 if (!error) 2137 nfsm_mtofh(dvp, newvp, v3, gotvp); 2138 nfsm_wcc_data(dvp, wccflag, 0, !error); 2139 } 2140 #endif 2141 nfsm_reqdone; 2142 /* 2143 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
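 * (Presumably, as with REMOVE above, a lost first reply means the retransmitted SYMLINK finds the name already created and the server answers EEXIST.)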
2144 */ 2145 if (rexmit && error == EEXIST) 2146 error = 0; 2147 if (error == 0 || error == EEXIST) 2148 cache_purge1(dvp, cnp->cn_nameptr, cnp->cn_namelen, 0); 2149 if (error == 0 && newvp == NULL) { 2150 struct nfsnode *np = NULL; 2151 2152 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2153 cnp->cn_cred, curlwp, &np); 2154 if (error == 0) 2155 newvp = NFSTOV(np); 2156 } 2157 if (error) { 2158 if (newvp != NULL) 2159 vput(newvp); 2160 } else { 2161 *ap->a_vpp = newvp; 2162 } 2163 VTONFS(dvp)->n_flag |= NMODIFIED; 2164 if (!wccflag) 2165 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2166 VN_KNOTE(dvp, NOTE_WRITE); 2167 vput(dvp); 2168 return (error); 2169 } 2170 2171 /* 2172 * nfs make dir call 2173 */ 2174 int 2175 nfs_mkdir(void *v) 2176 { 2177 struct vop_mkdir_args /* { 2178 struct vnode *a_dvp; 2179 struct vnode **a_vpp; 2180 struct componentname *a_cnp; 2181 struct vattr *a_vap; 2182 } */ *ap = v; 2183 struct vnode *dvp = ap->a_dvp; 2184 struct vattr *vap = ap->a_vap; 2185 struct componentname *cnp = ap->a_cnp; 2186 struct nfsv2_sattr *sp; 2187 u_int32_t *tl; 2188 char *cp; 2189 int32_t t1, t2; 2190 int len; 2191 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0; 2192 struct vnode *newvp = (struct vnode *)0; 2193 char *bpos, *dpos, *cp2; 2194 int error = 0, wccflag = NFSV3_WCCRATTR; 2195 int gotvp = 0; 2196 int rexmit = 0; 2197 struct mbuf *mreq, *mrep, *md, *mb; 2198 const int v3 = NFS_ISV3(dvp); 2199 2200 len = cnp->cn_namelen; 2201 nfsstats.rpccnt[NFSPROC_MKDIR]++; 2202 nfsm_reqhead(dnp, NFSPROC_MKDIR, 2203 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3)); 2204 nfsm_fhtom(dnp, v3); 2205 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 2206 #ifndef NFS_V2_ONLY 2207 if (v3) { 2208 nfsm_v3attrbuild(vap, false); 2209 } else 2210 #endif 2211 { 2212 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 2213 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 2214 sp->sa_uid = nfs_xdrneg1; 2215 sp->sa_gid = nfs_xdrneg1; 2216 sp->sa_size = nfs_xdrneg1; 2217 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 2218 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 2219 } 2220 nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit); 2221 if (!error) 2222 nfsm_mtofh(dvp, newvp, v3, gotvp); 2223 if (v3) 2224 nfsm_wcc_data(dvp, wccflag, 0, !error); 2225 nfsm_reqdone; 2226 VTONFS(dvp)->n_flag |= NMODIFIED; 2227 if (!wccflag) 2228 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2229 /* 2230 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry 2231 * if we can succeed in looking up the directory. 
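 * The same nfs_lookitup() path below is also used when the MKDIR reply did not include the new directory's file handle (!gotvp).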
2232 */ 2233 if ((rexmit && error == EEXIST) || (!error && !gotvp)) { 2234 if (newvp) { 2235 vput(newvp); 2236 newvp = (struct vnode *)0; 2237 } 2238 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 2239 curlwp, &np); 2240 if (!error) { 2241 newvp = NFSTOV(np); 2242 if (newvp->v_type != VDIR || newvp == dvp) 2243 error = EEXIST; 2244 } 2245 } 2246 if (error) { 2247 if (newvp) { 2248 if (dvp != newvp) 2249 vput(newvp); 2250 else 2251 vrele(newvp); 2252 } 2253 } else { 2254 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2255 nfs_cache_enter(dvp, newvp, cnp); 2256 *ap->a_vpp = newvp; 2257 } 2258 vput(dvp); 2259 return (error); 2260 } 2261 2262 /* 2263 * nfs remove directory call 2264 */ 2265 int 2266 nfs_rmdir(void *v) 2267 { 2268 struct vop_rmdir_args /* { 2269 struct vnode *a_dvp; 2270 struct vnode *a_vp; 2271 struct componentname *a_cnp; 2272 } */ *ap = v; 2273 struct vnode *vp = ap->a_vp; 2274 struct vnode *dvp = ap->a_dvp; 2275 struct componentname *cnp = ap->a_cnp; 2276 u_int32_t *tl; 2277 char *cp; 2278 #ifndef NFS_V2_ONLY 2279 int32_t t1; 2280 char *cp2; 2281 #endif 2282 int32_t t2; 2283 char *bpos, *dpos; 2284 int error = 0, wccflag = NFSV3_WCCRATTR; 2285 int rexmit = 0; 2286 struct mbuf *mreq, *mrep, *md, *mb; 2287 const int v3 = NFS_ISV3(dvp); 2288 struct nfsnode *dnp; 2289 2290 if (dvp == vp) { 2291 vrele(dvp); 2292 vput(dvp); 2293 return (EINVAL); 2294 } 2295 nfsstats.rpccnt[NFSPROC_RMDIR]++; 2296 dnp = VTONFS(dvp); 2297 nfsm_reqhead(dnp, NFSPROC_RMDIR, 2298 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 2299 nfsm_fhtom(dnp, v3); 2300 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 2301 nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit); 2302 #ifndef NFS_V2_ONLY 2303 if (v3) 2304 nfsm_wcc_data(dvp, wccflag, 0, !error); 2305 #endif 2306 nfsm_reqdone; 2307 VTONFS(dvp)->n_flag |= NMODIFIED; 2308 if (!wccflag) 2309 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2310 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2311 VN_KNOTE(vp, NOTE_DELETE); 2312 cache_purge(vp); 2313 vput(vp); 2314 vput(dvp); 2315 /* 2316 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2317 */ 2318 if (rexmit && error == ENOENT) 2319 error = 0; 2320 return (error); 2321 } 2322 2323 /* 2324 * nfs readdir call 2325 */ 2326 int 2327 nfs_readdir(void *v) 2328 { 2329 struct vop_readdir_args /* { 2330 struct vnode *a_vp; 2331 struct uio *a_uio; 2332 kauth_cred_t a_cred; 2333 int *a_eofflag; 2334 off_t **a_cookies; 2335 int *a_ncookies; 2336 } */ *ap = v; 2337 struct vnode *vp = ap->a_vp; 2338 struct uio *uio = ap->a_uio; 2339 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2340 char *base = uio->uio_iov->iov_base; 2341 int tresid, error; 2342 size_t count, lost; 2343 struct dirent *dp; 2344 off_t *cookies = NULL; 2345 int ncookies = 0, nc; 2346 2347 if (vp->v_type != VDIR) 2348 return (EPERM); 2349 2350 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1); 2351 count = uio->uio_resid - lost; 2352 if (count <= 0) 2353 return (EINVAL); 2354 2355 /* 2356 * Call nfs_bioread() to do the real work. 2357 */ 2358 tresid = uio->uio_resid = count; 2359 error = nfs_bioread(vp, uio, 0, ap->a_cred, 2360 ap->a_cookies ? 
NFSBIO_CACHECOOKIES : 0); 2361 2362 if (!error && ap->a_cookies) { 2363 ncookies = count / 16; 2364 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK); 2365 *ap->a_cookies = cookies; 2366 } 2367 2368 if (!error && uio->uio_resid == tresid) { 2369 uio->uio_resid += lost; 2370 nfsstats.direofcache_misses++; 2371 if (ap->a_cookies) 2372 *ap->a_ncookies = 0; 2373 *ap->a_eofflag = 1; 2374 return (0); 2375 } 2376 2377 if (!error && ap->a_cookies) { 2378 /* 2379 * Only the NFS server and emulations use cookies, and they 2380 * load the directory block into system space, so we can 2381 * just look at it directly. 2382 */ 2383 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) || 2384 uio->uio_iovcnt != 1) 2385 panic("nfs_readdir: lost in space"); 2386 for (nc = 0; ncookies-- && 2387 base < (char *)uio->uio_iov->iov_base; nc++){ 2388 dp = (struct dirent *) base; 2389 if (dp->d_reclen == 0) 2390 break; 2391 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) 2392 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp); 2393 else 2394 *(cookies++) = NFS_GETCOOKIE(dp); 2395 base += dp->d_reclen; 2396 } 2397 uio->uio_resid += 2398 ((char *)uio->uio_iov->iov_base - base); 2399 uio->uio_iov->iov_len += 2400 ((char *)uio->uio_iov->iov_base - base); 2401 uio->uio_iov->iov_base = base; 2402 *ap->a_ncookies = nc; 2403 } 2404 2405 uio->uio_resid += lost; 2406 *ap->a_eofflag = 0; 2407 return (error); 2408 } 2409 2410 /* 2411 * Readdir rpc call. 2412 * Called from below the buffer cache by nfs_doio(). 2413 */ 2414 int 2415 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred) 2416 { 2417 int len, left; 2418 struct dirent *dp = NULL; 2419 u_int32_t *tl; 2420 char *cp; 2421 int32_t t1, t2; 2422 char *bpos, *dpos, *cp2; 2423 struct mbuf *mreq, *mrep, *md, *mb; 2424 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2425 struct nfsnode *dnp = VTONFS(vp); 2426 u_quad_t fileno; 2427 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1; 2428 #ifndef NFS_V2_ONLY 2429 int attrflag; 2430 #endif 2431 int nrpcs = 0, reclen; 2432 const int v3 = NFS_ISV3(vp); 2433 2434 #ifdef DIAGNOSTIC 2435 /* 2436 * Should be called from buffer cache, so only amount of 2437 * NFS_DIRBLKSIZ will be requested. 2438 */ 2439 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2440 panic("nfs readdirrpc bad uio"); 2441 #endif 2442 2443 /* 2444 * Loop around doing readdir rpc's of size nm_readdirsize 2445 * truncated to a multiple of NFS_DIRFRAGSIZ. 2446 * The stopping criteria is EOF or buffer full. 2447 */ 2448 while (more_dirs && bigenough) { 2449 /* 2450 * Heuristic: don't bother to do another RPC to further 2451 * fill up this block if there is not much room left. (< 50% 2452 * of the readdir RPC size). This wastes some buffer space 2453 * but can save up to 50% in RPC calls. 
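 * (Concretely: after the first RPC, stop as soon as less than half of nm_readdirsize would still fit in the remaining buffer space.)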
2454 */ 2455 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2456 bigenough = 0; 2457 break; 2458 } 2459 nfsstats.rpccnt[NFSPROC_READDIR]++; 2460 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) + 2461 NFSX_READDIR(v3)); 2462 nfsm_fhtom(dnp, v3); 2463 #ifndef NFS_V2_ONLY 2464 if (v3) { 2465 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED); 2466 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2467 txdr_swapcookie3(uiop->uio_offset, tl); 2468 } else { 2469 txdr_cookie3(uiop->uio_offset, tl); 2470 } 2471 tl += 2; 2472 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2473 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2474 } else 2475 #endif 2476 { 2477 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 2478 *tl++ = txdr_unsigned(uiop->uio_offset); 2479 } 2480 *tl = txdr_unsigned(nmp->nm_readdirsize); 2481 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred); 2482 nrpcs++; 2483 #ifndef NFS_V2_ONLY 2484 if (v3) { 2485 nfsm_postop_attr(vp, attrflag, 0); 2486 if (!error) { 2487 nfsm_dissect(tl, u_int32_t *, 2488 2 * NFSX_UNSIGNED); 2489 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2490 dnp->n_cookieverf.nfsuquad[1] = *tl; 2491 } else { 2492 m_freem(mrep); 2493 goto nfsmout; 2494 } 2495 } 2496 #endif 2497 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2498 more_dirs = fxdr_unsigned(int, *tl); 2499 2500 /* loop thru the dir entries, doctoring them to 4bsd form */ 2501 while (more_dirs && bigenough) { 2502 #ifndef NFS_V2_ONLY 2503 if (v3) { 2504 nfsm_dissect(tl, u_int32_t *, 2505 3 * NFSX_UNSIGNED); 2506 fileno = fxdr_hyper(tl); 2507 len = fxdr_unsigned(int, *(tl + 2)); 2508 } else 2509 #endif 2510 { 2511 nfsm_dissect(tl, u_int32_t *, 2512 2 * NFSX_UNSIGNED); 2513 fileno = fxdr_unsigned(u_quad_t, *tl++); 2514 len = fxdr_unsigned(int, *tl); 2515 } 2516 if (len <= 0 || len > NFS_MAXNAMLEN) { 2517 error = EBADRPC; 2518 m_freem(mrep); 2519 goto nfsmout; 2520 } 2521 /* for cookie stashing */ 2522 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2523 left = NFS_DIRFRAGSIZ - blksiz; 2524 if (reclen > left) { 2525 memset(uiop->uio_iov->iov_base, 0, left); 2526 dp->d_reclen += left; 2527 UIO_ADVANCE(uiop, left); 2528 blksiz = 0; 2529 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2530 } 2531 if (reclen > uiop->uio_resid) 2532 bigenough = 0; 2533 if (bigenough) { 2534 int tlen; 2535 2536 dp = (struct dirent *)uiop->uio_iov->iov_base; 2537 dp->d_fileno = fileno; 2538 dp->d_namlen = len; 2539 dp->d_reclen = reclen; 2540 dp->d_type = DT_UNKNOWN; 2541 blksiz += reclen; 2542 if (blksiz == NFS_DIRFRAGSIZ) 2543 blksiz = 0; 2544 UIO_ADVANCE(uiop, DIRHDSIZ); 2545 nfsm_mtouio(uiop, len); 2546 tlen = reclen - (DIRHDSIZ + len); 2547 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2548 UIO_ADVANCE(uiop, tlen); 2549 } else 2550 nfsm_adv(nfsm_rndup(len)); 2551 #ifndef NFS_V2_ONLY 2552 if (v3) { 2553 nfsm_dissect(tl, u_int32_t *, 2554 3 * NFSX_UNSIGNED); 2555 } else 2556 #endif 2557 { 2558 nfsm_dissect(tl, u_int32_t *, 2559 2 * NFSX_UNSIGNED); 2560 } 2561 if (bigenough) { 2562 #ifndef NFS_V2_ONLY 2563 if (v3) { 2564 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2565 uiop->uio_offset = 2566 fxdr_swapcookie3(tl); 2567 else 2568 uiop->uio_offset = 2569 fxdr_cookie3(tl); 2570 } 2571 else 2572 #endif 2573 { 2574 uiop->uio_offset = 2575 fxdr_unsigned(off_t, *tl); 2576 } 2577 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2578 } 2579 if (v3) 2580 tl += 2; 2581 else 2582 tl++; 2583 more_dirs = fxdr_unsigned(int, *tl); 2584 } 2585 /* 2586 * If at end of rpc data, get the eof boolean 2587 */ 2588 if (!more_dirs) { 2589 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2590 more_dirs 
= (fxdr_unsigned(int, *tl) == 0); 2591 2592 /* 2593 * kludge: if we got no entries, treat it as EOF. 2594 * some server sometimes send a reply without any 2595 * entries or EOF. 2596 * although it might mean the server has very long name, 2597 * we can't handle such entries anyway. 2598 */ 2599 2600 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2601 more_dirs = 0; 2602 } 2603 m_freem(mrep); 2604 } 2605 /* 2606 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2607 * by increasing d_reclen for the last record. 2608 */ 2609 if (blksiz > 0) { 2610 left = NFS_DIRFRAGSIZ - blksiz; 2611 memset(uiop->uio_iov->iov_base, 0, left); 2612 dp->d_reclen += left; 2613 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2614 UIO_ADVANCE(uiop, left); 2615 } 2616 2617 /* 2618 * We are now either at the end of the directory or have filled the 2619 * block. 2620 */ 2621 if (bigenough) { 2622 dnp->n_direofoffset = uiop->uio_offset; 2623 dnp->n_flag |= NEOFVALID; 2624 } 2625 nfsmout: 2626 return (error); 2627 } 2628 2629 #ifndef NFS_V2_ONLY 2630 /* 2631 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2632 */ 2633 int 2634 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred) 2635 { 2636 int len, left; 2637 struct dirent *dp = NULL; 2638 u_int32_t *tl; 2639 char *cp; 2640 int32_t t1, t2; 2641 struct vnode *newvp; 2642 char *bpos, *dpos, *cp2; 2643 struct mbuf *mreq, *mrep, *md, *mb; 2644 struct nameidata nami, *ndp = &nami; 2645 struct componentname *cnp = &ndp->ni_cnd; 2646 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2647 struct nfsnode *dnp = VTONFS(vp), *np; 2648 nfsfh_t *fhp; 2649 u_quad_t fileno; 2650 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2651 int attrflag, fhsize, nrpcs = 0, reclen; 2652 struct nfs_fattr fattr, *fp; 2653 2654 #ifdef DIAGNOSTIC 2655 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2656 panic("nfs readdirplusrpc bad uio"); 2657 #endif 2658 ndp->ni_dvp = vp; 2659 newvp = NULLVP; 2660 2661 /* 2662 * Loop around doing readdir rpc's of size nm_readdirsize 2663 * truncated to a multiple of NFS_DIRFRAGSIZ. 2664 * The stopping criteria is EOF or buffer full. 
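 * Unlike READDIR, READDIRPLUS also returns post-op attributes and a file handle for each entry; these are used below to prime the attribute and name caches.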
2665 */ 2666 while (more_dirs && bigenough) { 2667 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2668 bigenough = 0; 2669 break; 2670 } 2671 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2672 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS, 2673 NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2674 nfsm_fhtom(dnp, 1); 2675 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED); 2676 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2677 txdr_swapcookie3(uiop->uio_offset, tl); 2678 } else { 2679 txdr_cookie3(uiop->uio_offset, tl); 2680 } 2681 tl += 2; 2682 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2683 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2684 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2685 *tl = txdr_unsigned(nmp->nm_rsize); 2686 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred); 2687 nfsm_postop_attr(vp, attrflag, 0); 2688 if (error) { 2689 m_freem(mrep); 2690 goto nfsmout; 2691 } 2692 nrpcs++; 2693 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2694 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2695 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2696 more_dirs = fxdr_unsigned(int, *tl); 2697 2698 /* loop thru the dir entries, doctoring them to 4bsd form */ 2699 while (more_dirs && bigenough) { 2700 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2701 fileno = fxdr_hyper(tl); 2702 len = fxdr_unsigned(int, *(tl + 2)); 2703 if (len <= 0 || len > NFS_MAXNAMLEN) { 2704 error = EBADRPC; 2705 m_freem(mrep); 2706 goto nfsmout; 2707 } 2708 /* for cookie stashing */ 2709 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2710 left = NFS_DIRFRAGSIZ - blksiz; 2711 if (reclen > left) { 2712 /* 2713 * DIRFRAGSIZ is aligned, no need to align 2714 * again here. 2715 */ 2716 memset(uiop->uio_iov->iov_base, 0, left); 2717 dp->d_reclen += left; 2718 UIO_ADVANCE(uiop, left); 2719 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2720 blksiz = 0; 2721 } 2722 if (reclen > uiop->uio_resid) 2723 bigenough = 0; 2724 if (bigenough) { 2725 int tlen; 2726 2727 dp = (struct dirent *)uiop->uio_iov->iov_base; 2728 dp->d_fileno = fileno; 2729 dp->d_namlen = len; 2730 dp->d_reclen = reclen; 2731 dp->d_type = DT_UNKNOWN; 2732 blksiz += reclen; 2733 if (blksiz == NFS_DIRFRAGSIZ) 2734 blksiz = 0; 2735 UIO_ADVANCE(uiop, DIRHDSIZ); 2736 nfsm_mtouio(uiop, len); 2737 tlen = reclen - (DIRHDSIZ + len); 2738 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2739 UIO_ADVANCE(uiop, tlen); 2740 cnp->cn_nameptr = dp->d_name; 2741 cnp->cn_namelen = dp->d_namlen; 2742 } else 2743 nfsm_adv(nfsm_rndup(len)); 2744 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2745 if (bigenough) { 2746 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2747 uiop->uio_offset = 2748 fxdr_swapcookie3(tl); 2749 else 2750 uiop->uio_offset = 2751 fxdr_cookie3(tl); 2752 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2753 } 2754 tl += 2; 2755 2756 /* 2757 * Since the attributes are before the file handle 2758 * (sigh), we must skip over the attributes and then 2759 * come back and get them. 
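 * (The attributes are copied into a local nfs_fattr here and applied with nfs_loadattrcache() once the file handle has been decoded.)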
2760 */ 2761 attrflag = fxdr_unsigned(int, *tl); 2762 if (attrflag) { 2763 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR); 2764 memcpy(&fattr, fp, NFSX_V3FATTR); 2765 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2766 doit = fxdr_unsigned(int, *tl); 2767 if (doit) { 2768 nfsm_getfh(fhp, fhsize, 1); 2769 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2770 vref(vp); 2771 newvp = vp; 2772 np = dnp; 2773 } else { 2774 error = nfs_nget1(vp->v_mount, fhp, 2775 fhsize, &np, LK_NOWAIT); 2776 if (!error) 2777 newvp = NFSTOV(np); 2778 } 2779 if (!error) { 2780 nfs_loadattrcache(&newvp, &fattr, 0, 0); 2781 if (bigenough) { 2782 dp->d_type = 2783 IFTODT(VTTOIF(np->n_vattr->va_type)); 2784 if (cnp->cn_namelen <= NCHNAMLEN) { 2785 ndp->ni_vp = newvp; 2786 nfs_cache_enter(ndp->ni_dvp, 2787 ndp->ni_vp, cnp); 2788 } 2789 } 2790 } 2791 error = 0; 2792 } 2793 } else { 2794 /* Just skip over the file handle */ 2795 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2796 i = fxdr_unsigned(int, *tl); 2797 nfsm_adv(nfsm_rndup(i)); 2798 } 2799 if (newvp != NULLVP) { 2800 if (newvp == vp) 2801 vrele(newvp); 2802 else 2803 vput(newvp); 2804 newvp = NULLVP; 2805 } 2806 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2807 more_dirs = fxdr_unsigned(int, *tl); 2808 } 2809 /* 2810 * If at end of rpc data, get the eof boolean 2811 */ 2812 if (!more_dirs) { 2813 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2814 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2815 2816 /* 2817 * kludge: see a comment in nfs_readdirrpc. 2818 */ 2819 2820 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2821 more_dirs = 0; 2822 } 2823 m_freem(mrep); 2824 } 2825 /* 2826 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2827 * by increasing d_reclen for the last record. 2828 */ 2829 if (blksiz > 0) { 2830 left = NFS_DIRFRAGSIZ - blksiz; 2831 memset(uiop->uio_iov->iov_base, 0, left); 2832 dp->d_reclen += left; 2833 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2834 UIO_ADVANCE(uiop, left); 2835 } 2836 2837 /* 2838 * We are now either at the end of the directory or have filled the 2839 * block. 2840 */ 2841 if (bigenough) { 2842 dnp->n_direofoffset = uiop->uio_offset; 2843 dnp->n_flag |= NEOFVALID; 2844 } 2845 nfsmout: 2846 if (newvp != NULLVP) { 2847 if(newvp == vp) 2848 vrele(newvp); 2849 else 2850 vput(newvp); 2851 } 2852 return (error); 2853 } 2854 #endif 2855 2856 /* 2857 * Silly rename. To make the NFS filesystem that is stateless look a little 2858 * more like the "ufs" a remove of an active vnode is translated to a rename 2859 * to a funny looking filename that is removed by nfs_inactive on the 2860 * nfsnode. There is the potential for another process on a different client 2861 * to create the same funny name between the nfs_lookitup() fails and the 2862 * nfs_rename() completes, but... 
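 * The funny name has the form ".nfsA<pppp>4.4", where <pppp> is the low 16 bits of the process id in hex; the 'A' is advanced towards 'z' until nfs_lookitup() reports the name unused.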
2863 */ 2864 int 2865 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink) 2866 { 2867 struct sillyrename *sp; 2868 struct nfsnode *np; 2869 int error; 2870 pid_t pid; 2871 2872 cache_purge(dvp); 2873 np = VTONFS(vp); 2874 #ifdef DIAGNOSTIC 2875 if (vp->v_type == VDIR) 2876 panic("nfs: sillyrename dir"); 2877 #endif 2878 sp = kmem_alloc(sizeof(*sp), KM_SLEEP); 2879 sp->s_cred = kauth_cred_dup(cnp->cn_cred); 2880 sp->s_dvp = dvp; 2881 vref(dvp); 2882 2883 /* Fudge together a funny name */ 2884 pid = curlwp->l_proc->p_pid; 2885 memcpy(sp->s_name, ".nfsAxxxx4.4", 13); 2886 sp->s_namlen = 12; 2887 sp->s_name[8] = hexdigits[pid & 0xf]; 2888 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf]; 2889 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf]; 2890 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf]; 2891 2892 /* Try lookitups until we get one that isn't there */ 2893 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2894 curlwp, (struct nfsnode **)0) == 0) { 2895 sp->s_name[4]++; 2896 if (sp->s_name[4] > 'z') { 2897 error = EINVAL; 2898 goto bad; 2899 } 2900 } 2901 if (dolink) { 2902 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen, 2903 sp->s_cred, curlwp); 2904 /* 2905 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP. 2906 */ 2907 if (error == ENOTSUP) { 2908 error = nfs_renameit(dvp, cnp, sp); 2909 } 2910 } else { 2911 error = nfs_renameit(dvp, cnp, sp); 2912 } 2913 if (error) 2914 goto bad; 2915 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2916 curlwp, &np); 2917 np->n_sillyrename = sp; 2918 return (0); 2919 bad: 2920 vrele(sp->s_dvp); 2921 kauth_cred_free(sp->s_cred); 2922 kmem_free(sp, sizeof(*sp)); 2923 return (error); 2924 } 2925 2926 /* 2927 * Look up a file name and optionally either update the file handle or 2928 * allocate an nfsnode, depending on the value of npp.
2929 * npp == NULL --> just do the lookup 2930 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2931 * handled too 2932 * *npp != NULL --> update the file handle in the vnode 2933 */ 2934 int 2935 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp) 2936 { 2937 u_int32_t *tl; 2938 char *cp; 2939 int32_t t1, t2; 2940 struct vnode *newvp = (struct vnode *)0; 2941 struct nfsnode *np, *dnp = VTONFS(dvp); 2942 char *bpos, *dpos, *cp2; 2943 int error = 0, fhlen; 2944 #ifndef NFS_V2_ONLY 2945 int attrflag; 2946 #endif 2947 struct mbuf *mreq, *mrep, *md, *mb; 2948 nfsfh_t *nfhp; 2949 const int v3 = NFS_ISV3(dvp); 2950 2951 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2952 nfsm_reqhead(dnp, NFSPROC_LOOKUP, 2953 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 2954 nfsm_fhtom(dnp, v3); 2955 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2956 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred); 2957 if (npp && !error) { 2958 nfsm_getfh(nfhp, fhlen, v3); 2959 if (*npp) { 2960 np = *npp; 2961 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) { 2962 kmem_free(np->n_fhp, np->n_fhsize); 2963 np->n_fhp = &np->n_fh; 2964 } 2965 #if NFS_SMALLFH < NFSX_V3FHMAX 2966 else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH) 2967 np->n_fhp = kmem_alloc(fhlen, KM_SLEEP); 2968 #endif 2969 memcpy(np->n_fhp, nfhp, fhlen); 2970 np->n_fhsize = fhlen; 2971 newvp = NFSTOV(np); 2972 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2973 vref(dvp); 2974 newvp = dvp; 2975 np = dnp; 2976 } else { 2977 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2978 if (error) { 2979 m_freem(mrep); 2980 return (error); 2981 } 2982 newvp = NFSTOV(np); 2983 } 2984 #ifndef NFS_V2_ONLY 2985 if (v3) { 2986 nfsm_postop_attr(newvp, attrflag, 0); 2987 if (!attrflag && *npp == NULL) { 2988 m_freem(mrep); 2989 vput(newvp); 2990 return (ENOENT); 2991 } 2992 } else 2993 #endif 2994 nfsm_loadattr(newvp, (struct vattr *)0, 0); 2995 } 2996 nfsm_reqdone; 2997 if (npp && *npp == NULL) { 2998 if (error) { 2999 if (newvp) 3000 vput(newvp); 3001 } else 3002 *npp = np; 3003 } 3004 return (error); 3005 } 3006 3007 #ifndef NFS_V2_ONLY 3008 /* 3009 * Nfs Version 3 commit rpc 3010 */ 3011 int 3012 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l) 3013 { 3014 char *cp; 3015 u_int32_t *tl; 3016 int32_t t1, t2; 3017 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 3018 char *bpos, *dpos, *cp2; 3019 int error = 0, wccflag = NFSV3_WCCRATTR; 3020 struct mbuf *mreq, *mrep, *md, *mb; 3021 struct nfsnode *np; 3022 3023 KASSERT(NFS_ISV3(vp)); 3024 3025 #ifdef NFS_DEBUG_COMMIT 3026 printf("commit %lu - %lu\n", (unsigned long)offset, 3027 (unsigned long)(offset + cnt)); 3028 #endif 3029 3030 mutex_enter(&nmp->nm_lock); 3031 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) { 3032 mutex_exit(&nmp->nm_lock); 3033 return (0); 3034 } 3035 mutex_exit(&nmp->nm_lock); 3036 nfsstats.rpccnt[NFSPROC_COMMIT]++; 3037 np = VTONFS(vp); 3038 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1)); 3039 nfsm_fhtom(np, 1); 3040 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3041 txdr_hyper(offset, tl); 3042 tl += 2; 3043 *tl = txdr_unsigned(cnt); 3044 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred); 3045 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false); 3046 if (!error) { 3047 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF); 3048 mutex_enter(&nmp->nm_lock); 3049 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) || 3050 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) { 3051 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF); 3052 
error = NFSERR_STALEWRITEVERF; 3053 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF; 3054 } 3055 mutex_exit(&nmp->nm_lock); 3056 } 3057 nfsm_reqdone; 3058 return (error); 3059 } 3060 #endif 3061 3062 /* 3063 * Kludge City.. 3064 * - make nfs_bmap() essentially a no-op that does no translation 3065 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc 3066 * (Maybe I could use the process's page mapping, but I was concerned that 3067 * Kernel Write might not be enabled and also figured copyout() would do 3068 * a lot more work than memcpy() and also it currently happens in the 3069 * context of the swapper process (2). 3070 */ 3071 int 3072 nfs_bmap(void *v) 3073 { 3074 struct vop_bmap_args /* { 3075 struct vnode *a_vp; 3076 daddr_t a_bn; 3077 struct vnode **a_vpp; 3078 daddr_t *a_bnp; 3079 int *a_runp; 3080 } */ *ap = v; 3081 struct vnode *vp = ap->a_vp; 3082 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift; 3083 3084 if (ap->a_vpp != NULL) 3085 *ap->a_vpp = vp; 3086 if (ap->a_bnp != NULL) 3087 *ap->a_bnp = ap->a_bn << bshift; 3088 if (ap->a_runp != NULL) 3089 *ap->a_runp = 1024 * 1024; /* XXX */ 3090 return (0); 3091 } 3092 3093 /* 3094 * Strategy routine. 3095 * For async requests when nfsiod(s) are running, queue the request by 3096 * calling nfs_asyncio(), otherwise just all nfs_doio() to do the 3097 * request. 3098 */ 3099 int 3100 nfs_strategy(void *v) 3101 { 3102 struct vop_strategy_args *ap = v; 3103 struct buf *bp = ap->a_bp; 3104 int error = 0; 3105 3106 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC)) 3107 panic("nfs physio/async"); 3108 3109 /* 3110 * If the op is asynchronous and an i/o daemon is waiting 3111 * queue the request, wake it up and wait for completion 3112 * otherwise just do it ourselves. 3113 */ 3114 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp)) 3115 error = nfs_doio(bp); 3116 return (error); 3117 } 3118 3119 /* 3120 * fsync vnode op. Just call nfs_flush() with commit == 1. 3121 */ 3122 /* ARGSUSED */ 3123 int 3124 nfs_fsync(void *v) 3125 { 3126 struct vop_fsync_args /* { 3127 struct vnodeop_desc *a_desc; 3128 struct vnode * a_vp; 3129 kauth_cred_t a_cred; 3130 int a_flags; 3131 off_t offlo; 3132 off_t offhi; 3133 struct lwp * a_l; 3134 } */ *ap = v; 3135 3136 struct vnode *vp = ap->a_vp; 3137 3138 if (vp->v_type != VREG) 3139 return 0; 3140 3141 return (nfs_flush(vp, ap->a_cred, 3142 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1)); 3143 } 3144 3145 /* 3146 * Flush all the data associated with a vnode. 3147 */ 3148 int 3149 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l, 3150 int commit) 3151 { 3152 struct nfsnode *np = VTONFS(vp); 3153 int error; 3154 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO; 3155 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist); 3156 3157 mutex_enter(vp->v_interlock); 3158 error = VOP_PUTPAGES(vp, 0, 0, flushflags); 3159 if (np->n_flag & NWRITEERR) { 3160 error = np->n_error; 3161 np->n_flag &= ~NWRITEERR; 3162 } 3163 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0); 3164 return (error); 3165 } 3166 3167 /* 3168 * Return POSIX pathconf information applicable to nfs. 3169 * 3170 * N.B. The NFS V2 protocol doesn't support this RPC. 
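 * For v2 mounts the RPC-backed names therefore fail with EINVAL, while _PC_FILESIZEBITS falls back to the v2 32-bit file size limit.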
3171 */ 3172 /* ARGSUSED */ 3173 int 3174 nfs_pathconf(void *v) 3175 { 3176 struct vop_pathconf_args /* { 3177 struct vnode *a_vp; 3178 int a_name; 3179 register_t *a_retval; 3180 } */ *ap = v; 3181 struct nfsv3_pathconf *pcp; 3182 struct vnode *vp = ap->a_vp; 3183 struct mbuf *mreq, *mrep, *md, *mb; 3184 int32_t t1, t2; 3185 u_int32_t *tl; 3186 char *bpos, *dpos, *cp, *cp2; 3187 int error = 0, attrflag; 3188 #ifndef NFS_V2_ONLY 3189 struct nfsmount *nmp; 3190 unsigned int l; 3191 u_int64_t maxsize; 3192 #endif 3193 const int v3 = NFS_ISV3(vp); 3194 struct nfsnode *np = VTONFS(vp); 3195 3196 switch (ap->a_name) { 3197 /* Names that can be resolved locally. */ 3198 case _PC_PIPE_BUF: 3199 *ap->a_retval = PIPE_BUF; 3200 break; 3201 case _PC_SYNC_IO: 3202 *ap->a_retval = 1; 3203 break; 3204 /* Names that cannot be resolved locally; do an RPC, if possible. */ 3205 case _PC_LINK_MAX: 3206 case _PC_NAME_MAX: 3207 case _PC_CHOWN_RESTRICTED: 3208 case _PC_NO_TRUNC: 3209 if (!v3) { 3210 error = EINVAL; 3211 break; 3212 } 3213 nfsstats.rpccnt[NFSPROC_PATHCONF]++; 3214 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1)); 3215 nfsm_fhtom(np, 1); 3216 nfsm_request(np, NFSPROC_PATHCONF, 3217 curlwp, curlwp->l_cred); /* XXX */ 3218 nfsm_postop_attr(vp, attrflag, 0); 3219 if (!error) { 3220 nfsm_dissect(pcp, struct nfsv3_pathconf *, 3221 NFSX_V3PATHCONF); 3222 switch (ap->a_name) { 3223 case _PC_LINK_MAX: 3224 *ap->a_retval = 3225 fxdr_unsigned(register_t, pcp->pc_linkmax); 3226 break; 3227 case _PC_NAME_MAX: 3228 *ap->a_retval = 3229 fxdr_unsigned(register_t, pcp->pc_namemax); 3230 break; 3231 case _PC_CHOWN_RESTRICTED: 3232 *ap->a_retval = 3233 (pcp->pc_chownrestricted == nfs_true); 3234 break; 3235 case _PC_NO_TRUNC: 3236 *ap->a_retval = 3237 (pcp->pc_notrunc == nfs_true); 3238 break; 3239 } 3240 } 3241 nfsm_reqdone; 3242 break; 3243 case _PC_FILESIZEBITS: 3244 #ifndef NFS_V2_ONLY 3245 if (v3) { 3246 nmp = VFSTONFS(vp->v_mount); 3247 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0) 3248 if ((error = nfs_fsinfo(nmp, vp, 3249 curlwp->l_cred, curlwp)) != 0) /* XXX */ 3250 break; 3251 for (l = 0, maxsize = nmp->nm_maxfilesize; 3252 (maxsize >> l) > 0; l++) 3253 ; 3254 *ap->a_retval = l + 1; 3255 } else 3256 #endif 3257 { 3258 *ap->a_retval = 32; /* NFS V2 limitation */ 3259 } 3260 break; 3261 default: 3262 error = EINVAL; 3263 break; 3264 } 3265 3266 return (error); 3267 } 3268 3269 /* 3270 * NFS advisory byte-level locks. 3271 */ 3272 int 3273 nfs_advlock(void *v) 3274 { 3275 struct vop_advlock_args /* { 3276 struct vnode *a_vp; 3277 void *a_id; 3278 int a_op; 3279 struct flock *a_fl; 3280 int a_flags; 3281 } */ *ap = v; 3282 struct nfsnode *np = VTONFS(ap->a_vp); 3283 3284 return lf_advlock(ap, &np->n_lockf, np->n_size); 3285 } 3286 3287 /* 3288 * Print out the contents of an nfsnode. 3289 */ 3290 int 3291 nfs_print(void *v) 3292 { 3293 struct vop_print_args /* { 3294 struct vnode *a_vp; 3295 } */ *ap = v; 3296 struct vnode *vp = ap->a_vp; 3297 struct nfsnode *np = VTONFS(vp); 3298 3299 printf("tag VT_NFS, fileid %lld fsid 0x%llx", 3300 (unsigned long long)np->n_vattr->va_fileid, 3301 (unsigned long long)np->n_vattr->va_fsid); 3302 if (vp->v_type == VFIFO) 3303 VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v); 3304 printf("\n"); 3305 return (0); 3306 } 3307 3308 /* 3309 * nfs unlock wrapper. 
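 * Flushes any delayed truncation via nfs_delayedtruncate() before handing the vnode to genfs_unlock().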
3310 */ 3311 int 3312 nfs_unlock(void *v) 3313 { 3314 struct vop_unlock_args /* { 3315 struct vnode *a_vp; 3316 int a_flags; 3317 } */ *ap = v; 3318 struct vnode *vp = ap->a_vp; 3319 3320 /* 3321 * VOP_UNLOCK can be called by nfs_loadattrcache 3322 * with v_data == 0. 3323 */ 3324 if (VTONFS(vp)) { 3325 nfs_delayedtruncate(vp); 3326 } 3327 3328 return genfs_unlock(v); 3329 } 3330 3331 /* 3332 * nfs special file access vnode op. 3333 * Essentially just get vattr and then imitate iaccess() since the device is 3334 * local to the client. 3335 */ 3336 int 3337 nfsspec_access(void *v) 3338 { 3339 struct vop_access_args /* { 3340 struct vnode *a_vp; 3341 int a_mode; 3342 kauth_cred_t a_cred; 3343 struct lwp *a_l; 3344 } */ *ap = v; 3345 struct vattr va; 3346 struct vnode *vp = ap->a_vp; 3347 int error; 3348 3349 error = VOP_GETATTR(vp, &va, ap->a_cred); 3350 if (error) 3351 return (error); 3352 3353 /* 3354 * Disallow write attempts on filesystems mounted read-only; 3355 * unless the file is a socket, fifo, or a block or character 3356 * device resident on the filesystem. 3357 */ 3358 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3359 switch (vp->v_type) { 3360 case VREG: 3361 case VDIR: 3362 case VLNK: 3363 return (EROFS); 3364 default: 3365 break; 3366 } 3367 } 3368 3369 return kauth_authorize_vnode(ap->a_cred, KAUTH_ACCESS_ACTION(ap->a_mode, 3370 va.va_type, va.va_mode), vp, NULL, genfs_can_access(va.va_type, 3371 va.va_mode, va.va_uid, va.va_gid, ap->a_mode, ap->a_cred)); 3372 } 3373 3374 /* 3375 * Read wrapper for special devices. 3376 */ 3377 int 3378 nfsspec_read(void *v) 3379 { 3380 struct vop_read_args /* { 3381 struct vnode *a_vp; 3382 struct uio *a_uio; 3383 int a_ioflag; 3384 kauth_cred_t a_cred; 3385 } */ *ap = v; 3386 struct nfsnode *np = VTONFS(ap->a_vp); 3387 3388 /* 3389 * Set access flag. 3390 */ 3391 np->n_flag |= NACC; 3392 getnanotime(&np->n_atim); 3393 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap)); 3394 } 3395 3396 /* 3397 * Write wrapper for special devices. 3398 */ 3399 int 3400 nfsspec_write(void *v) 3401 { 3402 struct vop_write_args /* { 3403 struct vnode *a_vp; 3404 struct uio *a_uio; 3405 int a_ioflag; 3406 kauth_cred_t a_cred; 3407 } */ *ap = v; 3408 struct nfsnode *np = VTONFS(ap->a_vp); 3409 3410 /* 3411 * Set update flag. 3412 */ 3413 np->n_flag |= NUPD; 3414 getnanotime(&np->n_mtim); 3415 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap)); 3416 } 3417 3418 /* 3419 * Close wrapper for special devices. 3420 * 3421 * Update the times on the nfsnode then do device close. 3422 */ 3423 int 3424 nfsspec_close(void *v) 3425 { 3426 struct vop_close_args /* { 3427 struct vnode *a_vp; 3428 int a_fflag; 3429 kauth_cred_t a_cred; 3430 struct lwp *a_l; 3431 } */ *ap = v; 3432 struct vnode *vp = ap->a_vp; 3433 struct nfsnode *np = VTONFS(vp); 3434 struct vattr vattr; 3435 3436 if (np->n_flag & (NACC | NUPD)) { 3437 np->n_flag |= NCHG; 3438 if (vp->v_usecount == 1 && 3439 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3440 vattr_null(&vattr); 3441 if (np->n_flag & NACC) 3442 vattr.va_atime = np->n_atim; 3443 if (np->n_flag & NUPD) 3444 vattr.va_mtime = np->n_mtim; 3445 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3446 } 3447 } 3448 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap)); 3449 } 3450 3451 /* 3452 * Read wrapper for fifos. 
3453 */ 3454 int 3455 nfsfifo_read(void *v) 3456 { 3457 struct vop_read_args /* { 3458 struct vnode *a_vp; 3459 struct uio *a_uio; 3460 int a_ioflag; 3461 kauth_cred_t a_cred; 3462 } */ *ap = v; 3463 struct nfsnode *np = VTONFS(ap->a_vp); 3464 3465 /* 3466 * Set access flag. 3467 */ 3468 np->n_flag |= NACC; 3469 getnanotime(&np->n_atim); 3470 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap)); 3471 } 3472 3473 /* 3474 * Write wrapper for fifos. 3475 */ 3476 int 3477 nfsfifo_write(void *v) 3478 { 3479 struct vop_write_args /* { 3480 struct vnode *a_vp; 3481 struct uio *a_uio; 3482 int a_ioflag; 3483 kauth_cred_t a_cred; 3484 } */ *ap = v; 3485 struct nfsnode *np = VTONFS(ap->a_vp); 3486 3487 /* 3488 * Set update flag. 3489 */ 3490 np->n_flag |= NUPD; 3491 getnanotime(&np->n_mtim); 3492 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); 3493 } 3494 3495 /* 3496 * Close wrapper for fifos. 3497 * 3498 * Update the times on the nfsnode then do fifo close. 3499 */ 3500 int 3501 nfsfifo_close(void *v) 3502 { 3503 struct vop_close_args /* { 3504 struct vnode *a_vp; 3505 int a_fflag; 3506 kauth_cred_t a_cred; 3507 struct lwp *a_l; 3508 } */ *ap = v; 3509 struct vnode *vp = ap->a_vp; 3510 struct nfsnode *np = VTONFS(vp); 3511 struct vattr vattr; 3512 3513 if (np->n_flag & (NACC | NUPD)) { 3514 struct timespec ts; 3515 3516 getnanotime(&ts); 3517 if (np->n_flag & NACC) 3518 np->n_atim = ts; 3519 if (np->n_flag & NUPD) 3520 np->n_mtim = ts; 3521 np->n_flag |= NCHG; 3522 if (vp->v_usecount == 1 && 3523 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3524 vattr_null(&vattr); 3525 if (np->n_flag & NACC) 3526 vattr.va_atime = np->n_atim; 3527 if (np->n_flag & NUPD) 3528 vattr.va_mtime = np->n_mtim; 3529 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3530 } 3531 } 3532 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap)); 3533 } 3534