/*	$NetBSD: nfs_vnops.c,v 1.289 2010/12/14 16:58:58 cegger Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_vnops.c	8.19 (Berkeley) 7/31/95
 */

/*
 * vnode op calls for Sun NFS version 2 and 3
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_vnops.c,v 1.289 2010/12/14 16:58:58 cegger Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_uvmhist.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#include <sys/disk.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/hash.h>
#include <sys/lockf.h>
#include <sys/stat.h>
#include <sys/unistd.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_var.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

/*
 * Global vfs data structures for nfs
 */
int (**nfsv2_vnodeop_p)(void *);
const struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, nfs_lookup },		/* lookup */
	{ &vop_create_desc, nfs_create },		/* create */
	{ &vop_mknod_desc, nfs_mknod },			/* mknod */
	{ &vop_open_desc, nfs_open },			/* open */
	{ &vop_close_desc, nfs_close },			/* close */
	{ &vop_access_desc, nfs_access },		/* access */
	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
	{ &vop_read_desc, nfs_read },			/* read */
	{ &vop_write_desc, nfs_write },			/* write */
	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, nfs_ioctl },			/* ioctl */
	{ &vop_poll_desc, nfs_poll },			/* poll */
	{ &vop_kqfilter_desc, nfs_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, nfs_revoke },		/* revoke */
	{ &vop_mmap_desc, nfs_mmap },			/* mmap */
	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
	{ &vop_seek_desc, nfs_seek },			/* seek */
	{ &vop_remove_desc, nfs_remove },		/* remove */
	{ &vop_link_desc, nfs_link },			/* link */
	{ &vop_rename_desc, nfs_rename },		/* rename */
	{ &vop_mkdir_desc, nfs_mkdir },			/* mkdir */
	{ &vop_rmdir_desc, nfs_rmdir },			/* rmdir */
	{ &vop_symlink_desc, nfs_symlink },		/* symlink */
	{ &vop_readdir_desc, nfs_readdir },		/* readdir */
	{ &vop_readlink_desc, nfs_readlink },		/* readlink */
	{ &vop_abortop_desc, nfs_abortop },		/* abortop */
	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, nfs_lock },			/* lock */
	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
	{ &vop_bmap_desc, nfs_bmap },			/* bmap */
	{ &vop_strategy_desc, nfs_strategy },		/* strategy */
	{ &vop_print_desc, nfs_print },			/* print */
	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, nfs_pathconf },		/* pathconf */
	{ &vop_advlock_desc, nfs_advlock },		/* advlock */
	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
	{ &vop_getpages_desc, nfs_getpages },		/* getpages */
	{ &vop_putpages_desc, genfs_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };

/*
 * Special device vnode ops
 */
int (**spec_nfsv2nodeop_p)(void *);
const struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, nfsspec_close },		/* close */
	{ &vop_access_desc, nfsspec_access },		/* access */
	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
	{ &vop_read_desc, nfsspec_read },		/* read */
	{ &vop_write_desc, nfsspec_write },		/* write */
	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, nfs_lock },			/* lock */
	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, nfs_print },			/* print */
	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc spec_nfsv2nodeop_opv_desc =
	{ &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries };

int (**fifo_nfsv2nodeop_p)(void *);
const struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, vn_fifo_bypass },		/* lookup */
	{ &vop_create_desc, vn_fifo_bypass },		/* create */
	{ &vop_mknod_desc, vn_fifo_bypass },		/* mknod */
	{ &vop_open_desc, vn_fifo_bypass },		/* open */
	{ &vop_close_desc, nfsfifo_close },		/* close */
	{ &vop_access_desc, nfsspec_access },		/* access */
	{ &vop_getattr_desc, nfs_getattr },		/* getattr */
	{ &vop_setattr_desc, nfs_setattr },		/* setattr */
	{ &vop_read_desc, nfsfifo_read },		/* read */
	{ &vop_write_desc, nfsfifo_write },		/* write */
	{ &vop_fcntl_desc, genfs_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, vn_fifo_bypass },		/* ioctl */
	{ &vop_poll_desc, vn_fifo_bypass },		/* poll */
	{ &vop_kqfilter_desc, vn_fifo_bypass },		/* kqfilter */
	{ &vop_revoke_desc, vn_fifo_bypass },		/* revoke */
	{ &vop_mmap_desc, vn_fifo_bypass },		/* mmap */
	{ &vop_fsync_desc, nfs_fsync },			/* fsync */
	{ &vop_seek_desc, vn_fifo_bypass },		/* seek */
	{ &vop_remove_desc, vn_fifo_bypass },		/* remove */
	{ &vop_link_desc, vn_fifo_bypass },		/* link */
	{ &vop_rename_desc, vn_fifo_bypass },		/* rename */
	{ &vop_mkdir_desc, vn_fifo_bypass },		/* mkdir */
	{ &vop_rmdir_desc, vn_fifo_bypass },		/* rmdir */
	{ &vop_symlink_desc, vn_fifo_bypass },		/* symlink */
	{ &vop_readdir_desc, vn_fifo_bypass },		/* readdir */
	{ &vop_readlink_desc, vn_fifo_bypass },		/* readlink */
	{ &vop_abortop_desc, vn_fifo_bypass },		/* abortop */
	{ &vop_inactive_desc, nfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, nfs_lock },			/* lock */
	{ &vop_unlock_desc, nfs_unlock },		/* unlock */
	{ &vop_bmap_desc, vn_fifo_bypass },		/* bmap */
	{ &vop_strategy_desc, genfs_badop },		/* strategy */
	{ &vop_print_desc, nfs_print },			/* print */
	{ &vop_islocked_desc, nfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, vn_fifo_bypass },		/* pathconf */
	{ &vop_advlock_desc, vn_fifo_bypass },		/* advlock */
	{ &vop_bwrite_desc, genfs_badop },		/* bwrite */
	{ &vop_putpages_desc, vn_fifo_bypass },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc =
	{ &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries };

static int nfs_linkrpc(struct vnode *, struct vnode *, const char *,
    size_t, kauth_cred_t, struct lwp *);
static void nfs_writerpc_extfree(struct mbuf *, void *, size_t, void *);

/*
 * Global variables
 */
extern u_int32_t nfs_true, nfs_false;
extern u_int32_t nfs_xdrneg1;
extern const nfstype nfsv3_type[9];

int nfs_numasync = 0;
#define	DIRHDSIZ	_DIRENT_NAMEOFF(dp)
#define	UIO_ADVANCE(uio, siz) \
    (void)((uio)->uio_resid -= (siz), \
    (uio)->uio_iov->iov_base = (char *)(uio)->uio_iov->iov_base + (siz), \
    (uio)->uio_iov->iov_len -= (siz))

static void nfs_cache_enter(struct vnode *, struct vnode *,
    struct componentname *);

static void
nfs_cache_enter(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp)
{
	struct nfsnode *dnp = VTONFS(dvp);

	if (vp != NULL) {
		struct nfsnode *np = VTONFS(vp);

		np->n_ctime = np->n_vattr->va_ctime.tv_sec;
	}

	if (!timespecisset(&dnp->n_nctime))
		dnp->n_nctime = dnp->n_vattr->va_mtime;

	cache_enter(dvp, vp, cnp);
}

/*
 * nfs null call from vfs.
 */
int
nfs_null(struct vnode *vp, kauth_cred_t cred, struct lwp *l)
{
	char *bpos, *dpos;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsnode *np = VTONFS(vp);

	nfsm_reqhead(np, NFSPROC_NULL, 0);
	nfsm_request(np, NFSPROC_NULL, l, cred);
	nfsm_reqdone;
	return (error);
}
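/*
 * Note on the nfsm_* macros used throughout this file: nfs_null() above
 * shows the minimal request/reply sequence that the other vnode ops
 * elaborate on:
 *
 *	nfsm_reqhead(np, NFSPROC_xxx, size);	allocate the request mbufs
 *	...marshal args with nfsm_fhtom()/nfsm_build()/nfsm_strtom()...
 *	nfsm_request(np, NFSPROC_xxx, l, cred);	send it, wait for the reply
 *	...pick the reply apart with nfsm_dissect()/nfsm_loadattr()...
 *	nfsm_reqdone;				free the reply mbuf chain
 *
 * On failure the macros branch to the "nfsmout" label, so only "error"
 * normally needs to be checked afterwards.
 */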
/*
 * nfs access vnode op.
 * For nfs version 2, just return ok. File accesses may fail later.
 * For nfs version 3, use the access rpc to check accessibility. If file modes
 * are changed on the server, accesses might still fail later.
 */
int
nfs_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
#ifndef NFS_V2_ONLY
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	int error = 0, attrflag;
	struct mbuf *mreq, *mrep, *md, *mb;
	u_int32_t mode, rmode;
	const int v3 = NFS_ISV3(vp);
#endif
	int cachevalid;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	cachevalid = (np->n_accstamp != -1 &&
	    (time_uptime - np->n_accstamp) < nfs_attrtimeo(nmp, np) &&
	    np->n_accuid == kauth_cred_geteuid(ap->a_cred));

	/*
	 * Check access cache first. If this request has been made for this
	 * uid shortly before, use the cached result.
	 */
	if (cachevalid) {
		if (!np->n_accerror) {
			if ((np->n_accmode & ap->a_mode) == ap->a_mode)
				return np->n_accerror;
		} else if ((np->n_accmode & ap->a_mode) == np->n_accmode)
			return np->n_accerror;
	}

#ifndef NFS_V2_ONLY
	/*
	 * For nfs v3, do an access rpc, otherwise you are stuck emulating
	 * ufs_access() locally using the vattr. This may not be correct,
	 * since the server may apply other access criteria such as
	 * client uid-->server uid mapping that we do not know about, but
	 * this is better than just returning anything that is lying about
	 * in the cache.
	 */
	if (v3) {
		nfsstats.rpccnt[NFSPROC_ACCESS]++;
		nfsm_reqhead(np, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED);
		nfsm_fhtom(np, v3);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		if (ap->a_mode & VREAD)
			mode = NFSV3ACCESS_READ;
		else
			mode = 0;
		if (vp->v_type != VDIR) {
			if (ap->a_mode & VWRITE)
				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
			if (ap->a_mode & VEXEC)
				mode |= NFSV3ACCESS_EXECUTE;
		} else {
			if (ap->a_mode & VWRITE)
				mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
				    NFSV3ACCESS_DELETE);
			if (ap->a_mode & VEXEC)
				mode |= NFSV3ACCESS_LOOKUP;
		}
		*tl = txdr_unsigned(mode);
		nfsm_request(np, NFSPROC_ACCESS, curlwp, ap->a_cred);
		nfsm_postop_attr(vp, attrflag, 0);
		if (!error) {
			nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED);
			rmode = fxdr_unsigned(u_int32_t, *tl);
			/*
			 * The NFS V3 spec does not clarify whether or not
			 * the returned access bits can be a superset of
			 * the ones requested, so...
			 */
			if ((rmode & mode) != mode)
				error = EACCES;
		}
		nfsm_reqdone;
	} else
#endif
		return (nfsspec_access(ap));
#ifndef NFS_V2_ONLY
	/*
	 * Disallow write attempts on filesystems mounted read-only;
	 * unless the file is a socket, fifo, or a block or character
	 * device resident on the filesystem.
	 */
	if (!error && (ap->a_mode & VWRITE) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			error = EROFS;
		default:
			break;
		}
	}

	if (!error || error == EACCES) {
		/*
		 * If we got the same result as for a previous,
		 * different request, OR it in. Don't update
		 * the timestamp in that case.
		 */
		if (cachevalid && np->n_accstamp != -1 &&
		    error == np->n_accerror) {
			if (!error)
				np->n_accmode |= ap->a_mode;
			else if ((np->n_accmode & ap->a_mode) == ap->a_mode)
				np->n_accmode = ap->a_mode;
		} else {
			np->n_accstamp = time_uptime;
			np->n_accuid = kauth_cred_geteuid(ap->a_cred);
			np->n_accmode = ap->a_mode;
			np->n_accerror = error;
		}
	}

	return (error);
#endif
}

/*
 * nfs open vnode op
 * Check to see if the type is ok
 * and that deletion is not in progress.
 * For paged in text files, you will need to flush the page cache
 * if consistency is lost.
 */
/* ARGSUSED */
int
nfs_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int error;

	if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
		return (EACCES);
	}

	if (ap->a_mode & FREAD) {
		if (np->n_rcred != NULL)
			kauth_cred_free(np->n_rcred);
		np->n_rcred = ap->a_cred;
		kauth_cred_hold(np->n_rcred);
	}
	if (ap->a_mode & FWRITE) {
		if (np->n_wcred != NULL)
			kauth_cred_free(np->n_wcred);
		np->n_wcred = ap->a_cred;
		kauth_cred_hold(np->n_wcred);
	}

	error = nfs_flushstalebuf(vp, ap->a_cred, curlwp, 0);
	if (error)
		return error;

	NFS_INVALIDATE_ATTRCACHE(np);	/* For Open/Close consistency */

	return (0);
}
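/*
 * The credentials stashed in n_rcred/n_wcred above are the ones that
 * nfs_readrpc() and nfs_writerpc() later use for I/O issued from below
 * the buffer cache, where the original caller's credentials are no
 * longer available.
 */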
/*
 * nfs close vnode op
 * What an NFS client should do upon close after writing is a debatable issue.
 * Most NFS clients push delayed writes to the server upon close, basically for
 * two reasons:
 * 1 - So that any write errors may be reported back to the client process
 *     doing the close system call. By far the two most likely errors are
 *     NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure.
 * 2 - To put a worst case upper bound on cache inconsistency between
 *     multiple clients for the file.
 * There is also a consistency problem for Version 2 of the protocol w.r.t.
 * not being able to tell if other clients are writing a file concurrently,
 * since there is no way of knowing if the changed modify time in the reply
 * is only due to the write for this client.
 * (NFS Version 3 provides weak cache consistency data in the reply that
 * should be sufficient to detect and handle this case.)
 *
 * The current code does the following:
 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers
 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate
 *                     or commit them (this satisfies 1 and 2 except for the
 *                     case where the server crashes after this close but
 *                     before the commit RPC, which is felt to be "good
 *                     enough").  Changing the last argument to nfs_flush()
 *                     to a 1 would force a commit operation, if it is felt
 *                     a commit is necessary now.
 */
/* ARGSUSED */
int
nfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	UVMHIST_FUNC("nfs_close"); UVMHIST_CALLED(ubchist);

	if (vp->v_type == VREG) {
		if (np->n_flag & NMODIFIED) {
#ifndef NFS_V2_ONLY
			if (NFS_ISV3(vp)) {
				error = nfs_flush(vp, ap->a_cred, MNT_WAIT, curlwp, 0);
				np->n_flag &= ~NMODIFIED;
			} else
#endif
				error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, curlwp, 1);
			NFS_INVALIDATE_ATTRCACHE(np);
		}
		if (np->n_flag & NWRITEERR) {
			np->n_flag &= ~NWRITEERR;
			error = np->n_error;
		}
	}
	UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
	return (error);
}

/*
 * nfs getattr call from vfs.
 */
int
nfs_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	char *cp;
	u_int32_t *tl;
	int32_t t1, t2;
	char *bpos, *dpos;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);

	/*
	 * Update local times for special files.
	 */
	if (np->n_flag & (NACC | NUPD))
		np->n_flag |= NCHG;

	/*
	 * if we have delayed truncation, do it now.
	 */
	nfs_delayedtruncate(vp);

	/*
	 * First look in the cache.
	 */
	if (nfs_getattrcache(vp, ap->a_vap) == 0)
		return (0);
	nfsstats.rpccnt[NFSPROC_GETATTR]++;
	nfsm_reqhead(np, NFSPROC_GETATTR, NFSX_FH(v3));
	nfsm_fhtom(np, v3);
	nfsm_request(np, NFSPROC_GETATTR, curlwp, ap->a_cred);
	if (!error) {
		nfsm_loadattr(vp, ap->a_vap, 0);
		if (vp->v_type == VDIR &&
		    ap->a_vap->va_blocksize < NFS_DIRFRAGSIZ)
			ap->a_vap->va_blocksize = NFS_DIRFRAGSIZ;
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs setattr call.
 */
int
nfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;
	u_quad_t tsize = 0;

	/*
	 * Setting of flags is not supported.
	 */
	if (vap->va_flags != VNOVAL)
		return (EOPNOTSUPP);

	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_uid != (uid_t)VNOVAL ||
	    vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	if (vap->va_size != VNOVAL) {
		if (vap->va_size > VFSTONFS(vp->v_mount)->nm_maxfilesize) {
			return EFBIG;
		}
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_mtime.tv_sec == VNOVAL &&
			    vap->va_atime.tv_sec == VNOVAL &&
			    vap->va_mode == (mode_t)VNOVAL &&
			    vap->va_uid == (uid_t)VNOVAL &&
			    vap->va_gid == (gid_t)VNOVAL)
				return (0);
			vap->va_size = VNOVAL;
			break;
		default:
			/*
			 * Disallow write attempts if the filesystem is
			 * mounted read-only.
			 */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			genfs_node_wrlock(vp);
			uvm_vnp_setsize(vp, vap->va_size);
			tsize = np->n_size;
			np->n_size = vap->va_size;
			if (vap->va_size == 0)
				error = nfs_vinvalbuf(vp, 0,
				    ap->a_cred, curlwp, 1);
			else
				error = nfs_vinvalbuf(vp, V_SAVE,
				    ap->a_cred, curlwp, 1);
			if (error) {
				uvm_vnp_setsize(vp, tsize);
				genfs_node_unlock(vp);
				return (error);
			}
			np->n_vattr->va_size = vap->va_size;
		}
	} else {
		/*
		 * flush files before setattr because a later write of
		 * cached data might change timestamps or reset sugid bits
		 */
		if ((vap->va_mtime.tv_sec != VNOVAL ||
		    vap->va_atime.tv_sec != VNOVAL ||
		    vap->va_mode != VNOVAL) &&
		    vp->v_type == VREG &&
		    (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred,
		    curlwp, 1)) == EINTR)
			return (error);
	}
	error = nfs_setattrrpc(vp, vap, ap->a_cred, curlwp);
	if (vap->va_size != VNOVAL) {
		if (error) {
			np->n_size = np->n_vattr->va_size = tsize;
			uvm_vnp_setsize(vp, np->n_size);
		}
		genfs_node_unlock(vp);
	}
	VN_KNOTE(vp, NOTE_ATTRIB);
	return (error);
}

/*
 * Do an nfs setattr rpc.
 */
int
nfs_setattrrpc(struct vnode *vp, struct vattr *vap, kauth_cred_t cred, struct lwp *l)
{
	struct nfsv2_sattr *sp;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos;
	u_int32_t *tl;
	int error = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int wccflag = NFSV3_WCCRATTR;
	char *cp2;
#endif

	nfsstats.rpccnt[NFSPROC_SETATTR]++;
	nfsm_reqhead(np, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3));
	nfsm_fhtom(np, v3);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_v3attrbuild(vap, true);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		*tl = nfs_false;
	} else {
#endif
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		if (vap->va_mode == (mode_t)VNOVAL)
			sp->sa_mode = nfs_xdrneg1;
		else
			sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode);
		if (vap->va_uid == (uid_t)VNOVAL)
			sp->sa_uid = nfs_xdrneg1;
		else
			sp->sa_uid = txdr_unsigned(vap->va_uid);
		if (vap->va_gid == (gid_t)VNOVAL)
			sp->sa_gid = nfs_xdrneg1;
		else
			sp->sa_gid = txdr_unsigned(vap->va_gid);
		sp->sa_size = txdr_unsigned(vap->va_size);
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
#ifndef NFS_V2_ONLY
	}
#endif
	nfsm_request(np, NFSPROC_SETATTR, l, cred);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false);
	} else
#endif
		nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
	nfsm_reqdone;
	return (error);
}
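/*
 * In the v2 sattr built above, fields the caller left as VNOVAL are sent
 * as all-ones (nfs_xdrneg1), which the server takes to mean "do not
 * change"; the v3 path instead encodes only the attributes actually being
 * set, via nfsm_v3attrbuild().
 */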
/*
 * nfs lookup call, one step at a time...
 * First look in cache
 * If not found, do the rpc.
 */
int
nfs_lookup(void *v)
{
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	int flags;
	struct vnode *newvp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	struct mbuf *mreq, *mrep, *md, *mb;
	long len;
	nfsfh_t *fhp;
	struct nfsnode *np;
	int error = 0, attrflag, fhsize;
	const int v3 = NFS_ISV3(dvp);

	flags = cnp->cn_flags;

	*vpp = NULLVP;
	newvp = NULLVP;
	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * RFC1813(nfsv3) 3.2 says clients should handle "." by themselves.
	 */
	if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
		if (error)
			return error;
		if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN))
			return EISDIR;
		vref(dvp);
		*vpp = dvp;
		return 0;
	}

	np = VTONFS(dvp);

	/*
	 * Before performing an RPC, check the name cache to see if
	 * the directory/name pair we are looking for is known already.
	 * If the directory/name pair is found in the name cache,
	 * we have to ensure the directory has not changed from
	 * the time the cache entry has been created. If it has,
	 * the cache entry has to be ignored.
	 */
	error = cache_lookup_raw(dvp, vpp, cnp);
	KASSERT(dvp != *vpp);
	KASSERT((cnp->cn_flags & ISWHITEOUT) == 0);
	if (error >= 0) {
		struct vattr vattr;
		int err2;

		if (error && error != ENOENT) {
			*vpp = NULLVP;
			return error;
		}

		err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
		if (err2 != 0) {
			if (error == 0)
				vrele(*vpp);
			*vpp = NULLVP;
			return err2;
		}

		if (VOP_GETATTR(dvp, &vattr, cnp->cn_cred)
		    || timespeccmp(&vattr.va_mtime,
		    &VTONFS(dvp)->n_nctime, !=)) {
			if (error == 0) {
				vrele(*vpp);
				*vpp = NULLVP;
			}
			cache_purge1(dvp, NULL, PURGE_CHILDREN);
			timespecclear(&np->n_nctime);
			goto dorpc;
		}

		if (error == ENOENT) {
			goto noentry;
		}

		/*
		 * investigate the vnode returned by cache_lookup_raw.
		 * if it isn't appropriate, do an rpc.
		 */
		newvp = *vpp;
		if ((flags & ISDOTDOT) != 0) {
			VOP_UNLOCK(dvp);
		}
		error = vn_lock(newvp, LK_EXCLUSIVE);
		if ((flags & ISDOTDOT) != 0) {
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		if (error != 0) {
			/* newvp has been reclaimed. */
			vrele(newvp);
			*vpp = NULLVP;
			goto dorpc;
		}
		if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred)
		    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
			nfsstats.lookupcache_hits++;
			KASSERT(newvp->v_type != VNON);
			return (0);
		}
		cache_purge1(newvp, NULL, PURGE_PARENTS);
		vput(newvp);
		*vpp = NULLVP;
	}
dorpc:
#if 0
	/*
	 * because nfsv3 has the same CREATE semantics as ours,
	 * we don't have to perform LOOKUPs beforehand.
	 *
	 * XXX ideally we can do the same for nfsv2 in the case of !O_EXCL.
	 * XXX although we have no way to know if O_EXCL is requested or not.
	 */

	if (v3 && cnp->cn_nameiop == CREATE &&
	    (flags & (ISLASTCN|ISDOTDOT)) == ISLASTCN &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		return (EJUSTRETURN);
	}
#endif /* 0 */

	error = 0;
	newvp = NULLVP;
	nfsstats.lookupcache_misses++;
	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
	len = cnp->cn_namelen;
	nfsm_reqhead(np, NFSPROC_LOOKUP,
	    NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
	nfsm_fhtom(np, v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
	nfsm_request(np, NFSPROC_LOOKUP, curlwp, cnp->cn_cred);
	if (error) {
		nfsm_postop_attr(dvp, attrflag, 0);
		m_freem(mrep);
		goto nfsmout;
	}
	nfsm_getfh(fhp, fhsize, v3);

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == RENAME && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, fhp, fhsize)) {
			m_freem(mrep);
			return (EISDIR);
		}
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
		*vpp = newvp;
		m_freem(mrep);
		goto validate;
	}

	/*
	 * The postop attr handling is duplicated for each if case,
	 * because it should be done while dvp is locked (unlocking
	 * dvp is different for each case).
	 */

	if (NFS_CMPFH(np, fhp, fhsize)) {
		/*
		 * as we handle "." lookup locally, this should be
		 * a broken server.
		 */
		vref(dvp);
		newvp = dvp;
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
	} else if (flags & ISDOTDOT) {
		/*
		 * ".." lookup
		 */
		VOP_UNLOCK(dvp);
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);

#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
	} else {
		/*
		 * Other lookups.
		 */
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			m_freem(mrep);
			return error;
		}
		newvp = NFSTOV(np);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(newvp, attrflag, 0);
			nfsm_postop_attr(dvp, attrflag, 0);
		} else
#endif
			nfsm_loadattr(newvp, (struct vattr *)0, 0);
	}
	if ((cnp->cn_flags & MAKEENTRY) &&
	    (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
		nfs_cache_enter(dvp, newvp, cnp);
	}
	*vpp = newvp;
	nfsm_reqdone;
	if (error) {
		/*
		 * We get here only because of errors returned by
		 * the RPC. Otherwise we'll have returned above
		 * (the nfsm_* macros will jump to nfsm_reqdone
		 * on error).
		 */
		if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) &&
		    cnp->cn_nameiop != CREATE) {
			nfs_cache_enter(dvp, NULL, cnp);
		}
		if (newvp != NULLVP) {
			if (newvp == dvp) {
				vrele(newvp);
			} else {
				vput(newvp);
			}
		}
noentry:
		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
		    (flags & ISLASTCN) && error == ENOENT) {
			if (dvp->v_mount->mnt_flag & MNT_RDONLY) {
				error = EROFS;
			} else {
				error = EJUSTRETURN;
			}
		}
		*vpp = NULL;
		return error;
	}

validate:
	/*
	 * make sure we have valid type and size.
	 */

	newvp = *vpp;
	if (newvp->v_type == VNON) {
		struct vattr vattr; /* dummy */

		KASSERT(VTONFS(newvp)->n_attrstamp == 0);
		error = VOP_GETATTR(newvp, &vattr, cnp->cn_cred);
		if (error) {
			vput(newvp);
			*vpp = NULL;
		}
	}

	return error;
}

/*
 * nfs read call.
 * Just call nfs_bioread() to do the work.
 */
int
nfs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_type != VREG)
		return EISDIR;
	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
}

/*
 * nfs readlink call
 */
int
nfs_readlink(void *v)
{
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);

	if (vp->v_type != VLNK)
		return (EPERM);

	if (np->n_rcred != NULL) {
		kauth_cred_free(np->n_rcred);
	}
	np->n_rcred = ap->a_cred;
	kauth_cred_hold(np->n_rcred);

	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
}

/*
 * Do a readlink rpc.
 * Called by nfs_doio() from below the buffer cache.
 */
int
nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	int error = 0;
	uint32_t len;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int attrflag;
#endif

	nfsstats.rpccnt[NFSPROC_READLINK]++;
	nfsm_reqhead(np, NFSPROC_READLINK, NFSX_FH(v3));
	nfsm_fhtom(np, v3);
	nfsm_request(np, NFSPROC_READLINK, curlwp, cred);
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_postop_attr(vp, attrflag, 0);
#endif
	if (!error) {
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_dissect(tl, uint32_t *, NFSX_UNSIGNED);
			len = fxdr_unsigned(uint32_t, *tl);
			if (len > MAXPATHLEN) {
				/*
				 * this pathname is too long for us.
				 */
				m_freem(mrep);
				/* Solaris returns EINVAL. should we follow? */
				error = ENAMETOOLONG;
				goto nfsmout;
			}
		} else
#endif
		{
			nfsm_strsiz(len, NFS_MAXPATHLEN);
		}
		nfsm_mtouio(uiop, len);
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs read rpc call
 * Ditto above
 */
int
nfs_readrpc(struct vnode *vp, struct uio *uiop)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos, *cp2;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsmount *nmp;
	int error = 0, len, retlen, tsiz, eof, byte_count;
	const int v3 = NFS_ISV3(vp);
	struct nfsnode *np = VTONFS(vp);
#ifndef NFS_V2_ONLY
	int attrflag;
#endif

#ifndef nolint
	eof = 0;
#endif
	nmp = VFSTONFS(vp->v_mount);
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return (EFBIG);
	iostat_busy(nmp->nm_stats);
	byte_count = 0; /* count bytes actually transferred */
	while (tsiz > 0) {
		nfsstats.rpccnt[NFSPROC_READ]++;
		len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
		nfsm_reqhead(np, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
		nfsm_fhtom(np, v3);
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED * 3);
#ifndef NFS_V2_ONLY
		if (v3) {
			txdr_hyper(uiop->uio_offset, tl);
			*(tl + 2) = txdr_unsigned(len);
		} else
#endif
		{
			*tl++ = txdr_unsigned(uiop->uio_offset);
			*tl++ = txdr_unsigned(len);
			*tl = 0;
		}
		nfsm_request(np, NFSPROC_READ, curlwp, np->n_rcred);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_postop_attr(vp, attrflag, NAC_NOTRUNC);
			if (error) {
				m_freem(mrep);
				goto nfsmout;
			}
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			eof = fxdr_unsigned(int, *(tl + 1));
		} else
#endif
			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
		nfsm_strsiz(retlen, nmp->nm_rsize);
		nfsm_mtouio(uiop, retlen);
		m_freem(mrep);
		tsiz -= retlen;
		byte_count += retlen;
#ifndef NFS_V2_ONLY
		if (v3) {
			if (eof || retlen == 0)
				tsiz = 0;
		} else
#endif
			if (retlen < len)
				tsiz = 0;
	}
nfsmout:
	iostat_unbusy(nmp->nm_stats, byte_count, 1);
	return (error);
}
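/*
 * The read loop above issues requests in nm_rsize-sized chunks.  End of
 * transfer is detected differently per protocol version: the v3 reply
 * carries an explicit eof flag, so the loop stops on eof or a zero-length
 * reply; for v2 a reply shorter than the amount requested is taken to
 * mean end of file.
 */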
struct nfs_writerpc_context {
	kmutex_t nwc_lock;
	kcondvar_t nwc_cv;
	int nwc_mbufcount;
};

/*
 * Free an mbuf used to refer to protected pages during a write RPC call.
 * Called at splvm.
 */
static void
nfs_writerpc_extfree(struct mbuf *m, void *tbuf, size_t size, void *arg)
{
	struct nfs_writerpc_context *ctx = arg;

	KASSERT(m != NULL);
	KASSERT(ctx != NULL);
	pool_cache_put(mb_cache, m);
	mutex_enter(&ctx->nwc_lock);
	if (--ctx->nwc_mbufcount == 0) {
		cv_signal(&ctx->nwc_cv);
	}
	mutex_exit(&ctx->nwc_lock);
}

/*
 * nfs write call
 */
int
nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, bool pageprotected, bool *stalewriteverfp)
{
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	char *bpos, *dpos;
	struct mbuf *mreq, *mrep, *md, *mb;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR;
	const int v3 = NFS_ISV3(vp);
	int committed = NFSV3WRITE_FILESYNC;
	struct nfsnode *np = VTONFS(vp);
	struct nfs_writerpc_context ctx;
	int byte_count;
	size_t origresid;
#ifndef NFS_V2_ONLY
	char *cp2;
	int rlen, commit;
#endif

	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		panic("writerpc readonly vp %p", vp);
	}

#ifdef DIAGNOSTIC
	if (uiop->uio_iovcnt != 1)
		panic("nfs: writerpc iovcnt > 1");
#endif
	tsiz = uiop->uio_resid;
	if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
		return EFBIG;

	mutex_init(&ctx.nwc_lock, MUTEX_DRIVER, IPL_VM);
	cv_init(&ctx.nwc_cv, "nfsmblk");
	ctx.nwc_mbufcount = 1;

retry:
	origresid = uiop->uio_resid;
	KASSERT(origresid == uiop->uio_iov->iov_len);
	iostat_busy(nmp->nm_stats);
	byte_count = 0; /* count of bytes actually written */
	while (tsiz > 0) {
		uint32_t datalen; /* data bytes to be allocated in the mbuf */
		uint32_t backup;
		bool stalewriteverf = false;

		nfsstats.rpccnt[NFSPROC_WRITE]++;
		len = min(tsiz, nmp->nm_wsize);
		datalen = pageprotected ? 0 : nfsm_rndup(len);
		nfsm_reqhead(np, NFSPROC_WRITE,
		    NFSX_FH(v3) + 5 * NFSX_UNSIGNED + datalen);
		nfsm_fhtom(np, v3);
#ifndef NFS_V2_ONLY
		if (v3) {
			nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED);
			txdr_hyper(uiop->uio_offset, tl);
			tl += 2;
			*tl++ = txdr_unsigned(len);
			*tl++ = txdr_unsigned(*iomode);
			*tl = txdr_unsigned(len);
		} else
#endif
		{
			u_int32_t x;

			nfsm_build(tl, u_int32_t *, 4 * NFSX_UNSIGNED);
			/* Set both "begin" and "current" to non-garbage. */
			x = txdr_unsigned((u_int32_t)uiop->uio_offset);
			*tl++ = x;	/* "begin offset" */
			*tl++ = x;	/* "current offset" */
			x = txdr_unsigned(len);
			*tl++ = x;	/* total to this offset */
			*tl = x;	/* size of this write */

		}
		if (pageprotected) {
			/*
			 * since we know pages can't be modified during i/o,
			 * no need to copy them for us.
			 */
			struct mbuf *m;
			struct iovec *iovp = uiop->uio_iov;

			m = m_get(M_WAIT, MT_DATA);
			MCLAIM(m, &nfs_mowner);
			MEXTADD(m, iovp->iov_base, len, M_MBUF,
			    nfs_writerpc_extfree, &ctx);
			m->m_flags |= M_EXT_ROMAP;
			m->m_len = len;
			mb->m_next = m;
			/*
			 * no need to maintain mb and bpos here
			 * because no one cares about them later.
			 */
#if 0
			mb = m;
			bpos = mtod(void *, mb) + mb->m_len;
#endif
			UIO_ADVANCE(uiop, len);
			uiop->uio_offset += len;
			mutex_enter(&ctx.nwc_lock);
			ctx.nwc_mbufcount++;
			mutex_exit(&ctx.nwc_lock);
			nfs_zeropad(mb, 0, nfsm_padlen(len));
		} else {
			nfsm_uiotom(uiop, len);
		}
		nfsm_request(np, NFSPROC_WRITE, curlwp, np->n_wcred);
#ifndef NFS_V2_ONLY
		if (v3) {
			wccflag = NFSV3_WCCCHK;
			nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, !error);
			if (!error) {
				nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED
				    + NFSX_V3WRITEVERF);
				rlen = fxdr_unsigned(int, *tl++);
				if (rlen == 0) {
					error = NFSERR_IO;
					m_freem(mrep);
					break;
				} else if (rlen < len) {
					backup = len - rlen;
					UIO_ADVANCE(uiop, -backup);
					uiop->uio_offset -= backup;
					len = rlen;
				}
				commit = fxdr_unsigned(int, *tl++);

				/*
				 * Return the lowest commitment level
				 * obtained by any of the RPCs.
				 */
				if (committed == NFSV3WRITE_FILESYNC)
					committed = commit;
				else if (committed == NFSV3WRITE_DATASYNC &&
				    commit == NFSV3WRITE_UNSTABLE)
					committed = commit;
				mutex_enter(&nmp->nm_lock);
				if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0){
					memcpy(nmp->nm_writeverf, tl,
					    NFSX_V3WRITEVERF);
					nmp->nm_iflag |= NFSMNT_HASWRITEVERF;
				} else if ((nmp->nm_iflag &
				    NFSMNT_STALEWRITEVERF) ||
				    memcmp(tl, nmp->nm_writeverf,
				    NFSX_V3WRITEVERF)) {
					memcpy(nmp->nm_writeverf, tl,
					    NFSX_V3WRITEVERF);
					/*
					 * note NFSMNT_STALEWRITEVERF
					 * if we're the first thread to
					 * notice it.
					 */
					if ((nmp->nm_iflag &
					    NFSMNT_STALEWRITEVERF) == 0) {
						stalewriteverf = true;
						nmp->nm_iflag |=
						    NFSMNT_STALEWRITEVERF;
					}
				}
				mutex_exit(&nmp->nm_lock);
			}
		} else
#endif
			nfsm_loadattr(vp, (struct vattr *)0, NAC_NOTRUNC);
		if (wccflag)
			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr->va_mtime;
		m_freem(mrep);
		if (error)
			break;
		tsiz -= len;
		byte_count += len;
		if (stalewriteverf) {
			*stalewriteverfp = true;
			stalewriteverf = false;
			if (committed == NFSV3WRITE_UNSTABLE &&
			    len != origresid) {
				/*
				 * if our write requests weren't atomic but
				 * unstable, data from previous iterations
				 * might already have been lost, so we
				 * should resend it to the nfsd.
				 */
				backup = origresid - tsiz;
				UIO_ADVANCE(uiop, -backup);
				uiop->uio_offset -= backup;
				tsiz = origresid;
				goto retry;
			}
		}
	}
nfsmout:
	iostat_unbusy(nmp->nm_stats, byte_count, 0);
	if (pageprotected) {
		/*
		 * wait until mbufs go away.
		 * retransmitted mbufs can survive longer than rpc requests
		 * themselves.
		 */
		mutex_enter(&ctx.nwc_lock);
		ctx.nwc_mbufcount--;
		while (ctx.nwc_mbufcount > 0) {
			cv_wait(&ctx.nwc_cv, &ctx.nwc_lock);
		}
		mutex_exit(&ctx.nwc_lock);
	}
	mutex_destroy(&ctx.nwc_lock);
	cv_destroy(&ctx.nwc_cv);
	*iomode = committed;
	if (error)
		uiop->uio_resid = tsiz;
	return (error);
}
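/*
 * On return from nfs_writerpc(), *iomode holds the weakest commitment
 * level the server reported for any of the WRITEs issued above, so the
 * caller can tell whether the data still needs a commit RPC before it
 * may safely be discarded.
 */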
/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
 */
int
nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap)
{
	struct nfsv2_sattr *sp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	struct vnode *newvp = (struct vnode *)0;
	struct nfsnode *dnp, *np;
	char *cp2;
	char *bpos, *dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	u_int32_t rdev;
	const int v3 = NFS_ISV3(dvp);

	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = txdr_unsigned(vap->va_rdev);
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = nfs_xdrneg1;
	else {
		VOP_ABORTOP(dvp, cnp);
		vput(dvp);
		return (EOPNOTSUPP);
	}
	nfsstats.rpccnt[NFSPROC_MKNOD]++;
	dnp = VTONFS(dvp);
	nfsm_reqhead(dnp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
	    + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		*tl++ = vtonfsv3_type(vap->va_type);
		nfsm_v3attrbuild(vap, false);
		if (vap->va_type == VCHR || vap->va_type == VBLK) {
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(major(vap->va_rdev));
			*tl = txdr_unsigned(minor(vap->va_rdev));
		}
	} else
#endif
	{
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = rdev;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dnp, NFSPROC_MKNOD, curlwp, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	if (error) {
		if (newvp)
			vput(newvp);
	} else {
		if (cnp->cn_flags & MAKEENTRY)
			nfs_cache_enter(dvp, newvp, cnp);
		*vpp = newvp;
	}
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	vput(dvp);
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
int
nfs_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	int error;

	error = nfs_mknodrpc(dvp, ap->a_vpp, cnp, ap->a_vap);
	VN_KNOTE(dvp, NOTE_WRITE);
	if (error == 0 || error == EEXIST)
		cache_purge1(dvp, cnp, 0);
	return (error);
}

/*
 * nfs file create call
 */
int
nfs_create(void *v)
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct nfsv2_sattr *sp;
	u_int32_t *tl;
	char *cp;
	int32_t t1, t2;
	struct nfsnode *dnp, *np = (struct nfsnode *)0;
	struct vnode *newvp = (struct vnode *)0;
	char *bpos, *dpos, *cp2;
	int error, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(dvp);
	u_int32_t excl_mode = NFSV3CREATE_UNCHECKED;

	/*
	 * Oops, not for me..
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	KASSERT(vap->va_type == VREG);

#ifdef VA_EXCLUSIVE
	if (vap->va_vaflags & VA_EXCLUSIVE) {
		excl_mode = NFSV3CREATE_EXCLUSIVE;
	}
#endif
again:
	error = 0;
	nfsstats.rpccnt[NFSPROC_CREATE]++;
	dnp = VTONFS(dvp);
	nfsm_reqhead(dnp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
	    nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
		if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
			nfsm_build(tl, u_int32_t *, NFSX_V3CREATEVERF);
			*tl++ = arc4random();
			*tl = arc4random();
		} else {
			*tl = txdr_unsigned(excl_mode);
			nfsm_v3attrbuild(vap, false);
		}
	} else
#endif
	{
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = 0;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dnp, NFSPROC_CREATE, curlwp, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	if (error) {
		/*
		 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
		 */
		if (v3 && error == ENOTSUP) {
			if (excl_mode == NFSV3CREATE_EXCLUSIVE) {
				excl_mode = NFSV3CREATE_GUARDED;
				goto again;
			} else if (excl_mode == NFSV3CREATE_GUARDED) {
				excl_mode = NFSV3CREATE_UNCHECKED;
				goto again;
			}
		}
	} else if (v3 && (excl_mode == NFSV3CREATE_EXCLUSIVE)) {
		struct timespec ts;

		getnanotime(&ts);

		/*
		 * make sure that we'll update timestamps as
		 * most server implementations use them to store
		 * the create verifier.
		 *
		 * XXX it's better to use TOSERVER always.
		 */

		if (vap->va_atime.tv_sec == VNOVAL)
			vap->va_atime = ts;
		if (vap->va_mtime.tv_sec == VNOVAL)
			vap->va_mtime = ts;

		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, curlwp);
	}
	if (error == 0) {
		if (cnp->cn_flags & MAKEENTRY)
			nfs_cache_enter(dvp, newvp, cnp);
		else
			cache_purge1(dvp, cnp, 0);
		*ap->a_vpp = newvp;
	} else {
		if (newvp)
			vput(newvp);
		if (error == EEXIST)
			cache_purge1(dvp, cnp, 0);
	}
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	vput(dvp);
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If v_usecount > 1
 *	  If a rename is not already in the works
 *	    call nfs_sillyrename() to set it up
 *	  else
 *	    do the remove rpc
 */
int
nfs_remove(void *v)
{
	struct vop_remove_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode * a_vp;
		struct componentname * a_cnp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;

#ifndef DIAGNOSTIC
	if (vp->v_usecount < 1)
		panic("nfs_remove: bad v_usecount");
#endif
	if (vp->v_type == VDIR)
		error = EPERM;
	else if (vp->v_usecount == 1 || (np->n_sillyrename &&
	    VOP_GETATTR(vp, &vattr, cnp->cn_cred) == 0 &&
	    vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, curlwp, 1);
		/* Do the rpc */
		if (error != EINTR)
			error = nfs_removerpc(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, curlwp);
	} else if (!np->n_sillyrename)
		error = nfs_sillyrename(dvp, vp, cnp, false);
	if (!error && nfs_getattrcache(vp, &vattr) == 0 &&
	    vattr.va_nlink == 1) {
		np->n_flag |= NREMOVED;
	}
	NFS_INVALIDATE_ATTRCACHE(np);
	VN_KNOTE(vp, NOTE_DELETE);
	VN_KNOTE(dvp, NOTE_WRITE);
	if (dvp == vp)
		vrele(vp);
	else
		vput(vp);
	vput(dvp);
	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 */
int
nfs_removeit(struct sillyrename *sp)
{

	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
	    (struct lwp *)0));
}

/*
 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 */
int
nfs_removerpc(struct vnode *dvp, const char *name, int namelen, kauth_cred_t cred, struct lwp *l)
{
	u_int32_t *tl;
	char *cp;
#ifndef NFS_V2_ONLY
	int32_t t1;
	char *cp2;
#endif
	int32_t t2;
	char *bpos, *dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(dvp);
	int rexmit = 0;
	struct nfsnode *dnp = VTONFS(dvp);

	nfsstats.rpccnt[NFSPROC_REMOVE]++;
	nfsm_reqhead(dnp, NFSPROC_REMOVE,
	    NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
	nfsm_fhtom(dnp, v3);
	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
	nfsm_request1(dnp, NFSPROC_REMOVE, l, cred, &rexmit);
#ifndef NFS_V2_ONLY
	if (v3)
		nfsm_wcc_data(dvp, wccflag, 0, !error);
#endif
	nfsm_reqdone;
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));
	/*
	 * Kludge City: If the first reply to the remove rpc is lost..
	 *   the reply to the retransmitted request will be ENOENT
	 *   since the file was in fact removed
	 *   Therefore, we cheat and return success.
	 */
	if (rexmit && error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs file rename call
 */
int
nfs_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tvp = ap->a_tvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	int error;

	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	/*
	 * If the tvp exists and is in use, sillyrename it before doing the
	 * rename of the new file over it.
	 *
	 * Have sillyrename use link instead of rename if possible,
	 * so that we don't lose the file if the rename fails, and so
	 * that there's no window when the "to" file doesn't exist.
	 */
	if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename &&
	    tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp, true)) {
		VN_KNOTE(tvp, NOTE_DELETE);
		vput(tvp);
		tvp = NULL;
	}

	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
	    tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
	    curlwp);

	VN_KNOTE(fdvp, NOTE_WRITE);
	VN_KNOTE(tdvp, NOTE_WRITE);
	if (error == 0 || error == EEXIST) {
		if (fvp->v_type == VDIR)
			cache_purge(fvp);
		else
			cache_purge1(fdvp, fcnp, 0);
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tvp);
		else
			cache_purge1(tdvp, tcnp, 0);
	}
out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	return (error);
}

/*
 * nfs file rename rpc called from nfs_remove() above
 */
int
nfs_renameit(struct vnode *sdvp, struct componentname *scnp, struct sillyrename *sp)
{
	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
	    sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curlwp));
}

/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 */
int
nfs_renamerpc(struct vnode *fdvp, const char *fnameptr, int fnamelen, struct vnode *tdvp, const char *tnameptr, int tnamelen, kauth_cred_t cred, struct lwp *l)
{
	u_int32_t *tl;
	char *cp;
#ifndef NFS_V2_ONLY
	int32_t t1;
	char *cp2;
#endif
	int32_t t2;
	char *bpos, *dpos;
	int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb;
	const int v3 = NFS_ISV3(fdvp);
	int rexmit = 0;
	struct nfsnode *fdnp = VTONFS(fdvp);

	nfsstats.rpccnt[NFSPROC_RENAME]++;
	nfsm_reqhead(fdnp, NFSPROC_RENAME,
	    (NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
	    nfsm_rndup(tnamelen));
	nfsm_fhtom(fdnp, v3);
	nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
	nfsm_fhtom(VTONFS(tdvp), v3);
	nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
	nfsm_request1(fdnp, NFSPROC_RENAME, l, cred, &rexmit);
#ifndef NFS_V2_ONLY
	if (v3) {
		nfsm_wcc_data(fdvp, fwccflag, 0, !error);
		nfsm_wcc_data(tdvp, twccflag, 0, !error);
	}
#endif
	nfsm_reqdone;
	VTONFS(fdvp)->n_flag |= NMODIFIED;
	VTONFS(tdvp)->n_flag |= NMODIFIED;
	if (!fwccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp));
	if (!twccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp));
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (rexmit && error == ENOENT)
		error = 0;
	return (error);
}
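/*
 * REMOVE, RENAME, LINK and SYMLINK are not idempotent: if the first reply
 * is lost, the server answers the retransmission with ENOENT or EEXIST
 * even though the original operation succeeded.  That is why the RPCs
 * above and below use nfsm_request1() to learn whether the request was
 * retransmitted and, if so, map those errors to success.
 */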
1987 */ 1988 1989 static int 1990 nfs_linkrpc(struct vnode *dvp, struct vnode *vp, const char *name, 1991 size_t namelen, kauth_cred_t cred, struct lwp *l) 1992 { 1993 u_int32_t *tl; 1994 char *cp; 1995 #ifndef NFS_V2_ONLY 1996 int32_t t1; 1997 char *cp2; 1998 #endif 1999 int32_t t2; 2000 char *bpos, *dpos; 2001 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 2002 struct mbuf *mreq, *mrep, *md, *mb; 2003 const int v3 = NFS_ISV3(dvp); 2004 int rexmit = 0; 2005 struct nfsnode *np = VTONFS(vp); 2006 2007 nfsstats.rpccnt[NFSPROC_LINK]++; 2008 nfsm_reqhead(np, NFSPROC_LINK, 2009 NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(namelen)); 2010 nfsm_fhtom(np, v3); 2011 nfsm_fhtom(VTONFS(dvp), v3); 2012 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 2013 nfsm_request1(np, NFSPROC_LINK, l, cred, &rexmit); 2014 #ifndef NFS_V2_ONLY 2015 if (v3) { 2016 nfsm_postop_attr(vp, attrflag, 0); 2017 nfsm_wcc_data(dvp, wccflag, 0, !error); 2018 } 2019 #endif 2020 nfsm_reqdone; 2021 2022 VTONFS(dvp)->n_flag |= NMODIFIED; 2023 if (!attrflag) 2024 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 2025 if (!wccflag) 2026 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2027 2028 /* 2029 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 2030 */ 2031 if (rexmit && error == EEXIST) 2032 error = 0; 2033 2034 return error; 2035 } 2036 2037 /* 2038 * nfs hard link create call 2039 */ 2040 int 2041 nfs_link(void *v) 2042 { 2043 struct vop_link_args /* { 2044 struct vnode *a_dvp; 2045 struct vnode *a_vp; 2046 struct componentname *a_cnp; 2047 } */ *ap = v; 2048 struct vnode *vp = ap->a_vp; 2049 struct vnode *dvp = ap->a_dvp; 2050 struct componentname *cnp = ap->a_cnp; 2051 int error = 0; 2052 2053 if (dvp->v_mount != vp->v_mount) { 2054 VOP_ABORTOP(dvp, cnp); 2055 vput(dvp); 2056 return (EXDEV); 2057 } 2058 if (dvp != vp) { 2059 error = vn_lock(vp, LK_EXCLUSIVE); 2060 if (error != 0) { 2061 VOP_ABORTOP(dvp, cnp); 2062 vput(dvp); 2063 return error; 2064 } 2065 } 2066 2067 /* 2068 * Push all writes to the server, so that the attribute cache 2069 * doesn't get "out of sync" with the server. 2070 * XXX There should be a better way! 
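 * (The LINK reply's post-op attributes for vp are loaded into the
 * attribute cache below; if dirty pages were still pending locally,
 * those server-side size/mtime values would predate our own writes.)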
2071 */
2072 VOP_FSYNC(vp, cnp->cn_cred, FSYNC_WAIT, 0, 0);
2073 
2074 error = nfs_linkrpc(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
2075 cnp->cn_cred, curlwp);
2076 
2077 if (error == 0)
2078 cache_purge1(dvp, cnp, 0);
2079 if (dvp != vp)
2080 VOP_UNLOCK(vp);
2081 VN_KNOTE(vp, NOTE_LINK);
2082 VN_KNOTE(dvp, NOTE_WRITE);
2083 vput(dvp);
2084 return (error);
2085 }
2086 
2087 /*
2088 * nfs symbolic link create call
2089 */
2090 int
2091 nfs_symlink(void *v)
2092 {
2093 struct vop_symlink_args /* {
2094 struct vnode *a_dvp;
2095 struct vnode **a_vpp;
2096 struct componentname *a_cnp;
2097 struct vattr *a_vap;
2098 char *a_target;
2099 } */ *ap = v;
2100 struct vnode *dvp = ap->a_dvp;
2101 struct vattr *vap = ap->a_vap;
2102 struct componentname *cnp = ap->a_cnp;
2103 struct nfsv2_sattr *sp;
2104 u_int32_t *tl;
2105 char *cp;
2106 int32_t t1, t2;
2107 char *bpos, *dpos, *cp2;
2108 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
2109 struct mbuf *mreq, *mrep, *md, *mb;
2110 struct vnode *newvp = (struct vnode *)0;
2111 const int v3 = NFS_ISV3(dvp);
2112 int rexmit = 0;
2113 struct nfsnode *dnp = VTONFS(dvp);
2114 
2115 *ap->a_vpp = NULL;
2116 nfsstats.rpccnt[NFSPROC_SYMLINK]++;
2117 slen = strlen(ap->a_target);
2118 nfsm_reqhead(dnp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
2119 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
2120 nfsm_fhtom(dnp, v3);
2121 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
2122 #ifndef NFS_V2_ONLY
2123 if (v3)
2124 nfsm_v3attrbuild(vap, false);
2125 #endif
2126 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
2127 
2128 if (!v3) {
2129 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
2130 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
2131 sp->sa_uid = nfs_xdrneg1;
2132 sp->sa_gid = nfs_xdrneg1;
2133 sp->sa_size = nfs_xdrneg1;
2134 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
2135 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
2136 }
2137 
2138 nfsm_request1(dnp, NFSPROC_SYMLINK, curlwp, cnp->cn_cred,
2139 &rexmit);
2140 #ifndef NFS_V2_ONLY
2141 if (v3) {
2142 if (!error)
2143 nfsm_mtofh(dvp, newvp, v3, gotvp);
2144 nfsm_wcc_data(dvp, wccflag, 0, !error);
2145 }
2146 #endif
2147 nfsm_reqdone;
2148 /*
2149 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry
2150 */ 2151 if (rexmit && error == EEXIST) 2152 error = 0; 2153 if (error == 0 || error == EEXIST) 2154 cache_purge1(dvp, cnp, 0); 2155 if (error == 0 && newvp == NULL) { 2156 struct nfsnode *np = NULL; 2157 2158 error = nfs_lookitup(dvp, cnp->cn_nameptr, cnp->cn_namelen, 2159 cnp->cn_cred, curlwp, &np); 2160 if (error == 0) 2161 newvp = NFSTOV(np); 2162 } 2163 if (error) { 2164 if (newvp != NULL) 2165 vput(newvp); 2166 } else { 2167 *ap->a_vpp = newvp; 2168 } 2169 VTONFS(dvp)->n_flag |= NMODIFIED; 2170 if (!wccflag) 2171 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2172 VN_KNOTE(dvp, NOTE_WRITE); 2173 vput(dvp); 2174 return (error); 2175 } 2176 2177 /* 2178 * nfs make dir call 2179 */ 2180 int 2181 nfs_mkdir(void *v) 2182 { 2183 struct vop_mkdir_args /* { 2184 struct vnode *a_dvp; 2185 struct vnode **a_vpp; 2186 struct componentname *a_cnp; 2187 struct vattr *a_vap; 2188 } */ *ap = v; 2189 struct vnode *dvp = ap->a_dvp; 2190 struct vattr *vap = ap->a_vap; 2191 struct componentname *cnp = ap->a_cnp; 2192 struct nfsv2_sattr *sp; 2193 u_int32_t *tl; 2194 char *cp; 2195 int32_t t1, t2; 2196 int len; 2197 struct nfsnode *dnp = VTONFS(dvp), *np = (struct nfsnode *)0; 2198 struct vnode *newvp = (struct vnode *)0; 2199 char *bpos, *dpos, *cp2; 2200 int error = 0, wccflag = NFSV3_WCCRATTR; 2201 int gotvp = 0; 2202 int rexmit = 0; 2203 struct mbuf *mreq, *mrep, *md, *mb; 2204 const int v3 = NFS_ISV3(dvp); 2205 2206 len = cnp->cn_namelen; 2207 nfsstats.rpccnt[NFSPROC_MKDIR]++; 2208 nfsm_reqhead(dnp, NFSPROC_MKDIR, 2209 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3)); 2210 nfsm_fhtom(dnp, v3); 2211 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 2212 #ifndef NFS_V2_ONLY 2213 if (v3) { 2214 nfsm_v3attrbuild(vap, false); 2215 } else 2216 #endif 2217 { 2218 nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); 2219 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 2220 sp->sa_uid = nfs_xdrneg1; 2221 sp->sa_gid = nfs_xdrneg1; 2222 sp->sa_size = nfs_xdrneg1; 2223 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 2224 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 2225 } 2226 nfsm_request1(dnp, NFSPROC_MKDIR, curlwp, cnp->cn_cred, &rexmit); 2227 if (!error) 2228 nfsm_mtofh(dvp, newvp, v3, gotvp); 2229 if (v3) 2230 nfsm_wcc_data(dvp, wccflag, 0, !error); 2231 nfsm_reqdone; 2232 VTONFS(dvp)->n_flag |= NMODIFIED; 2233 if (!wccflag) 2234 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2235 /* 2236 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry 2237 * if we can succeed in looking up the directory. 
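 * The !error && !gotvp case takes the same path: a v3 server is not
 * required to return the new directory's file handle in the MKDIR
 * reply, so we go and look it up ourselves.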
2238 */ 2239 if ((rexmit && error == EEXIST) || (!error && !gotvp)) { 2240 if (newvp) { 2241 vput(newvp); 2242 newvp = (struct vnode *)0; 2243 } 2244 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 2245 curlwp, &np); 2246 if (!error) { 2247 newvp = NFSTOV(np); 2248 if (newvp->v_type != VDIR || newvp == dvp) 2249 error = EEXIST; 2250 } 2251 } 2252 if (error) { 2253 if (newvp) { 2254 if (dvp != newvp) 2255 vput(newvp); 2256 else 2257 vrele(newvp); 2258 } 2259 } else { 2260 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2261 if (cnp->cn_flags & MAKEENTRY) 2262 nfs_cache_enter(dvp, newvp, cnp); 2263 *ap->a_vpp = newvp; 2264 } 2265 vput(dvp); 2266 return (error); 2267 } 2268 2269 /* 2270 * nfs remove directory call 2271 */ 2272 int 2273 nfs_rmdir(void *v) 2274 { 2275 struct vop_rmdir_args /* { 2276 struct vnode *a_dvp; 2277 struct vnode *a_vp; 2278 struct componentname *a_cnp; 2279 } */ *ap = v; 2280 struct vnode *vp = ap->a_vp; 2281 struct vnode *dvp = ap->a_dvp; 2282 struct componentname *cnp = ap->a_cnp; 2283 u_int32_t *tl; 2284 char *cp; 2285 #ifndef NFS_V2_ONLY 2286 int32_t t1; 2287 char *cp2; 2288 #endif 2289 int32_t t2; 2290 char *bpos, *dpos; 2291 int error = 0, wccflag = NFSV3_WCCRATTR; 2292 int rexmit = 0; 2293 struct mbuf *mreq, *mrep, *md, *mb; 2294 const int v3 = NFS_ISV3(dvp); 2295 struct nfsnode *dnp; 2296 2297 if (dvp == vp) { 2298 vrele(dvp); 2299 vput(dvp); 2300 return (EINVAL); 2301 } 2302 nfsstats.rpccnt[NFSPROC_RMDIR]++; 2303 dnp = VTONFS(dvp); 2304 nfsm_reqhead(dnp, NFSPROC_RMDIR, 2305 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 2306 nfsm_fhtom(dnp, v3); 2307 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 2308 nfsm_request1(dnp, NFSPROC_RMDIR, curlwp, cnp->cn_cred, &rexmit); 2309 #ifndef NFS_V2_ONLY 2310 if (v3) 2311 nfsm_wcc_data(dvp, wccflag, 0, !error); 2312 #endif 2313 nfsm_reqdone; 2314 VTONFS(dvp)->n_flag |= NMODIFIED; 2315 if (!wccflag) 2316 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2317 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 2318 VN_KNOTE(vp, NOTE_DELETE); 2319 cache_purge(vp); 2320 vput(vp); 2321 vput(dvp); 2322 /* 2323 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2324 */ 2325 if (rexmit && error == ENOENT) 2326 error = 0; 2327 return (error); 2328 } 2329 2330 /* 2331 * nfs readdir call 2332 */ 2333 int 2334 nfs_readdir(void *v) 2335 { 2336 struct vop_readdir_args /* { 2337 struct vnode *a_vp; 2338 struct uio *a_uio; 2339 kauth_cred_t a_cred; 2340 int *a_eofflag; 2341 off_t **a_cookies; 2342 int *a_ncookies; 2343 } */ *ap = v; 2344 struct vnode *vp = ap->a_vp; 2345 struct uio *uio = ap->a_uio; 2346 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2347 char *base = uio->uio_iov->iov_base; 2348 int tresid, error; 2349 size_t count, lost; 2350 struct dirent *dp; 2351 off_t *cookies = NULL; 2352 int ncookies = 0, nc; 2353 2354 if (vp->v_type != VDIR) 2355 return (EPERM); 2356 2357 lost = uio->uio_resid & (NFS_DIRFRAGSIZ - 1); 2358 count = uio->uio_resid - lost; 2359 if (count <= 0) 2360 return (EINVAL); 2361 2362 /* 2363 * Call nfs_bioread() to do the real work. 2364 */ 2365 tresid = uio->uio_resid = count; 2366 error = nfs_bioread(vp, uio, 0, ap->a_cred, 2367 ap->a_cookies ? 
NFSBIO_CACHECOOKIES : 0); 2368 2369 if (!error && ap->a_cookies) { 2370 ncookies = count / 16; 2371 cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK); 2372 *ap->a_cookies = cookies; 2373 } 2374 2375 if (!error && uio->uio_resid == tresid) { 2376 uio->uio_resid += lost; 2377 nfsstats.direofcache_misses++; 2378 if (ap->a_cookies) 2379 *ap->a_ncookies = 0; 2380 *ap->a_eofflag = 1; 2381 return (0); 2382 } 2383 2384 if (!error && ap->a_cookies) { 2385 /* 2386 * Only the NFS server and emulations use cookies, and they 2387 * load the directory block into system space, so we can 2388 * just look at it directly. 2389 */ 2390 if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace) || 2391 uio->uio_iovcnt != 1) 2392 panic("nfs_readdir: lost in space"); 2393 for (nc = 0; ncookies-- && 2394 base < (char *)uio->uio_iov->iov_base; nc++){ 2395 dp = (struct dirent *) base; 2396 if (dp->d_reclen == 0) 2397 break; 2398 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) 2399 *(cookies++) = (off_t)NFS_GETCOOKIE32(dp); 2400 else 2401 *(cookies++) = NFS_GETCOOKIE(dp); 2402 base += dp->d_reclen; 2403 } 2404 uio->uio_resid += 2405 ((char *)uio->uio_iov->iov_base - base); 2406 uio->uio_iov->iov_len += 2407 ((char *)uio->uio_iov->iov_base - base); 2408 uio->uio_iov->iov_base = base; 2409 *ap->a_ncookies = nc; 2410 } 2411 2412 uio->uio_resid += lost; 2413 *ap->a_eofflag = 0; 2414 return (error); 2415 } 2416 2417 /* 2418 * Readdir rpc call. 2419 * Called from below the buffer cache by nfs_doio(). 2420 */ 2421 int 2422 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred) 2423 { 2424 int len, left; 2425 struct dirent *dp = NULL; 2426 u_int32_t *tl; 2427 char *cp; 2428 int32_t t1, t2; 2429 char *bpos, *dpos, *cp2; 2430 struct mbuf *mreq, *mrep, *md, *mb; 2431 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2432 struct nfsnode *dnp = VTONFS(vp); 2433 u_quad_t fileno; 2434 int error = 0, more_dirs = 1, blksiz = 0, bigenough = 1; 2435 #ifndef NFS_V2_ONLY 2436 int attrflag; 2437 #endif 2438 int nrpcs = 0, reclen; 2439 const int v3 = NFS_ISV3(vp); 2440 2441 #ifdef DIAGNOSTIC 2442 /* 2443 * Should be called from buffer cache, so only amount of 2444 * NFS_DIRBLKSIZ will be requested. 2445 */ 2446 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ) 2447 panic("nfs readdirrpc bad uio"); 2448 #endif 2449 2450 /* 2451 * Loop around doing readdir rpc's of size nm_readdirsize 2452 * truncated to a multiple of NFS_DIRFRAGSIZ. 2453 * The stopping criteria is EOF or buffer full. 2454 */ 2455 while (more_dirs && bigenough) { 2456 /* 2457 * Heuristic: don't bother to do another RPC to further 2458 * fill up this block if there is not much room left. (< 50% 2459 * of the readdir RPC size). This wastes some buffer space 2460 * but can save up to 50% in RPC calls. 
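 * (E.g. with an 8k nm_readdirsize, once less than 4k of the block
 * remains unfilled we simply stop and leave the tail of the block
 * unused rather than issue another RPC.)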
2461 */ 2462 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2463 bigenough = 0; 2464 break; 2465 } 2466 nfsstats.rpccnt[NFSPROC_READDIR]++; 2467 nfsm_reqhead(dnp, NFSPROC_READDIR, NFSX_FH(v3) + 2468 NFSX_READDIR(v3)); 2469 nfsm_fhtom(dnp, v3); 2470 #ifndef NFS_V2_ONLY 2471 if (v3) { 2472 nfsm_build(tl, u_int32_t *, 5 * NFSX_UNSIGNED); 2473 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2474 txdr_swapcookie3(uiop->uio_offset, tl); 2475 } else { 2476 txdr_cookie3(uiop->uio_offset, tl); 2477 } 2478 tl += 2; 2479 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2480 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2481 } else 2482 #endif 2483 { 2484 nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 2485 *tl++ = txdr_unsigned(uiop->uio_offset); 2486 } 2487 *tl = txdr_unsigned(nmp->nm_readdirsize); 2488 nfsm_request(dnp, NFSPROC_READDIR, curlwp, cred); 2489 nrpcs++; 2490 #ifndef NFS_V2_ONLY 2491 if (v3) { 2492 nfsm_postop_attr(vp, attrflag, 0); 2493 if (!error) { 2494 nfsm_dissect(tl, u_int32_t *, 2495 2 * NFSX_UNSIGNED); 2496 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2497 dnp->n_cookieverf.nfsuquad[1] = *tl; 2498 } else { 2499 m_freem(mrep); 2500 goto nfsmout; 2501 } 2502 } 2503 #endif 2504 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2505 more_dirs = fxdr_unsigned(int, *tl); 2506 2507 /* loop thru the dir entries, doctoring them to 4bsd form */ 2508 while (more_dirs && bigenough) { 2509 #ifndef NFS_V2_ONLY 2510 if (v3) { 2511 nfsm_dissect(tl, u_int32_t *, 2512 3 * NFSX_UNSIGNED); 2513 fileno = fxdr_hyper(tl); 2514 len = fxdr_unsigned(int, *(tl + 2)); 2515 } else 2516 #endif 2517 { 2518 nfsm_dissect(tl, u_int32_t *, 2519 2 * NFSX_UNSIGNED); 2520 fileno = fxdr_unsigned(u_quad_t, *tl++); 2521 len = fxdr_unsigned(int, *tl); 2522 } 2523 if (len <= 0 || len > NFS_MAXNAMLEN) { 2524 error = EBADRPC; 2525 m_freem(mrep); 2526 goto nfsmout; 2527 } 2528 /* for cookie stashing */ 2529 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2530 left = NFS_DIRFRAGSIZ - blksiz; 2531 if (reclen > left) { 2532 memset(uiop->uio_iov->iov_base, 0, left); 2533 dp->d_reclen += left; 2534 UIO_ADVANCE(uiop, left); 2535 blksiz = 0; 2536 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2537 } 2538 if (reclen > uiop->uio_resid) 2539 bigenough = 0; 2540 if (bigenough) { 2541 int tlen; 2542 2543 dp = (struct dirent *)uiop->uio_iov->iov_base; 2544 dp->d_fileno = fileno; 2545 dp->d_namlen = len; 2546 dp->d_reclen = reclen; 2547 dp->d_type = DT_UNKNOWN; 2548 blksiz += reclen; 2549 if (blksiz == NFS_DIRFRAGSIZ) 2550 blksiz = 0; 2551 UIO_ADVANCE(uiop, DIRHDSIZ); 2552 nfsm_mtouio(uiop, len); 2553 tlen = reclen - (DIRHDSIZ + len); 2554 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2555 UIO_ADVANCE(uiop, tlen); 2556 } else 2557 nfsm_adv(nfsm_rndup(len)); 2558 #ifndef NFS_V2_ONLY 2559 if (v3) { 2560 nfsm_dissect(tl, u_int32_t *, 2561 3 * NFSX_UNSIGNED); 2562 } else 2563 #endif 2564 { 2565 nfsm_dissect(tl, u_int32_t *, 2566 2 * NFSX_UNSIGNED); 2567 } 2568 if (bigenough) { 2569 #ifndef NFS_V2_ONLY 2570 if (v3) { 2571 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2572 uiop->uio_offset = 2573 fxdr_swapcookie3(tl); 2574 else 2575 uiop->uio_offset = 2576 fxdr_cookie3(tl); 2577 } 2578 else 2579 #endif 2580 { 2581 uiop->uio_offset = 2582 fxdr_unsigned(off_t, *tl); 2583 } 2584 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2585 } 2586 if (v3) 2587 tl += 2; 2588 else 2589 tl++; 2590 more_dirs = fxdr_unsigned(int, *tl); 2591 } 2592 /* 2593 * If at end of rpc data, get the eof boolean 2594 */ 2595 if (!more_dirs) { 2596 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2597 more_dirs 
= (fxdr_unsigned(int, *tl) == 0);
2598 
2599 /*
2600 * kludge: if we got no entries, treat it as EOF.
2601 * some servers sometimes send a reply without any
2602 * entries or an EOF flag.
2603 * although that might mean the server has a very long name,
2604 * we can't handle such entries anyway.
2605 */
2606 
2607 if (uiop->uio_resid >= NFS_DIRBLKSIZ)
2608 more_dirs = 0;
2609 }
2610 m_freem(mrep);
2611 }
2612 /*
2613 * Fill last record, if any, out to a multiple of NFS_DIRFRAGSIZ
2614 * by increasing d_reclen for the last record.
2615 */
2616 if (blksiz > 0) {
2617 left = NFS_DIRFRAGSIZ - blksiz;
2618 memset(uiop->uio_iov->iov_base, 0, left);
2619 dp->d_reclen += left;
2620 NFS_STASHCOOKIE(dp, uiop->uio_offset);
2621 UIO_ADVANCE(uiop, left);
2622 }
2623 
2624 /*
2625 * We are now either at the end of the directory or have filled the
2626 * block.
2627 */
2628 if (bigenough) {
2629 dnp->n_direofoffset = uiop->uio_offset;
2630 dnp->n_flag |= NEOFVALID;
2631 }
2632 nfsmout:
2633 return (error);
2634 }
2635 
2636 #ifndef NFS_V2_ONLY
2637 /*
2638 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
2639 */
2640 int
2641 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, kauth_cred_t cred)
2642 {
2643 int len, left;
2644 struct dirent *dp = NULL;
2645 u_int32_t *tl;
2646 char *cp;
2647 int32_t t1, t2;
2648 struct vnode *newvp;
2649 char *bpos, *dpos, *cp2;
2650 struct mbuf *mreq, *mrep, *md, *mb;
2651 struct nameidata nami, *ndp = &nami;
2652 struct componentname *cnp = &ndp->ni_cnd;
2653 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2654 struct nfsnode *dnp = VTONFS(vp), *np;
2655 nfsfh_t *fhp;
2656 u_quad_t fileno;
2657 int error = 0, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
2658 int attrflag, fhsize, nrpcs = 0, reclen;
2659 struct nfs_fattr fattr, *fp;
2660 
2661 #ifdef DIAGNOSTIC
2662 if (uiop->uio_iovcnt != 1 || uiop->uio_resid != NFS_DIRBLKSIZ)
2663 panic("nfs readdirplusrpc bad uio");
2664 #endif
2665 ndp->ni_dvp = vp;
2666 newvp = NULLVP;
2667 
2668 /*
2669 * Loop around doing readdir rpc's of size nm_readdirsize
2670 * truncated to a multiple of NFS_DIRFRAGSIZ.
2671 * The stopping criteria is EOF or buffer full.
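 * Unlike plain readdir, each entry in the reply may also carry the
 * file's attributes and file handle; these are used below to prime
 * the attribute cache and the name cache, avoiding a later
 * LOOKUP/GETATTR RPC per entry.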
2672 */ 2673 while (more_dirs && bigenough) { 2674 if (nrpcs > 0 && uiop->uio_resid < (nmp->nm_readdirsize / 2)) { 2675 bigenough = 0; 2676 break; 2677 } 2678 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2679 nfsm_reqhead(dnp, NFSPROC_READDIRPLUS, 2680 NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2681 nfsm_fhtom(dnp, 1); 2682 nfsm_build(tl, u_int32_t *, 6 * NFSX_UNSIGNED); 2683 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) { 2684 txdr_swapcookie3(uiop->uio_offset, tl); 2685 } else { 2686 txdr_cookie3(uiop->uio_offset, tl); 2687 } 2688 tl += 2; 2689 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2690 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2691 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2692 *tl = txdr_unsigned(nmp->nm_rsize); 2693 nfsm_request(dnp, NFSPROC_READDIRPLUS, curlwp, cred); 2694 nfsm_postop_attr(vp, attrflag, 0); 2695 if (error) { 2696 m_freem(mrep); 2697 goto nfsmout; 2698 } 2699 nrpcs++; 2700 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2701 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2702 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2703 more_dirs = fxdr_unsigned(int, *tl); 2704 2705 /* loop thru the dir entries, doctoring them to 4bsd form */ 2706 while (more_dirs && bigenough) { 2707 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2708 fileno = fxdr_hyper(tl); 2709 len = fxdr_unsigned(int, *(tl + 2)); 2710 if (len <= 0 || len > NFS_MAXNAMLEN) { 2711 error = EBADRPC; 2712 m_freem(mrep); 2713 goto nfsmout; 2714 } 2715 /* for cookie stashing */ 2716 reclen = _DIRENT_RECLEN(dp, len) + 2 * sizeof(off_t); 2717 left = NFS_DIRFRAGSIZ - blksiz; 2718 if (reclen > left) { 2719 /* 2720 * DIRFRAGSIZ is aligned, no need to align 2721 * again here. 2722 */ 2723 memset(uiop->uio_iov->iov_base, 0, left); 2724 dp->d_reclen += left; 2725 UIO_ADVANCE(uiop, left); 2726 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2727 blksiz = 0; 2728 } 2729 if (reclen > uiop->uio_resid) 2730 bigenough = 0; 2731 if (bigenough) { 2732 int tlen; 2733 2734 dp = (struct dirent *)uiop->uio_iov->iov_base; 2735 dp->d_fileno = fileno; 2736 dp->d_namlen = len; 2737 dp->d_reclen = reclen; 2738 dp->d_type = DT_UNKNOWN; 2739 blksiz += reclen; 2740 if (blksiz == NFS_DIRFRAGSIZ) 2741 blksiz = 0; 2742 UIO_ADVANCE(uiop, DIRHDSIZ); 2743 nfsm_mtouio(uiop, len); 2744 tlen = reclen - (DIRHDSIZ + len); 2745 (void)memset(uiop->uio_iov->iov_base, 0, tlen); 2746 UIO_ADVANCE(uiop, tlen); 2747 cnp->cn_nameptr = dp->d_name; 2748 cnp->cn_namelen = dp->d_namlen; 2749 } else 2750 nfsm_adv(nfsm_rndup(len)); 2751 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2752 if (bigenough) { 2753 if (nmp->nm_iflag & NFSMNT_SWAPCOOKIE) 2754 uiop->uio_offset = 2755 fxdr_swapcookie3(tl); 2756 else 2757 uiop->uio_offset = 2758 fxdr_cookie3(tl); 2759 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2760 } 2761 tl += 2; 2762 2763 /* 2764 * Since the attributes are before the file handle 2765 * (sigh), we must skip over the attributes and then 2766 * come back and get them. 
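 * (In practice the attributes are not literally skipped and re-read;
 * they are copied into a local nfs_fattr below and applied with
 * nfs_loadattrcache() once the file handle has been read and the
 * nfsnode looked up.)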
2767 */ 2768 attrflag = fxdr_unsigned(int, *tl); 2769 if (attrflag) { 2770 nfsm_dissect(fp, struct nfs_fattr *, NFSX_V3FATTR); 2771 memcpy(&fattr, fp, NFSX_V3FATTR); 2772 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2773 doit = fxdr_unsigned(int, *tl); 2774 if (doit) { 2775 nfsm_getfh(fhp, fhsize, 1); 2776 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2777 vref(vp); 2778 newvp = vp; 2779 np = dnp; 2780 } else { 2781 error = nfs_nget1(vp->v_mount, fhp, 2782 fhsize, &np, LK_NOWAIT); 2783 if (!error) 2784 newvp = NFSTOV(np); 2785 } 2786 if (!error) { 2787 const char *xcp; 2788 2789 nfs_loadattrcache(&newvp, &fattr, 0, 0); 2790 if (bigenough) { 2791 dp->d_type = 2792 IFTODT(VTTOIF(np->n_vattr->va_type)); 2793 if (cnp->cn_namelen <= NCHNAMLEN) { 2794 ndp->ni_vp = newvp; 2795 xcp = cnp->cn_nameptr + 2796 cnp->cn_namelen; 2797 cnp->cn_hash = 2798 namei_hash(cnp->cn_nameptr, &xcp); 2799 nfs_cache_enter(ndp->ni_dvp, 2800 ndp->ni_vp, cnp); 2801 } 2802 } 2803 } 2804 error = 0; 2805 } 2806 } else { 2807 /* Just skip over the file handle */ 2808 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2809 i = fxdr_unsigned(int, *tl); 2810 nfsm_adv(nfsm_rndup(i)); 2811 } 2812 if (newvp != NULLVP) { 2813 if (newvp == vp) 2814 vrele(newvp); 2815 else 2816 vput(newvp); 2817 newvp = NULLVP; 2818 } 2819 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2820 more_dirs = fxdr_unsigned(int, *tl); 2821 } 2822 /* 2823 * If at end of rpc data, get the eof boolean 2824 */ 2825 if (!more_dirs) { 2826 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2827 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2828 2829 /* 2830 * kludge: see a comment in nfs_readdirrpc. 2831 */ 2832 2833 if (uiop->uio_resid >= NFS_DIRBLKSIZ) 2834 more_dirs = 0; 2835 } 2836 m_freem(mrep); 2837 } 2838 /* 2839 * Fill last record, iff any, out to a multiple of NFS_DIRFRAGSIZ 2840 * by increasing d_reclen for the last record. 2841 */ 2842 if (blksiz > 0) { 2843 left = NFS_DIRFRAGSIZ - blksiz; 2844 memset(uiop->uio_iov->iov_base, 0, left); 2845 dp->d_reclen += left; 2846 NFS_STASHCOOKIE(dp, uiop->uio_offset); 2847 UIO_ADVANCE(uiop, left); 2848 } 2849 2850 /* 2851 * We are now either at the end of the directory or have filled the 2852 * block. 2853 */ 2854 if (bigenough) { 2855 dnp->n_direofoffset = uiop->uio_offset; 2856 dnp->n_flag |= NEOFVALID; 2857 } 2858 nfsmout: 2859 if (newvp != NULLVP) { 2860 if(newvp == vp) 2861 vrele(newvp); 2862 else 2863 vput(newvp); 2864 } 2865 return (error); 2866 } 2867 #endif 2868 2869 /* 2870 * Silly rename. To make the NFS filesystem that is stateless look a little 2871 * more like the "ufs" a remove of an active vnode is translated to a rename 2872 * to a funny looking filename that is removed by nfs_inactive on the 2873 * nfsnode. There is the potential for another process on a different client 2874 * to create the same funny name between the nfs_lookitup() fails and the 2875 * nfs_rename() completes, but... 
2876 */
2877 int
2878 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, bool dolink)
2879 {
2880 struct sillyrename *sp;
2881 struct nfsnode *np;
2882 int error;
2883 pid_t pid;
2884 
2885 cache_purge(dvp);
2886 np = VTONFS(vp);
2887 #ifdef DIAGNOSTIC
2888 if (vp->v_type == VDIR)
2889 panic("nfs: sillyrename dir");
2890 #endif
2891 sp = kmem_alloc(sizeof(*sp), KM_SLEEP);
2892 sp->s_cred = kauth_cred_dup(cnp->cn_cred);
2893 sp->s_dvp = dvp;
2894 vref(dvp);
2895 
2896 /* Fudge together a funny name: ".nfsXpppp4.4", where pppp is the pid in hex and X cycles from 'A' on collisions */
2897 pid = curlwp->l_proc->p_pid;
2898 memcpy(sp->s_name, ".nfsAxxxx4.4", 13);
2899 sp->s_namlen = 12;
2900 sp->s_name[8] = hexdigits[pid & 0xf];
2901 sp->s_name[7] = hexdigits[(pid >> 4) & 0xf];
2902 sp->s_name[6] = hexdigits[(pid >> 8) & 0xf];
2903 sp->s_name[5] = hexdigits[(pid >> 12) & 0xf];
2904 
2905 /* Try lookitups until we get one that isn't there */
2906 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2907 curlwp, (struct nfsnode **)0) == 0) {
2908 sp->s_name[4]++;
2909 if (sp->s_name[4] > 'z') {
2910 error = EINVAL;
2911 goto bad;
2912 }
2913 }
2914 if (dolink) {
2915 error = nfs_linkrpc(dvp, vp, sp->s_name, sp->s_namlen,
2916 sp->s_cred, curlwp);
2917 /*
2918 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
2919 */
2920 if (error == ENOTSUP) {
2921 error = nfs_renameit(dvp, cnp, sp);
2922 }
2923 } else {
2924 error = nfs_renameit(dvp, cnp, sp);
2925 }
2926 if (error)
2927 goto bad;
2928 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
2929 curlwp, &np);
2930 np->n_sillyrename = sp;
2931 return (0);
2932 bad:
2933 vrele(sp->s_dvp);
2934 kauth_cred_free(sp->s_cred);
2935 kmem_free(sp, sizeof(*sp));
2936 return (error);
2937 }
2938 
2939 /*
2940 * Look up a file name and optionally either update the file handle or
2941 * allocate an nfsnode, depending on the value of npp.
2942 * npp == NULL --> just do the lookup 2943 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2944 * handled too 2945 * *npp != NULL --> update the file handle in the vnode 2946 */ 2947 int 2948 nfs_lookitup(struct vnode *dvp, const char *name, int len, kauth_cred_t cred, struct lwp *l, struct nfsnode **npp) 2949 { 2950 u_int32_t *tl; 2951 char *cp; 2952 int32_t t1, t2; 2953 struct vnode *newvp = (struct vnode *)0; 2954 struct nfsnode *np, *dnp = VTONFS(dvp); 2955 char *bpos, *dpos, *cp2; 2956 int error = 0, fhlen; 2957 #ifndef NFS_V2_ONLY 2958 int attrflag; 2959 #endif 2960 struct mbuf *mreq, *mrep, *md, *mb; 2961 nfsfh_t *nfhp; 2962 const int v3 = NFS_ISV3(dvp); 2963 2964 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2965 nfsm_reqhead(dnp, NFSPROC_LOOKUP, 2966 NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 2967 nfsm_fhtom(dnp, v3); 2968 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2969 nfsm_request(dnp, NFSPROC_LOOKUP, l, cred); 2970 if (npp && !error) { 2971 nfsm_getfh(nfhp, fhlen, v3); 2972 if (*npp) { 2973 np = *npp; 2974 if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) { 2975 kmem_free(np->n_fhp, np->n_fhsize); 2976 np->n_fhp = &np->n_fh; 2977 } 2978 #if NFS_SMALLFH < NFSX_V3FHMAX 2979 else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH) 2980 np->n_fhp = kmem_alloc(fhlen, KM_SLEEP); 2981 #endif 2982 memcpy(np->n_fhp, nfhp, fhlen); 2983 np->n_fhsize = fhlen; 2984 newvp = NFSTOV(np); 2985 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2986 vref(dvp); 2987 newvp = dvp; 2988 np = dnp; 2989 } else { 2990 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2991 if (error) { 2992 m_freem(mrep); 2993 return (error); 2994 } 2995 newvp = NFSTOV(np); 2996 } 2997 #ifndef NFS_V2_ONLY 2998 if (v3) { 2999 nfsm_postop_attr(newvp, attrflag, 0); 3000 if (!attrflag && *npp == NULL) { 3001 m_freem(mrep); 3002 vput(newvp); 3003 return (ENOENT); 3004 } 3005 } else 3006 #endif 3007 nfsm_loadattr(newvp, (struct vattr *)0, 0); 3008 } 3009 nfsm_reqdone; 3010 if (npp && *npp == NULL) { 3011 if (error) { 3012 if (newvp) 3013 vput(newvp); 3014 } else 3015 *npp = np; 3016 } 3017 return (error); 3018 } 3019 3020 #ifndef NFS_V2_ONLY 3021 /* 3022 * Nfs Version 3 commit rpc 3023 */ 3024 int 3025 nfs_commit(struct vnode *vp, off_t offset, uint32_t cnt, struct lwp *l) 3026 { 3027 char *cp; 3028 u_int32_t *tl; 3029 int32_t t1, t2; 3030 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 3031 char *bpos, *dpos, *cp2; 3032 int error = 0, wccflag = NFSV3_WCCRATTR; 3033 struct mbuf *mreq, *mrep, *md, *mb; 3034 struct nfsnode *np; 3035 3036 KASSERT(NFS_ISV3(vp)); 3037 3038 #ifdef NFS_DEBUG_COMMIT 3039 printf("commit %lu - %lu\n", (unsigned long)offset, 3040 (unsigned long)(offset + cnt)); 3041 #endif 3042 3043 mutex_enter(&nmp->nm_lock); 3044 if ((nmp->nm_iflag & NFSMNT_HASWRITEVERF) == 0) { 3045 mutex_exit(&nmp->nm_lock); 3046 return (0); 3047 } 3048 mutex_exit(&nmp->nm_lock); 3049 nfsstats.rpccnt[NFSPROC_COMMIT]++; 3050 np = VTONFS(vp); 3051 nfsm_reqhead(np, NFSPROC_COMMIT, NFSX_FH(1)); 3052 nfsm_fhtom(np, 1); 3053 nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 3054 txdr_hyper(offset, tl); 3055 tl += 2; 3056 *tl = txdr_unsigned(cnt); 3057 nfsm_request(np, NFSPROC_COMMIT, l, np->n_wcred); 3058 nfsm_wcc_data(vp, wccflag, NAC_NOTRUNC, false); 3059 if (!error) { 3060 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF); 3061 mutex_enter(&nmp->nm_lock); 3062 if ((nmp->nm_iflag & NFSMNT_STALEWRITEVERF) || 3063 memcmp(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF)) { 3064 memcpy(nmp->nm_writeverf, tl, NFSX_V3WRITEVERF); 3065 
error = NFSERR_STALEWRITEVERF;
3066 nmp->nm_iflag |= NFSMNT_STALEWRITEVERF;
3067 }
3068 mutex_exit(&nmp->nm_lock);
3069 }
3070 nfsm_reqdone;
3071 return (error);
3072 }
3073 #endif
3074 
3075 /*
3076 * Kludge City:
3077 * - make nfs_bmap() essentially a no-op that does no translation
3078 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
3079 * Maybe I could use the process's page mapping, but I was concerned that
3080 * Kernel Write might not be enabled and also figured copyout() would do
3081 * a lot more work than memcpy(); it also currently happens in the
3082 * context of the swapper process (2).
3083 */
3084 int
3085 nfs_bmap(void *v)
3086 {
3087 struct vop_bmap_args /* {
3088 struct vnode *a_vp;
3089 daddr_t a_bn;
3090 struct vnode **a_vpp;
3091 daddr_t *a_bnp;
3092 int *a_runp;
3093 } */ *ap = v;
3094 struct vnode *vp = ap->a_vp;
3095 int bshift = vp->v_mount->mnt_fs_bshift - vp->v_mount->mnt_dev_bshift;
3096 
3097 if (ap->a_vpp != NULL)
3098 *ap->a_vpp = vp;
3099 if (ap->a_bnp != NULL)
3100 *ap->a_bnp = ap->a_bn << bshift;
3101 if (ap->a_runp != NULL)
3102 *ap->a_runp = 1024 * 1024; /* XXX */
3103 return (0);
3104 }
3105 
3106 /*
3107 * Strategy routine.
3108 * For async requests when nfsiod(s) are running, queue the request by
3109 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
3110 * request.
3111 */
3112 int
3113 nfs_strategy(void *v)
3114 {
3115 struct vop_strategy_args *ap = v;
3116 struct buf *bp = ap->a_bp;
3117 int error = 0;
3118 
3119 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
3120 panic("nfs physio/async");
3121 
3122 /*
3123 * If the op is asynchronous and an i/o daemon is waiting,
3124 * queue the request, wake it up and wait for completion;
3125 * otherwise just do it ourselves.
3126 */
3127 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
3128 error = nfs_doio(bp);
3129 return (error);
3130 }
3131 
3132 /*
3133 * fsync vnode op. Just call nfs_flush() with commit == 1.
3134 */
3135 /* ARGSUSED */
3136 int
3137 nfs_fsync(void *v)
3138 {
3139 struct vop_fsync_args /* {
3140 struct vnodeop_desc *a_desc;
3141 struct vnode * a_vp;
3142 kauth_cred_t a_cred;
3143 int a_flags;
3144 off_t offlo;
3145 off_t offhi;
3146 struct lwp * a_l;
3147 } */ *ap = v;
3148 
3149 struct vnode *vp = ap->a_vp;
3150 
3151 if (vp->v_type != VREG)
3152 return 0;
3153 
3154 return (nfs_flush(vp, ap->a_cred,
3155 (ap->a_flags & FSYNC_WAIT) != 0 ? MNT_WAIT : 0, curlwp, 1));
3156 }
3157 
3158 /*
3159 * Flush all the data associated with a vnode.
3160 */
3161 int
3162 nfs_flush(struct vnode *vp, kauth_cred_t cred, int waitfor, struct lwp *l,
3163 int commit)
3164 {
3165 struct nfsnode *np = VTONFS(vp);
3166 int error;
3167 int flushflags = PGO_ALLPAGES|PGO_CLEANIT|PGO_SYNCIO;
3168 UVMHIST_FUNC("nfs_flush"); UVMHIST_CALLED(ubchist);
3169 
3170 mutex_enter(&vp->v_interlock);
3171 error = VOP_PUTPAGES(vp, 0, 0, flushflags);
3172 if (np->n_flag & NWRITEERR) {
3173 error = np->n_error;
3174 np->n_flag &= ~NWRITEERR;
3175 }
3176 UVMHIST_LOG(ubchist, "returning %d", error,0,0,0);
3177 return (error);
3178 }
3179 
3180 /*
3181 * Return POSIX pathconf information applicable to nfs.
3182 *
3183 * N.B. The NFS V2 protocol doesn't support this RPC.
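 * On a v2 mount the names that require the RPC therefore fail with
 * EINVAL, and _PC_FILESIZEBITS falls back to the protocol's 32-bit
 * file size limit.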
3184 */ 3185 /* ARGSUSED */ 3186 int 3187 nfs_pathconf(void *v) 3188 { 3189 struct vop_pathconf_args /* { 3190 struct vnode *a_vp; 3191 int a_name; 3192 register_t *a_retval; 3193 } */ *ap = v; 3194 struct nfsv3_pathconf *pcp; 3195 struct vnode *vp = ap->a_vp; 3196 struct mbuf *mreq, *mrep, *md, *mb; 3197 int32_t t1, t2; 3198 u_int32_t *tl; 3199 char *bpos, *dpos, *cp, *cp2; 3200 int error = 0, attrflag; 3201 #ifndef NFS_V2_ONLY 3202 struct nfsmount *nmp; 3203 unsigned int l; 3204 u_int64_t maxsize; 3205 #endif 3206 const int v3 = NFS_ISV3(vp); 3207 struct nfsnode *np = VTONFS(vp); 3208 3209 switch (ap->a_name) { 3210 /* Names that can be resolved locally. */ 3211 case _PC_PIPE_BUF: 3212 *ap->a_retval = PIPE_BUF; 3213 break; 3214 case _PC_SYNC_IO: 3215 *ap->a_retval = 1; 3216 break; 3217 /* Names that cannot be resolved locally; do an RPC, if possible. */ 3218 case _PC_LINK_MAX: 3219 case _PC_NAME_MAX: 3220 case _PC_CHOWN_RESTRICTED: 3221 case _PC_NO_TRUNC: 3222 if (!v3) { 3223 error = EINVAL; 3224 break; 3225 } 3226 nfsstats.rpccnt[NFSPROC_PATHCONF]++; 3227 nfsm_reqhead(np, NFSPROC_PATHCONF, NFSX_FH(1)); 3228 nfsm_fhtom(np, 1); 3229 nfsm_request(np, NFSPROC_PATHCONF, 3230 curlwp, curlwp->l_cred); /* XXX */ 3231 nfsm_postop_attr(vp, attrflag, 0); 3232 if (!error) { 3233 nfsm_dissect(pcp, struct nfsv3_pathconf *, 3234 NFSX_V3PATHCONF); 3235 switch (ap->a_name) { 3236 case _PC_LINK_MAX: 3237 *ap->a_retval = 3238 fxdr_unsigned(register_t, pcp->pc_linkmax); 3239 break; 3240 case _PC_NAME_MAX: 3241 *ap->a_retval = 3242 fxdr_unsigned(register_t, pcp->pc_namemax); 3243 break; 3244 case _PC_CHOWN_RESTRICTED: 3245 *ap->a_retval = 3246 (pcp->pc_chownrestricted == nfs_true); 3247 break; 3248 case _PC_NO_TRUNC: 3249 *ap->a_retval = 3250 (pcp->pc_notrunc == nfs_true); 3251 break; 3252 } 3253 } 3254 nfsm_reqdone; 3255 break; 3256 case _PC_FILESIZEBITS: 3257 #ifndef NFS_V2_ONLY 3258 if (v3) { 3259 nmp = VFSTONFS(vp->v_mount); 3260 if ((nmp->nm_iflag & NFSMNT_GOTFSINFO) == 0) 3261 if ((error = nfs_fsinfo(nmp, vp, 3262 curlwp->l_cred, curlwp)) != 0) /* XXX */ 3263 break; 3264 for (l = 0, maxsize = nmp->nm_maxfilesize; 3265 (maxsize >> l) > 0; l++) 3266 ; 3267 *ap->a_retval = l + 1; 3268 } else 3269 #endif 3270 { 3271 *ap->a_retval = 32; /* NFS V2 limitation */ 3272 } 3273 break; 3274 default: 3275 error = EINVAL; 3276 break; 3277 } 3278 3279 return (error); 3280 } 3281 3282 /* 3283 * NFS advisory byte-level locks. 3284 */ 3285 int 3286 nfs_advlock(void *v) 3287 { 3288 struct vop_advlock_args /* { 3289 struct vnode *a_vp; 3290 void *a_id; 3291 int a_op; 3292 struct flock *a_fl; 3293 int a_flags; 3294 } */ *ap = v; 3295 struct nfsnode *np = VTONFS(ap->a_vp); 3296 3297 return lf_advlock(ap, &np->n_lockf, np->n_size); 3298 } 3299 3300 /* 3301 * Print out the contents of an nfsnode. 3302 */ 3303 int 3304 nfs_print(void *v) 3305 { 3306 struct vop_print_args /* { 3307 struct vnode *a_vp; 3308 } */ *ap = v; 3309 struct vnode *vp = ap->a_vp; 3310 struct nfsnode *np = VTONFS(vp); 3311 3312 printf("tag VT_NFS, fileid %lld fsid 0x%llx", 3313 (unsigned long long)np->n_vattr->va_fileid, 3314 (unsigned long long)np->n_vattr->va_fsid); 3315 if (vp->v_type == VFIFO) 3316 VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v); 3317 printf("\n"); 3318 return (0); 3319 } 3320 3321 /* 3322 * nfs unlock wrapper. 
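 * Before handing off to genfs_unlock(), apply any file size
 * truncation that nfs_loadattrcache() had to postpone (see
 * nfs_delayedtruncate()).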
3323 */ 3324 int 3325 nfs_unlock(void *v) 3326 { 3327 struct vop_unlock_args /* { 3328 struct vnode *a_vp; 3329 int a_flags; 3330 } */ *ap = v; 3331 struct vnode *vp = ap->a_vp; 3332 3333 /* 3334 * VOP_UNLOCK can be called by nfs_loadattrcache 3335 * with v_data == 0. 3336 */ 3337 if (VTONFS(vp)) { 3338 nfs_delayedtruncate(vp); 3339 } 3340 3341 return genfs_unlock(v); 3342 } 3343 3344 /* 3345 * nfs special file access vnode op. 3346 * Essentially just get vattr and then imitate iaccess() since the device is 3347 * local to the client. 3348 */ 3349 int 3350 nfsspec_access(void *v) 3351 { 3352 struct vop_access_args /* { 3353 struct vnode *a_vp; 3354 int a_mode; 3355 kauth_cred_t a_cred; 3356 struct lwp *a_l; 3357 } */ *ap = v; 3358 struct vattr va; 3359 struct vnode *vp = ap->a_vp; 3360 int error; 3361 3362 error = VOP_GETATTR(vp, &va, ap->a_cred); 3363 if (error) 3364 return (error); 3365 3366 /* 3367 * Disallow write attempts on filesystems mounted read-only; 3368 * unless the file is a socket, fifo, or a block or character 3369 * device resident on the filesystem. 3370 */ 3371 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3372 switch (vp->v_type) { 3373 case VREG: 3374 case VDIR: 3375 case VLNK: 3376 return (EROFS); 3377 default: 3378 break; 3379 } 3380 } 3381 3382 return (genfs_can_access(va.va_type, va.va_mode, 3383 va.va_uid, va.va_gid, ap->a_mode, ap->a_cred)); 3384 } 3385 3386 /* 3387 * Read wrapper for special devices. 3388 */ 3389 int 3390 nfsspec_read(void *v) 3391 { 3392 struct vop_read_args /* { 3393 struct vnode *a_vp; 3394 struct uio *a_uio; 3395 int a_ioflag; 3396 kauth_cred_t a_cred; 3397 } */ *ap = v; 3398 struct nfsnode *np = VTONFS(ap->a_vp); 3399 3400 /* 3401 * Set access flag. 3402 */ 3403 np->n_flag |= NACC; 3404 getnanotime(&np->n_atim); 3405 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap)); 3406 } 3407 3408 /* 3409 * Write wrapper for special devices. 3410 */ 3411 int 3412 nfsspec_write(void *v) 3413 { 3414 struct vop_write_args /* { 3415 struct vnode *a_vp; 3416 struct uio *a_uio; 3417 int a_ioflag; 3418 kauth_cred_t a_cred; 3419 } */ *ap = v; 3420 struct nfsnode *np = VTONFS(ap->a_vp); 3421 3422 /* 3423 * Set update flag. 3424 */ 3425 np->n_flag |= NUPD; 3426 getnanotime(&np->n_mtim); 3427 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap)); 3428 } 3429 3430 /* 3431 * Close wrapper for special devices. 3432 * 3433 * Update the times on the nfsnode then do device close. 3434 */ 3435 int 3436 nfsspec_close(void *v) 3437 { 3438 struct vop_close_args /* { 3439 struct vnode *a_vp; 3440 int a_fflag; 3441 kauth_cred_t a_cred; 3442 struct lwp *a_l; 3443 } */ *ap = v; 3444 struct vnode *vp = ap->a_vp; 3445 struct nfsnode *np = VTONFS(vp); 3446 struct vattr vattr; 3447 3448 if (np->n_flag & (NACC | NUPD)) { 3449 np->n_flag |= NCHG; 3450 if (vp->v_usecount == 1 && 3451 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3452 vattr_null(&vattr); 3453 if (np->n_flag & NACC) 3454 vattr.va_atime = np->n_atim; 3455 if (np->n_flag & NUPD) 3456 vattr.va_mtime = np->n_mtim; 3457 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3458 } 3459 } 3460 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap)); 3461 } 3462 3463 /* 3464 * Read wrapper for fifos. 3465 */ 3466 int 3467 nfsfifo_read(void *v) 3468 { 3469 struct vop_read_args /* { 3470 struct vnode *a_vp; 3471 struct uio *a_uio; 3472 int a_ioflag; 3473 kauth_cred_t a_cred; 3474 } */ *ap = v; 3475 struct nfsnode *np = VTONFS(ap->a_vp); 3476 3477 /* 3478 * Set access flag. 
3479 */ 3480 np->n_flag |= NACC; 3481 getnanotime(&np->n_atim); 3482 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap)); 3483 } 3484 3485 /* 3486 * Write wrapper for fifos. 3487 */ 3488 int 3489 nfsfifo_write(void *v) 3490 { 3491 struct vop_write_args /* { 3492 struct vnode *a_vp; 3493 struct uio *a_uio; 3494 int a_ioflag; 3495 kauth_cred_t a_cred; 3496 } */ *ap = v; 3497 struct nfsnode *np = VTONFS(ap->a_vp); 3498 3499 /* 3500 * Set update flag. 3501 */ 3502 np->n_flag |= NUPD; 3503 getnanotime(&np->n_mtim); 3504 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); 3505 } 3506 3507 /* 3508 * Close wrapper for fifos. 3509 * 3510 * Update the times on the nfsnode then do fifo close. 3511 */ 3512 int 3513 nfsfifo_close(void *v) 3514 { 3515 struct vop_close_args /* { 3516 struct vnode *a_vp; 3517 int a_fflag; 3518 kauth_cred_t a_cred; 3519 struct lwp *a_l; 3520 } */ *ap = v; 3521 struct vnode *vp = ap->a_vp; 3522 struct nfsnode *np = VTONFS(vp); 3523 struct vattr vattr; 3524 3525 if (np->n_flag & (NACC | NUPD)) { 3526 struct timespec ts; 3527 3528 getnanotime(&ts); 3529 if (np->n_flag & NACC) 3530 np->n_atim = ts; 3531 if (np->n_flag & NUPD) 3532 np->n_mtim = ts; 3533 np->n_flag |= NCHG; 3534 if (vp->v_usecount == 1 && 3535 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3536 vattr_null(&vattr); 3537 if (np->n_flag & NACC) 3538 vattr.va_atime = np->n_atim; 3539 if (np->n_flag & NUPD) 3540 vattr.va_mtime = np->n_mtim; 3541 (void)VOP_SETATTR(vp, &vattr, ap->a_cred); 3542 } 3543 } 3544 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap)); 3545 } 3546