/*	$OpenBSD: nfs_vnops.c,v 1.111 2009/01/24 23:37:56 thib Exp $	*/
/*	$NetBSD: nfs_vnops.c,v 1.62.4.1 1996/07/08 20:26:52 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_vnops.c	8.16 (Berkeley) 5/27/95
 */

/*
 * vnode op calls for Sun NFS version 2 and 3
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/poll.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/conf.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/hash.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfs_var.h>

#include <net/if.h>
#include <netinet/in.h>
#include <netinet/in_var.h>

#include <dev/rndvar.h>

void nfs_cache_enter(struct vnode *, struct vnode *, struct componentname *);

/*
 * Global vfs data structures for nfs
 */
int (**nfsv2_vnodeop_p)(void *);
struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = {
	{ &vop_default_desc, eopnotsupp },
	{ &vop_lookup_desc, nfs_lookup },	/* lookup */
	{ &vop_create_desc, nfs_create },	/* create */
	{ &vop_mknod_desc, nfs_mknod },		/* mknod */
	{ &vop_open_desc, nfs_open },		/* open */
	{ &vop_close_desc, nfs_close },		/* close */
	{ &vop_access_desc, nfs_access },	/* access */
	{ &vop_getattr_desc, nfs_getattr },	/* getattr */
	{ &vop_setattr_desc, nfs_setattr },	/* setattr */
	{ &vop_read_desc, nfs_read },		/* read */
	{ &vop_write_desc, nfs_write },		/* write */
	{ &vop_ioctl_desc, nfs_ioctl },		/* ioctl */
	{ &vop_poll_desc, nfs_poll },		/* poll */
	{ &vop_kqfilter_desc, nfs_kqfilter },	/* kqfilter */
	{ &vop_revoke_desc, vop_generic_revoke },	/* revoke */
	{ &vop_fsync_desc, nfs_fsync },		/* fsync */
	{ &vop_remove_desc, nfs_remove },	/* remove */
	{ &vop_link_desc, nfs_link },		/* link */
	{ &vop_rename_desc, nfs_rename },	/* rename */
	{ &vop_mkdir_desc, nfs_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, nfs_rmdir },		/* rmdir */
	{ &vop_symlink_desc, nfs_symlink },	/* symlink */
	{ &vop_readdir_desc, nfs_readdir },	/* readdir */
	{ &vop_readlink_desc, nfs_readlink },	/* readlink */
	{ &vop_abortop_desc, vop_generic_abortop },	/* abortop */
	{ &vop_inactive_desc, nfs_inactive },	/* inactive */
	{ &vop_reclaim_desc, nfs_reclaim },	/* reclaim */
	{ &vop_lock_desc, vop_generic_lock },	/* lock */
	{ &vop_unlock_desc, vop_generic_unlock },	/* unlock */
	{ &vop_bmap_desc, nfs_bmap },		/* bmap */
	{ &vop_strategy_desc, nfs_strategy },	/* strategy */
	{ &vop_print_desc, nfs_print },		/* print */
	{ &vop_islocked_desc, vop_generic_islocked },	/* islocked */
	{ &vop_pathconf_desc, nfs_pathconf },	/* pathconf */
	{ &vop_advlock_desc, nfs_advlock },	/* advlock */
	{ &vop_bwrite_desc, nfs_bwrite },
	{ NULL, NULL }
};
struct vnodeopv_desc nfsv2_vnodeop_opv_desc =
	{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries };

/*
 * Special device vnode ops
 */
int (**spec_nfsv2nodeop_p)(void *);
struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = {
	{ &vop_default_desc, spec_vnoperate },
	{ &vop_close_desc, nfsspec_close },	/* close */
	{
&vop_access_desc, nfsspec_access }, /* access */ 140 { &vop_getattr_desc, nfs_getattr }, /* getattr */ 141 { &vop_setattr_desc, nfs_setattr }, /* setattr */ 142 { &vop_read_desc, nfsspec_read }, /* read */ 143 { &vop_write_desc, nfsspec_write }, /* write */ 144 { &vop_fsync_desc, nfs_fsync }, /* fsync */ 145 { &vop_inactive_desc, nfs_inactive }, /* inactive */ 146 { &vop_reclaim_desc, nfs_reclaim }, /* reclaim */ 147 { &vop_lock_desc, vop_generic_lock }, /* lock */ 148 { &vop_unlock_desc, vop_generic_unlock }, /* unlock */ 149 { &vop_print_desc, nfs_print }, /* print */ 150 { &vop_islocked_desc, vop_generic_islocked }, /* islocked */ 151 { NULL, NULL } 152 }; 153 struct vnodeopv_desc spec_nfsv2nodeop_opv_desc = 154 { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries }; 155 156 #ifdef FIFO 157 int (**fifo_nfsv2nodeop_p)(void *); 158 struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = { 159 { &vop_default_desc, fifo_vnoperate }, 160 { &vop_close_desc, nfsfifo_close }, /* close */ 161 { &vop_access_desc, nfsspec_access }, /* access */ 162 { &vop_getattr_desc, nfs_getattr }, /* getattr */ 163 { &vop_setattr_desc, nfs_setattr }, /* setattr */ 164 { &vop_read_desc, nfsfifo_read }, /* read */ 165 { &vop_write_desc, nfsfifo_write }, /* write */ 166 { &vop_fsync_desc, nfs_fsync }, /* fsync */ 167 { &vop_inactive_desc, nfs_inactive }, /* inactive */ 168 { &vop_reclaim_desc, nfsfifo_reclaim }, /* reclaim */ 169 { &vop_lock_desc, vop_generic_lock }, /* lock */ 170 { &vop_unlock_desc, vop_generic_unlock }, /* unlock */ 171 { &vop_print_desc, nfs_print }, /* print */ 172 { &vop_islocked_desc, vop_generic_islocked }, /* islocked */ 173 { &vop_bwrite_desc, vop_generic_bwrite }, 174 { NULL, NULL } 175 }; 176 struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc = 177 { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries }; 178 #endif /* FIFO */ 179 180 /* 181 * Global variables 182 */ 183 extern u_int32_t nfs_true, nfs_false; 184 extern u_int32_t nfs_xdrneg1; 185 extern struct nfsstats nfsstats; 186 extern nfstype nfsv3_type[9]; 187 int nfs_numasync = 0; 188 189 190 void 191 nfs_cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 192 { 193 struct nfsnode *np; 194 195 if (vp != NULL) { 196 np = VTONFS(vp); 197 np->n_ctime = np->n_vattr.va_ctime.tv_sec; 198 } else { 199 np = VTONFS(dvp); 200 if (!np->n_ctime) 201 np->n_ctime = np->n_vattr.va_mtime.tv_sec; 202 } 203 204 cache_enter(dvp, vp, cnp); 205 } 206 207 /* 208 * nfs null call from vfs. 209 */ 210 int 211 nfs_null(vp, cred, procp) 212 struct vnode *vp; 213 struct ucred *cred; 214 struct proc *procp; 215 { 216 caddr_t dpos; 217 int error = 0; 218 struct mbuf *mreq, *mrep, *md, *mb; 219 220 mb = mreq = nfsm_reqhead(0); 221 nfsm_request(vp, NFSPROC_NULL, procp, cred); 222 m_freem(mrep); 223 nfsmout: 224 return (error); 225 } 226 227 /* 228 * nfs access vnode op. 229 * For nfs version 2, just return ok. File accesses may fail later. 230 * For nfs version 3, use the access rpc to check accessibility. If file modes 231 * are changed on the server, accesses might still fail later. 
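 *
 * The result of the most recent ACCESS rpc is cached in the nfsnode
 * (n_accstamp, n_accuid, n_accmode, n_accerror) for at most the
 * attribute timeout and only for the same uid.  In rough terms the
 * fast path below is:
 *
 *	if (cached result was success && (n_accmode & mode) == mode)
 *		return (0);		every requested bit was granted
 *	if (cached result was an error && (n_accmode & mode) == n_accmode)
 *		return (n_accerror);	request covers all the denied bits
 *
 * Anything else falls through to a fresh ACCESS rpc (v3) or to the
 * local nfsspec_access() emulation (v2).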
232 */ 233 int 234 nfs_access(v) 235 void *v; 236 { 237 struct vop_access_args *ap = v; 238 struct vnode *vp = ap->a_vp; 239 u_int32_t *tl; 240 int32_t t1; 241 caddr_t dpos, cp2; 242 int error = 0, attrflag; 243 struct mbuf *mreq, *mrep, *md, *mb; 244 u_int32_t mode, rmode; 245 int v3 = NFS_ISV3(vp); 246 int cachevalid; 247 248 struct nfsnode *np = VTONFS(vp); 249 250 /* 251 * Disallow write attempts on filesystems mounted read-only; 252 * unless the file is a socket, fifo, or a block or character 253 * device resident on the filesystem. 254 */ 255 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 256 switch (vp->v_type) { 257 case VREG: 258 case VDIR: 259 case VLNK: 260 return (EROFS); 261 default: 262 break; 263 } 264 } 265 266 /* 267 * Check access cache first. If a request has been made for this uid 268 * shortly before, use the cached result. 269 */ 270 cachevalid = (np->n_accstamp != -1 && 271 (time_second - np->n_accstamp) < nfs_attrtimeo(np) && 272 np->n_accuid == ap->a_cred->cr_uid); 273 274 if (cachevalid) { 275 if (!np->n_accerror) { 276 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 277 return (np->n_accerror); 278 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode) 279 return (np->n_accerror); 280 } 281 282 /* 283 * For nfs v3, do an access rpc, otherwise you are stuck emulating 284 * ufs_access() locally using the vattr. This may not be correct, 285 * since the server may apply other access criteria such as 286 * client uid-->server uid mapping that we do not know about, but 287 * this is better than just returning anything that is lying about 288 * in the cache. 289 */ 290 if (v3) { 291 nfsstats.rpccnt[NFSPROC_ACCESS]++; 292 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED); 293 nfsm_fhtom(vp, v3); 294 tl = nfsm_build(&mb, NFSX_UNSIGNED); 295 if (ap->a_mode & VREAD) 296 mode = NFSV3ACCESS_READ; 297 else 298 mode = 0; 299 if (vp->v_type == VDIR) { 300 if (ap->a_mode & VWRITE) 301 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 302 NFSV3ACCESS_DELETE); 303 if (ap->a_mode & VEXEC) 304 mode |= NFSV3ACCESS_LOOKUP; 305 } else { 306 if (ap->a_mode & VWRITE) 307 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 308 if (ap->a_mode & VEXEC) 309 mode |= NFSV3ACCESS_EXECUTE; 310 } 311 *tl = txdr_unsigned(mode); 312 nfsm_request(vp, NFSPROC_ACCESS, ap->a_p, ap->a_cred); 313 nfsm_postop_attr(vp, attrflag); 314 if (!error) { 315 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 316 rmode = fxdr_unsigned(u_int32_t, *tl); 317 /* 318 * The NFS V3 spec does not clarify whether or not 319 * the returned access bits can be a superset of 320 * the ones requested, so... 321 */ 322 if ((rmode & mode) != mode) 323 error = EACCES; 324 } 325 m_freem(mrep); 326 } else 327 return (nfsspec_access(ap)); 328 329 330 /* 331 * If we got the same result as for a previous, different request, OR 332 * it in. Don't update the timestamp in that case. 333 */ 334 if (!error || error == EACCES) { 335 if (cachevalid && np->n_accstamp != -1 && 336 error == np->n_accerror) { 337 if (!error) 338 np->n_accmode |= ap->a_mode; 339 else { 340 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 341 np->n_accmode = ap->a_mode; 342 } 343 } else { 344 np->n_accstamp = time_second; 345 np->n_accuid = ap->a_cred->cr_uid; 346 np->n_accmode = ap->a_mode; 347 np->n_accerror = error; 348 } 349 } 350 nfsmout: 351 return (error); 352 } 353 354 /* 355 * nfs open vnode op 356 * Check to see if the type is ok 357 * and that deletion is not in progress. 
358 * For paged in text files, you will need to flush the page cache 359 * if consistency is lost. 360 */ 361 int 362 nfs_open(v) 363 void *v; 364 { 365 struct vop_open_args *ap = v; 366 struct vnode *vp = ap->a_vp; 367 struct nfsnode *np = VTONFS(vp); 368 struct vattr vattr; 369 int error; 370 371 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 372 #ifdef DIAGNOSTIC 373 printf("open eacces vtyp=%d\n",vp->v_type); 374 #endif 375 return (EACCES); 376 } 377 378 /* 379 * Initialize read and write creds here, for swapfiles 380 * and other paths that don't set the creds themselves. 381 */ 382 383 if (ap->a_mode & FREAD) { 384 if (np->n_rcred) { 385 crfree(np->n_rcred); 386 } 387 np->n_rcred = ap->a_cred; 388 crhold(np->n_rcred); 389 } 390 if (ap->a_mode & FWRITE) { 391 if (np->n_wcred) { 392 crfree(np->n_wcred); 393 } 394 np->n_wcred = ap->a_cred; 395 crhold(np->n_wcred); 396 } 397 398 if (np->n_flag & NMODIFIED) { 399 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 400 if (error == EINTR) 401 return (error); 402 uvm_vnp_uncache(vp); 403 NFS_INVALIDATE_ATTRCACHE(np); 404 if (vp->v_type == VDIR) 405 np->n_direofoffset = 0; 406 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 407 if (error) 408 return (error); 409 np->n_mtime = vattr.va_mtime; 410 } else { 411 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 412 if (error) 413 return (error); 414 if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) { 415 if (vp->v_type == VDIR) 416 np->n_direofoffset = 0; 417 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 418 if (error == EINTR) 419 return (error); 420 uvm_vnp_uncache(vp); 421 np->n_mtime = vattr.va_mtime; 422 } 423 } 424 /* For open/close consistency. */ 425 NFS_INVALIDATE_ATTRCACHE(np); 426 return (0); 427 } 428 429 /* 430 * nfs close vnode op 431 * What an NFS client should do upon close after writing is a debatable issue. 432 * Most NFS clients push delayed writes to the server upon close, basically for 433 * two reasons: 434 * 1 - So that any write errors may be reported back to the client process 435 * doing the close system call. By far the two most likely errors are 436 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 437 * 2 - To put a worst case upper bound on cache inconsistency between 438 * multiple clients for the file. 439 * There is also a consistency problem for Version 2 of the protocol w.r.t. 440 * not being able to tell if other clients are writing a file concurrently, 441 * since there is no way of knowing if the changed modify time in the reply 442 * is only due to the write for this client. 443 * (NFS Version 3 provides weak cache consistency data in the reply that 444 * should be sufficient to detect and handle this case.) 445 * 446 * The current code does the following: 447 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 448 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 449 * or commit them (this satisfies 1 and 2 except for the 450 * case where the server crashes after this close but 451 * before the commit RPC, which is felt to be "good 452 * enough". Changing the last argument to nfs_flush() to 453 * a 1 would force a commit operation, if it is felt a 454 * commit is necessary now. 
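 *
 * Concretely, a stricter policy could be had by changing the v3 call
 * below from
 *
 *	error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0);
 * to
 *	error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 1);
 *
 * which would also commit the flushed buffers before close returns,
 * at the cost of extra commit work on every close of a modified file.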
455 */ 456 int 457 nfs_close(v) 458 void *v; 459 { 460 struct vop_close_args *ap = v; 461 struct vnode *vp = ap->a_vp; 462 struct nfsnode *np = VTONFS(vp); 463 int error = 0; 464 465 if (vp->v_type == VREG) { 466 if (np->n_flag & NMODIFIED) { 467 if (NFS_ISV3(vp)) { 468 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0); 469 np->n_flag &= ~NMODIFIED; 470 } else 471 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 472 NFS_INVALIDATE_ATTRCACHE(np); 473 } 474 if (np->n_flag & NWRITEERR) { 475 np->n_flag &= ~NWRITEERR; 476 error = np->n_error; 477 } 478 } 479 return (error); 480 } 481 482 /* 483 * nfs getattr call from vfs. 484 */ 485 int 486 nfs_getattr(v) 487 void *v; 488 { 489 struct vop_getattr_args *ap = v; 490 struct vnode *vp = ap->a_vp; 491 struct nfsnode *np = VTONFS(vp); 492 int32_t t1; 493 caddr_t dpos; 494 int error = 0; 495 struct mbuf *mreq, *mrep, *md, *mb; 496 int v3 = NFS_ISV3(vp); 497 498 /* 499 * Update local times for special files. 500 */ 501 if (np->n_flag & (NACC | NUPD)) 502 np->n_flag |= NCHG; 503 /* 504 * First look in the cache. 505 */ 506 if (nfs_getattrcache(vp, ap->a_vap) == 0) 507 return (0); 508 nfsstats.rpccnt[NFSPROC_GETATTR]++; 509 mb = mreq = nfsm_reqhead(NFSX_FH(v3)); 510 nfsm_fhtom(vp, v3); 511 nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred); 512 if (!error) 513 nfsm_loadattr(vp, ap->a_vap); 514 m_freem(mrep); 515 nfsmout: 516 return (error); 517 } 518 519 /* 520 * nfs setattr call. 521 */ 522 int 523 nfs_setattr(v) 524 void *v; 525 { 526 struct vop_setattr_args *ap = v; 527 struct vnode *vp = ap->a_vp; 528 struct nfsnode *np = VTONFS(vp); 529 struct vattr *vap = ap->a_vap; 530 int hint = NOTE_ATTRIB; 531 int error = 0; 532 u_quad_t tsize = 0; 533 534 /* 535 * Setting of flags is not supported. 536 */ 537 if (vap->va_flags != VNOVAL) 538 return (EOPNOTSUPP); 539 540 /* 541 * Disallow write attempts if the filesystem is mounted read-only. 542 */ 543 if ((vap->va_uid != (uid_t)VNOVAL || 544 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 545 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 546 (vp->v_mount->mnt_flag & MNT_RDONLY)) 547 return (EROFS); 548 if (vap->va_size != VNOVAL) { 549 switch (vp->v_type) { 550 case VDIR: 551 return (EISDIR); 552 case VCHR: 553 case VBLK: 554 case VSOCK: 555 case VFIFO: 556 if (vap->va_mtime.tv_sec == VNOVAL && 557 vap->va_atime.tv_sec == VNOVAL && 558 vap->va_mode == (mode_t)VNOVAL && 559 vap->va_uid == (uid_t)VNOVAL && 560 vap->va_gid == (gid_t)VNOVAL) 561 return (0); 562 vap->va_size = VNOVAL; 563 break; 564 default: 565 /* 566 * Disallow write attempts if the filesystem is 567 * mounted read-only. 
568 */ 569 if (vp->v_mount->mnt_flag & MNT_RDONLY) 570 return (EROFS); 571 if (vap->va_size == 0) 572 error = nfs_vinvalbuf(vp, 0, 573 ap->a_cred, ap->a_p); 574 else 575 error = nfs_vinvalbuf(vp, V_SAVE, 576 ap->a_cred, ap->a_p); 577 if (error) 578 return (error); 579 tsize = np->n_size; 580 np->n_size = np->n_vattr.va_size = vap->va_size; 581 uvm_vnp_setsize(vp, np->n_size); 582 }; 583 } else if ((vap->va_mtime.tv_sec != VNOVAL || 584 vap->va_atime.tv_sec != VNOVAL) && 585 vp->v_type == VREG && 586 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 587 ap->a_p)) == EINTR) 588 return (error); 589 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); 590 if (error && vap->va_size != VNOVAL) { 591 np->n_size = np->n_vattr.va_size = tsize; 592 uvm_vnp_setsize(vp, np->n_size); 593 } 594 595 if (vap->va_size != VNOVAL && vap->va_size < tsize) 596 hint |= NOTE_TRUNCATE; 597 598 VN_KNOTE(vp, hint); /* XXX setattrrpc? */ 599 600 return (error); 601 } 602 603 /* 604 * Do an nfs setattr rpc. 605 */ 606 int 607 nfs_setattrrpc(vp, vap, cred, procp) 608 struct vnode *vp; 609 struct vattr *vap; 610 struct ucred *cred; 611 struct proc *procp; 612 { 613 struct nfsv2_sattr *sp; 614 int32_t t1; 615 caddr_t dpos, cp2; 616 u_int32_t *tl; 617 int error = 0, wccflag = NFSV3_WCCRATTR; 618 struct mbuf *mreq, *mrep, *md, *mb; 619 int v3 = NFS_ISV3(vp); 620 621 nfsstats.rpccnt[NFSPROC_SETATTR]++; 622 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_SATTR(v3)); 623 nfsm_fhtom(vp, v3); 624 if (v3) { 625 nfsm_v3attrbuild(&mb, vap, 1); 626 tl = nfsm_build(&mb, NFSX_UNSIGNED); 627 *tl = nfs_false; 628 } else { 629 sp = nfsm_build(&mb, NFSX_V2SATTR); 630 if (vap->va_mode == (mode_t)VNOVAL) 631 sp->sa_mode = nfs_xdrneg1; 632 else 633 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 634 if (vap->va_uid == (uid_t)VNOVAL) 635 sp->sa_uid = nfs_xdrneg1; 636 else 637 sp->sa_uid = txdr_unsigned(vap->va_uid); 638 if (vap->va_gid == (gid_t)VNOVAL) 639 sp->sa_gid = nfs_xdrneg1; 640 else 641 sp->sa_gid = txdr_unsigned(vap->va_gid); 642 sp->sa_size = txdr_unsigned(vap->va_size); 643 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 644 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 645 } 646 nfsm_request(vp, NFSPROC_SETATTR, procp, cred); 647 if (v3) { 648 nfsm_wcc_data(vp, wccflag); 649 } else 650 nfsm_loadattr(vp, (struct vattr *)0); 651 m_freem(mrep); 652 nfsmout: 653 return (error); 654 } 655 656 /* 657 * nfs lookup call, one step at a time... 
658 * First look in cache 659 * If not found, unlock the directory nfsnode and do the rpc 660 */ 661 int 662 nfs_lookup(v) 663 void *v; 664 { 665 struct vop_lookup_args *ap = v; 666 struct componentname *cnp = ap->a_cnp; 667 struct vnode *dvp = ap->a_dvp; 668 struct vnode **vpp = ap->a_vpp; 669 struct proc *p = cnp->cn_proc; 670 int flags; 671 struct vnode *newvp; 672 u_int32_t *tl; 673 int32_t t1; 674 struct nfsmount *nmp; 675 caddr_t dpos, cp2; 676 struct mbuf *mreq, *mrep, *md, *mb; 677 long len; 678 nfsfh_t *fhp; 679 struct nfsnode *np; 680 int lockparent, wantparent, error = 0, attrflag, fhsize; 681 int v3 = NFS_ISV3(dvp); 682 683 cnp->cn_flags &= ~PDIRUNLOCK; 684 flags = cnp->cn_flags; 685 686 *vpp = NULLVP; 687 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 688 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 689 return (EROFS); 690 if (dvp->v_type != VDIR) 691 return (ENOTDIR); 692 lockparent = flags & LOCKPARENT; 693 wantparent = flags & (LOCKPARENT|WANTPARENT); 694 nmp = VFSTONFS(dvp->v_mount); 695 np = VTONFS(dvp); 696 697 /* 698 * Before tediously performing a linear scan of the directory, 699 * check the name cache to see if the directory/name pair 700 * we are looking for is known already. 701 * If the directory/name pair is found in the name cache, 702 * we have to ensure the directory has not changed from 703 * the time the cache entry has been created. If it has, 704 * the cache entry has to be ignored. 705 */ 706 if ((error = cache_lookup(dvp, vpp, cnp)) >= 0) { 707 struct vattr vattr; 708 int err2; 709 710 if (error && error != ENOENT) { 711 *vpp = NULLVP; 712 return (error); 713 } 714 715 if (cnp->cn_flags & PDIRUNLOCK) { 716 err2 = vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p); 717 if (err2 != 0) { 718 *vpp = NULLVP; 719 return (err2); 720 } 721 cnp->cn_flags &= ~PDIRUNLOCK; 722 } 723 724 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_proc); 725 if (err2 != 0) { 726 if (error == 0) { 727 if (*vpp != dvp) 728 vput(*vpp); 729 else 730 vrele(*vpp); 731 } 732 *vpp = NULLVP; 733 return (err2); 734 } 735 736 if (error == ENOENT) { 737 if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred, 738 cnp->cn_proc) && vattr.va_mtime.tv_sec == 739 VTONFS(dvp)->n_ctime) 740 return (ENOENT); 741 cache_purge(dvp); 742 np->n_ctime = 0; 743 goto dorpc; 744 } 745 746 newvp = *vpp; 747 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_proc) 748 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) 749 { 750 nfsstats.lookupcache_hits++; 751 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 752 cnp->cn_flags |= SAVENAME; 753 if ((!lockparent || !(flags & ISLASTCN)) && 754 newvp != dvp) 755 VOP_UNLOCK(dvp, 0, p); 756 return (0); 757 } 758 cache_purge(newvp); 759 if (newvp != dvp) 760 vput(newvp); 761 else 762 vrele(newvp); 763 *vpp = NULLVP; 764 } 765 dorpc: 766 error = 0; 767 newvp = NULLVP; 768 nfsstats.lookupcache_misses++; 769 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 770 len = cnp->cn_namelen; 771 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len)); 772 nfsm_fhtom(dvp, v3); 773 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 774 nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred); 775 if (error) { 776 nfsm_postop_attr(dvp, attrflag); 777 m_freem(mrep); 778 goto nfsmout; 779 } 780 nfsm_getfh(fhp, fhsize, v3); 781 782 /* 783 * Handle RENAME case... 
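 *
 * When the last component of a rename target is being looked up we must
 * not add a name cache entry (the name is about to be replaced), we set
 * SAVENAME so the pathname buffer stays around for the coming rename
 * rpc, and a target that resolves to the directory itself is rejected
 * with EISDIR.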
784 */ 785 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) { 786 if (NFS_CMPFH(np, fhp, fhsize)) { 787 m_freem(mrep); 788 return (EISDIR); 789 } 790 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 791 if (error) { 792 m_freem(mrep); 793 return (error); 794 } 795 newvp = NFSTOV(np); 796 if (v3) { 797 nfsm_postop_attr(newvp, attrflag); 798 nfsm_postop_attr(dvp, attrflag); 799 } else 800 nfsm_loadattr(newvp, (struct vattr *)0); 801 *vpp = newvp; 802 m_freem(mrep); 803 cnp->cn_flags |= SAVENAME; 804 if (!lockparent) { 805 VOP_UNLOCK(dvp, 0, p); 806 cnp->cn_flags |= PDIRUNLOCK; 807 } 808 return (0); 809 } 810 811 /* 812 * The postop attr handling is duplicated for each if case, 813 * because it should be done while dvp is locked (unlocking 814 * dvp is different for each case). 815 */ 816 817 if (NFS_CMPFH(np, fhp, fhsize)) { 818 VREF(dvp); 819 newvp = dvp; 820 if (v3) { 821 nfsm_postop_attr(newvp, attrflag); 822 nfsm_postop_attr(dvp, attrflag); 823 } else 824 nfsm_loadattr(newvp, (struct vattr *)0); 825 } else if (flags & ISDOTDOT) { 826 VOP_UNLOCK(dvp, 0, p); 827 cnp->cn_flags |= PDIRUNLOCK; 828 829 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 830 if (error) { 831 if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p) == 0) 832 cnp->cn_flags &= ~PDIRUNLOCK; 833 m_freem(mrep); 834 return (error); 835 } 836 newvp = NFSTOV(np); 837 838 if (v3) { 839 nfsm_postop_attr(newvp, attrflag); 840 nfsm_postop_attr(dvp, attrflag); 841 } else 842 nfsm_loadattr(newvp, (struct vattr *)0); 843 844 if (lockparent && (flags & ISLASTCN)) { 845 if ((error = vn_lock(dvp, LK_EXCLUSIVE, p))) { 846 m_freem(mrep); 847 vput(newvp); 848 return error; 849 } 850 cnp->cn_flags &= ~PDIRUNLOCK; 851 } 852 853 } else { 854 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 855 if (error) { 856 m_freem(mrep); 857 return error; 858 } 859 newvp = NFSTOV(np); 860 if (v3) { 861 nfsm_postop_attr(newvp, attrflag); 862 nfsm_postop_attr(dvp, attrflag); 863 } else 864 nfsm_loadattr(newvp, (struct vattr *)0); 865 if (!lockparent || !(flags & ISLASTCN)) { 866 VOP_UNLOCK(dvp, 0, p); 867 cnp->cn_flags |= PDIRUNLOCK; 868 } 869 } 870 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 871 cnp->cn_flags |= SAVENAME; 872 if ((cnp->cn_flags & MAKEENTRY) && 873 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) { 874 nfs_cache_enter(dvp, newvp, cnp); 875 } 876 *vpp = newvp; 877 m_freem(mrep); 878 nfsmout: 879 if (error) { 880 /* 881 * We get here only because of errors returned by 882 * the RPC. Otherwise we'll have returned above 883 * (the nfsm_* macros will jump to nfsmout 884 * on error). 885 */ 886 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) && 887 cnp->cn_nameiop != CREATE) { 888 nfs_cache_enter(dvp, NULL, cnp); 889 } 890 if (newvp != NULLVP) { 891 vrele(newvp); 892 if (newvp != dvp) 893 VOP_UNLOCK(newvp, 0, p); 894 } 895 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 896 (flags & ISLASTCN) && error == ENOENT) { 897 if (dvp->v_mount->mnt_flag & MNT_RDONLY) 898 error = EROFS; 899 else 900 error = EJUSTRETURN; 901 } 902 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 903 cnp->cn_flags |= SAVENAME; 904 *vpp = NULL; 905 } 906 return (error); 907 } 908 909 /* 910 * nfs read call. 911 * Just call nfs_bioread() to do the work. 
912 */ 913 int 914 nfs_read(v) 915 void *v; 916 { 917 struct vop_read_args *ap = v; 918 struct vnode *vp = ap->a_vp; 919 920 if (vp->v_type != VREG) 921 return (EPERM); 922 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred)); 923 } 924 925 /* 926 * nfs readlink call 927 */ 928 int 929 nfs_readlink(v) 930 void *v; 931 { 932 struct vop_readlink_args *ap = v; 933 struct vnode *vp = ap->a_vp; 934 935 if (vp->v_type != VLNK) 936 return (EPERM); 937 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred)); 938 } 939 940 /* 941 * Do a readlink rpc. 942 * Called by nfs_doio() from below the buffer cache. 943 */ 944 int 945 nfs_readlinkrpc(vp, uiop, cred) 946 struct vnode *vp; 947 struct uio *uiop; 948 struct ucred *cred; 949 { 950 u_int32_t *tl; 951 int32_t t1; 952 caddr_t dpos, cp2; 953 int error = 0, len, attrflag; 954 struct mbuf *mreq, *mrep, *md, *mb; 955 int v3 = NFS_ISV3(vp); 956 957 nfsstats.rpccnt[NFSPROC_READLINK]++; 958 mb = mreq = nfsm_reqhead(NFSX_FH(v3)); 959 nfsm_fhtom(vp, v3); 960 nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred); 961 if (v3) 962 nfsm_postop_attr(vp, attrflag); 963 if (!error) { 964 nfsm_strsiz(len, NFS_MAXPATHLEN); 965 nfsm_mtouio(uiop, len); 966 } 967 m_freem(mrep); 968 nfsmout: 969 return (error); 970 } 971 972 /* 973 * nfs read rpc call 974 * Ditto above 975 */ 976 int 977 nfs_readrpc(vp, uiop) 978 struct vnode *vp; 979 struct uio *uiop; 980 { 981 u_int32_t *tl; 982 int32_t t1; 983 caddr_t dpos, cp2; 984 struct mbuf *mreq, *mrep, *md, *mb; 985 struct nfsmount *nmp; 986 int error = 0, len, retlen, tsiz, eof, attrflag; 987 int v3 = NFS_ISV3(vp); 988 989 eof = 0; 990 991 nmp = VFSTONFS(vp->v_mount); 992 tsiz = uiop->uio_resid; 993 if (uiop->uio_offset + tsiz > 0xffffffff && !v3) 994 return (EFBIG); 995 while (tsiz > 0) { 996 nfsstats.rpccnt[NFSPROC_READ]++; 997 len = (tsiz > nmp->nm_rsize) ? 
nmp->nm_rsize : tsiz; 998 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED * 3); 999 nfsm_fhtom(vp, v3); 1000 tl = nfsm_build(&mb, NFSX_UNSIGNED * 3); 1001 if (v3) { 1002 txdr_hyper(uiop->uio_offset, tl); 1003 *(tl + 2) = txdr_unsigned(len); 1004 } else { 1005 *tl++ = txdr_unsigned(uiop->uio_offset); 1006 *tl++ = txdr_unsigned(len); 1007 *tl = 0; 1008 } 1009 nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, 1010 VTONFS(vp)->n_rcred); 1011 if (v3) { 1012 nfsm_postop_attr(vp, attrflag); 1013 if (error) { 1014 m_freem(mrep); 1015 goto nfsmout; 1016 } 1017 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1018 eof = fxdr_unsigned(int, *(tl + 1)); 1019 } else 1020 nfsm_loadattr(vp, (struct vattr *)0); 1021 nfsm_strsiz(retlen, nmp->nm_rsize); 1022 nfsm_mtouio(uiop, retlen); 1023 m_freem(mrep); 1024 tsiz -= retlen; 1025 if (v3) { 1026 if (eof || retlen == 0) 1027 tsiz = 0; 1028 } else if (retlen < len) 1029 tsiz = 0; 1030 } 1031 nfsmout: 1032 return (error); 1033 } 1034 1035 /* 1036 * nfs write call 1037 */ 1038 int 1039 nfs_writerpc(vp, uiop, iomode, must_commit) 1040 struct vnode *vp; 1041 struct uio *uiop; 1042 int *iomode, *must_commit; 1043 { 1044 u_int32_t *tl; 1045 int32_t t1, backup; 1046 caddr_t dpos, cp2; 1047 struct mbuf *mreq, *mrep, *md, *mb; 1048 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1049 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit; 1050 int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC; 1051 1052 #ifdef DIAGNOSTIC 1053 if (uiop->uio_iovcnt != 1) 1054 panic("nfs: writerpc iovcnt > 1"); 1055 #endif 1056 *must_commit = 0; 1057 tsiz = uiop->uio_resid; 1058 if (uiop->uio_offset + tsiz > 0xffffffff && !v3) 1059 return (EFBIG); 1060 while (tsiz > 0) { 1061 nfsstats.rpccnt[NFSPROC_WRITE]++; 1062 len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz; 1063 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + 5 * NFSX_UNSIGNED + 1064 nfsm_rndup(len)); 1065 nfsm_fhtom(vp, v3); 1066 if (v3) { 1067 tl = nfsm_build(&mb, 5 * NFSX_UNSIGNED); 1068 txdr_hyper(uiop->uio_offset, tl); 1069 tl += 2; 1070 *tl++ = txdr_unsigned(len); 1071 *tl++ = txdr_unsigned(*iomode); 1072 *tl = txdr_unsigned(len); 1073 } else { 1074 u_int32_t x; 1075 1076 tl = nfsm_build(&mb, 4 * NFSX_UNSIGNED); 1077 /* Set both "begin" and "current" to non-garbage. */ 1078 x = txdr_unsigned((u_int32_t)uiop->uio_offset); 1079 *tl++ = x; /* "begin offset" */ 1080 *tl++ = x; /* "current offset" */ 1081 x = txdr_unsigned(len); 1082 *tl++ = x; /* total to this offset */ 1083 *tl = x; /* size of this write */ 1084 1085 } 1086 nfsm_uiotombuf(&mb, uiop, len); 1087 nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, 1088 VTONFS(vp)->n_wcred); 1089 if (v3) { 1090 wccflag = NFSV3_WCCCHK; 1091 nfsm_wcc_data(vp, wccflag); 1092 if (!error) { 1093 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED 1094 + NFSX_V3WRITEVERF); 1095 rlen = fxdr_unsigned(int, *tl++); 1096 if (rlen == 0) { 1097 error = NFSERR_IO; 1098 break; 1099 } else if (rlen < len) { 1100 backup = len - rlen; 1101 uiop->uio_iov->iov_base = 1102 (char *)uiop->uio_iov->iov_base - 1103 backup; 1104 uiop->uio_iov->iov_len += backup; 1105 uiop->uio_offset -= backup; 1106 uiop->uio_resid += backup; 1107 len = rlen; 1108 } 1109 commit = fxdr_unsigned(int, *tl++); 1110 1111 /* 1112 * Return the lowest committment level 1113 * obtained by any of the RPCs. 
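 *
 * NFSv3 orders the stability levels UNSTABLE < DATA_SYNC < FILE_SYNC,
 * so "lowest" means the weakest guarantee seen in any reply; that
 * weakest level is what is finally handed back through *iomode.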
1114 */ 1115 if (committed == NFSV3WRITE_FILESYNC) 1116 committed = commit; 1117 else if (committed == NFSV3WRITE_DATASYNC && 1118 commit == NFSV3WRITE_UNSTABLE) 1119 committed = commit; 1120 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) { 1121 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 1122 NFSX_V3WRITEVERF); 1123 nmp->nm_flag |= NFSMNT_HASWRITEVERF; 1124 } else if (bcmp((caddr_t)tl, 1125 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) { 1126 *must_commit = 1; 1127 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 1128 NFSX_V3WRITEVERF); 1129 } 1130 } 1131 } else 1132 nfsm_loadattr(vp, (struct vattr *)0); 1133 if (wccflag) 1134 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime; 1135 m_freem(mrep); 1136 tsiz -= len; 1137 } 1138 nfsmout: 1139 *iomode = committed; 1140 if (error) 1141 uiop->uio_resid = tsiz; 1142 return (error); 1143 } 1144 1145 /* 1146 * nfs mknod rpc 1147 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1148 * mode set to specify the file type and the size field for rdev. 1149 */ 1150 int 1151 nfs_mknodrpc(dvp, vpp, cnp, vap) 1152 struct vnode *dvp; 1153 struct vnode **vpp; 1154 struct componentname *cnp; 1155 struct vattr *vap; 1156 { 1157 struct nfsv2_sattr *sp; 1158 u_int32_t *tl; 1159 int32_t t1; 1160 struct vnode *newvp = (struct vnode *)0; 1161 struct nfsnode *np = NULL; 1162 char *cp2; 1163 caddr_t dpos; 1164 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1165 struct mbuf *mreq, *mrep, *md, *mb; 1166 u_int32_t rdev; 1167 int v3 = NFS_ISV3(dvp); 1168 1169 if (vap->va_type == VCHR || vap->va_type == VBLK) 1170 rdev = txdr_unsigned(vap->va_rdev); 1171 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1172 rdev = nfs_xdrneg1; 1173 else { 1174 VOP_ABORTOP(dvp, cnp); 1175 vput(dvp); 1176 return (EOPNOTSUPP); 1177 } 1178 nfsstats.rpccnt[NFSPROC_MKNOD]++; 1179 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + 4 * NFSX_UNSIGNED + 1180 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3)); 1181 nfsm_fhtom(dvp, v3); 1182 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1183 if (v3) { 1184 tl = nfsm_build(&mb, NFSX_UNSIGNED); 1185 *tl++ = vtonfsv3_type(vap->va_type); 1186 nfsm_v3attrbuild(&mb, vap, 0); 1187 if (vap->va_type == VCHR || vap->va_type == VBLK) { 1188 tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED); 1189 *tl++ = txdr_unsigned(major(vap->va_rdev)); 1190 *tl = txdr_unsigned(minor(vap->va_rdev)); 1191 } 1192 } else { 1193 sp = nfsm_build(&mb, NFSX_V2SATTR); 1194 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1195 sp->sa_uid = nfs_xdrneg1; 1196 sp->sa_gid = nfs_xdrneg1; 1197 sp->sa_size = rdev; 1198 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1199 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1200 } 1201 nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred); 1202 if (!error) { 1203 nfsm_mtofh(dvp, newvp, v3, gotvp); 1204 if (!gotvp) { 1205 if (newvp) { 1206 vrele(newvp); 1207 newvp = (struct vnode *)0; 1208 } 1209 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1210 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1211 if (!error) 1212 newvp = NFSTOV(np); 1213 } 1214 } 1215 if (v3) 1216 nfsm_wcc_data(dvp, wccflag); 1217 m_freem(mrep); 1218 nfsmout: 1219 if (error) { 1220 if (newvp) 1221 vrele(newvp); 1222 } else { 1223 if (cnp->cn_flags & MAKEENTRY) 1224 nfs_cache_enter(dvp, newvp, cnp); 1225 *vpp = newvp; 1226 } 1227 pool_put(&namei_pool, cnp->cn_pnbuf); 1228 VTONFS(dvp)->n_flag |= NMODIFIED; 1229 if (!wccflag) 1230 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1231 vrele(dvp); 1232 return (error); 1233 } 1234 1235 /* 1236 * nfs mknod vop 1237 
* just call nfs_mknodrpc() to do the work. 1238 */ 1239 int 1240 nfs_mknod(v) 1241 void *v; 1242 { 1243 struct vop_mknod_args *ap = v; 1244 struct vnode *newvp; 1245 int error; 1246 1247 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap); 1248 if (!error) 1249 vrele(newvp); 1250 1251 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1252 1253 return (error); 1254 } 1255 1256 int 1257 nfs_create(v) 1258 void *v; 1259 { 1260 struct vop_create_args *ap = v; 1261 struct vnode *dvp = ap->a_dvp; 1262 struct vattr *vap = ap->a_vap; 1263 struct componentname *cnp = ap->a_cnp; 1264 struct nfsv2_sattr *sp; 1265 u_int32_t *tl; 1266 int32_t t1; 1267 struct nfsnode *np = (struct nfsnode *)0; 1268 struct vnode *newvp = (struct vnode *)0; 1269 caddr_t dpos, cp2; 1270 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0; 1271 struct mbuf *mreq, *mrep, *md, *mb; 1272 int v3 = NFS_ISV3(dvp); 1273 1274 /* 1275 * Oops, not for me.. 1276 */ 1277 if (vap->va_type == VSOCK) 1278 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1279 1280 if (vap->va_vaflags & VA_EXCLUSIVE) 1281 fmode |= O_EXCL; 1282 1283 again: 1284 nfsstats.rpccnt[NFSPROC_CREATE]++; 1285 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + 2 * NFSX_UNSIGNED + 1286 nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3)); 1287 nfsm_fhtom(dvp, v3); 1288 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1289 if (v3) { 1290 tl = nfsm_build(&mb, NFSX_UNSIGNED); 1291 if (fmode & O_EXCL) { 1292 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE); 1293 tl = nfsm_build(&mb, NFSX_V3CREATEVERF); 1294 *tl++ = arc4random(); 1295 *tl = arc4random(); 1296 } else { 1297 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED); 1298 nfsm_v3attrbuild(&mb, vap, 0); 1299 } 1300 } else { 1301 sp = nfsm_build(&mb, NFSX_V2SATTR); 1302 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1303 sp->sa_uid = nfs_xdrneg1; 1304 sp->sa_gid = nfs_xdrneg1; 1305 sp->sa_size = 0; 1306 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1307 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1308 } 1309 nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred); 1310 if (!error) { 1311 nfsm_mtofh(dvp, newvp, v3, gotvp); 1312 if (!gotvp) { 1313 if (newvp) { 1314 vrele(newvp); 1315 newvp = (struct vnode *)0; 1316 } 1317 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1318 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1319 if (!error) 1320 newvp = NFSTOV(np); 1321 } 1322 } 1323 if (v3) 1324 nfsm_wcc_data(dvp, wccflag); 1325 m_freem(mrep); 1326 nfsmout: 1327 if (error) { 1328 if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) { 1329 fmode &= ~O_EXCL; 1330 goto again; 1331 } 1332 if (newvp) 1333 vrele(newvp); 1334 } else if (v3 && (fmode & O_EXCL)) 1335 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc); 1336 if (!error) { 1337 if (cnp->cn_flags & MAKEENTRY) 1338 nfs_cache_enter(dvp, newvp, cnp); 1339 *ap->a_vpp = newvp; 1340 } 1341 pool_put(&namei_pool, cnp->cn_pnbuf); 1342 VTONFS(dvp)->n_flag |= NMODIFIED; 1343 if (!wccflag) 1344 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1345 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1346 vrele(dvp); 1347 return (error); 1348 } 1349 1350 /* 1351 * nfs file remove call 1352 * To try and make nfs semantics closer to ufs semantics, a file that has 1353 * other processes using the vnode is renamed instead of removed and then 1354 * removed later on the last close. 
1355 * - If v_usecount > 1 1356 * If a rename is not already in the works 1357 * call nfs_sillyrename() to set it up 1358 * else 1359 * do the remove rpc 1360 */ 1361 int 1362 nfs_remove(v) 1363 void *v; 1364 { 1365 struct vop_remove_args *ap = v; 1366 struct vnode *vp = ap->a_vp; 1367 struct vnode *dvp = ap->a_dvp; 1368 struct componentname *cnp = ap->a_cnp; 1369 struct nfsnode *np = VTONFS(vp); 1370 int error = 0; 1371 struct vattr vattr; 1372 1373 #ifdef DIAGNOSTIC 1374 if ((cnp->cn_flags & HASBUF) == 0) 1375 panic("nfs_remove: no name"); 1376 if (vp->v_usecount < 1) 1377 panic("nfs_remove: bad v_usecount"); 1378 #endif 1379 if (vp->v_type == VDIR) 1380 error = EPERM; 1381 else if (vp->v_usecount == 1 || (np->n_sillyrename && 1382 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 && 1383 vattr.va_nlink > 1)) { 1384 /* 1385 * Purge the name cache so that the chance of a lookup for 1386 * the name succeeding while the remove is in progress is 1387 * minimized. Without node locking it can still happen, such 1388 * that an I/O op returns ESTALE, but since you get this if 1389 * another host removes the file.. 1390 */ 1391 cache_purge(vp); 1392 /* 1393 * throw away biocache buffers, mainly to avoid 1394 * unnecessary delayed writes later. 1395 */ 1396 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc); 1397 /* Do the rpc */ 1398 if (error != EINTR) 1399 error = nfs_removerpc(dvp, cnp->cn_nameptr, 1400 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc); 1401 /* 1402 * Kludge City: If the first reply to the remove rpc is lost.. 1403 * the reply to the retransmitted request will be ENOENT 1404 * since the file was in fact removed 1405 * Therefore, we cheat and return success. 1406 */ 1407 if (error == ENOENT) 1408 error = 0; 1409 } else if (!np->n_sillyrename) 1410 error = nfs_sillyrename(dvp, vp, cnp); 1411 pool_put(&namei_pool, cnp->cn_pnbuf); 1412 NFS_INVALIDATE_ATTRCACHE(np); 1413 vrele(dvp); 1414 vrele(vp); 1415 1416 VN_KNOTE(vp, NOTE_DELETE); 1417 VN_KNOTE(dvp, NOTE_WRITE); 1418 1419 return (error); 1420 } 1421 1422 /* 1423 * nfs file remove rpc called from nfs_inactive 1424 */ 1425 int 1426 nfs_removeit(sp) 1427 struct sillyrename *sp; 1428 { 1429 1430 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred, 1431 (struct proc *)0)); 1432 } 1433 1434 /* 1435 * Nfs remove rpc, called from nfs_remove() and nfs_removeit(). 
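 *
 * For v3 the reply's wcc data, when present, refreshes the directory's
 * cached attributes; if none came back the attribute cache is
 * invalidated instead, and in either case the directory is marked
 * NMODIFIED.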
1436 */ 1437 int 1438 nfs_removerpc(dvp, name, namelen, cred, proc) 1439 struct vnode *dvp; 1440 char *name; 1441 int namelen; 1442 struct ucred *cred; 1443 struct proc *proc; 1444 { 1445 u_int32_t *tl; 1446 int32_t t1; 1447 caddr_t dpos, cp2; 1448 int error = 0, wccflag = NFSV3_WCCRATTR; 1449 struct mbuf *mreq, *mrep, *md, *mb; 1450 int v3 = NFS_ISV3(dvp); 1451 1452 nfsstats.rpccnt[NFSPROC_REMOVE]++; 1453 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED + 1454 nfsm_rndup(namelen)); 1455 nfsm_fhtom(dvp, v3); 1456 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 1457 nfsm_request(dvp, NFSPROC_REMOVE, proc, cred); 1458 if (v3) 1459 nfsm_wcc_data(dvp, wccflag); 1460 m_freem(mrep); 1461 nfsmout: 1462 VTONFS(dvp)->n_flag |= NMODIFIED; 1463 if (!wccflag) 1464 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1465 return (error); 1466 } 1467 1468 /* 1469 * nfs file rename call 1470 */ 1471 int 1472 nfs_rename(v) 1473 void *v; 1474 { 1475 struct vop_rename_args *ap = v; 1476 struct vnode *fvp = ap->a_fvp; 1477 struct vnode *tvp = ap->a_tvp; 1478 struct vnode *fdvp = ap->a_fdvp; 1479 struct vnode *tdvp = ap->a_tdvp; 1480 struct componentname *tcnp = ap->a_tcnp; 1481 struct componentname *fcnp = ap->a_fcnp; 1482 int error; 1483 1484 #ifdef DIAGNOSTIC 1485 if ((tcnp->cn_flags & HASBUF) == 0 || 1486 (fcnp->cn_flags & HASBUF) == 0) 1487 panic("nfs_rename: no name"); 1488 #endif 1489 /* Check for cross-device rename */ 1490 if ((fvp->v_mount != tdvp->v_mount) || 1491 (tvp && (fvp->v_mount != tvp->v_mount))) { 1492 error = EXDEV; 1493 goto out; 1494 } 1495 1496 /* 1497 * If the tvp exists and is in use, sillyrename it before doing the 1498 * rename of the new file over it. 1499 */ 1500 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename && 1501 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { 1502 VN_KNOTE(tvp, NOTE_DELETE); 1503 vrele(tvp); 1504 tvp = NULL; 1505 } 1506 1507 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1508 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1509 tcnp->cn_proc); 1510 1511 VN_KNOTE(fdvp, NOTE_WRITE); 1512 VN_KNOTE(tdvp, NOTE_WRITE); 1513 1514 if (fvp->v_type == VDIR) { 1515 if (tvp != NULL && tvp->v_type == VDIR) 1516 cache_purge(tdvp); 1517 cache_purge(fdvp); 1518 } 1519 out: 1520 if (tdvp == tvp) 1521 vrele(tdvp); 1522 else 1523 vput(tdvp); 1524 if (tvp) 1525 vput(tvp); 1526 vrele(fdvp); 1527 vrele(fvp); 1528 /* 1529 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1530 */ 1531 if (error == ENOENT) 1532 error = 0; 1533 return (error); 1534 } 1535 1536 /* 1537 * nfs file rename rpc called from nfs_remove() above 1538 */ 1539 int 1540 nfs_renameit(sdvp, scnp, sp) 1541 struct vnode *sdvp; 1542 struct componentname *scnp; 1543 struct sillyrename *sp; 1544 { 1545 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, 1546 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc)); 1547 } 1548 1549 /* 1550 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 
1551 */ 1552 int 1553 nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc) 1554 struct vnode *fdvp; 1555 char *fnameptr; 1556 int fnamelen; 1557 struct vnode *tdvp; 1558 char *tnameptr; 1559 int tnamelen; 1560 struct ucred *cred; 1561 struct proc *proc; 1562 { 1563 u_int32_t *tl; 1564 int32_t t1; 1565 caddr_t dpos, cp2; 1566 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 1567 struct mbuf *mreq, *mrep, *md, *mb; 1568 int v3 = NFS_ISV3(fdvp); 1569 1570 nfsstats.rpccnt[NFSPROC_RENAME]++; 1571 mb = mreq = nfsm_reqhead((NFSX_FH(v3) + NFSX_UNSIGNED)*2 + 1572 nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen)); 1573 nfsm_fhtom(fdvp, v3); 1574 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 1575 nfsm_fhtom(tdvp, v3); 1576 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 1577 nfsm_request(fdvp, NFSPROC_RENAME, proc, cred); 1578 if (v3) { 1579 nfsm_wcc_data(fdvp, fwccflag); 1580 nfsm_wcc_data(tdvp, twccflag); 1581 } 1582 m_freem(mrep); 1583 nfsmout: 1584 VTONFS(fdvp)->n_flag |= NMODIFIED; 1585 VTONFS(tdvp)->n_flag |= NMODIFIED; 1586 if (!fwccflag) 1587 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp)); 1588 if (!twccflag) 1589 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp)); 1590 return (error); 1591 } 1592 1593 /* 1594 * nfs hard link create call 1595 */ 1596 int 1597 nfs_link(v) 1598 void *v; 1599 { 1600 struct vop_link_args *ap = v; 1601 struct vnode *vp = ap->a_vp; 1602 struct vnode *dvp = ap->a_dvp; 1603 struct componentname *cnp = ap->a_cnp; 1604 u_int32_t *tl; 1605 int32_t t1; 1606 caddr_t dpos, cp2; 1607 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 1608 struct mbuf *mreq, *mrep, *md, *mb; 1609 int v3; 1610 1611 if (dvp->v_mount != vp->v_mount) { 1612 pool_put(&namei_pool, cnp->cn_pnbuf); 1613 if (vp == dvp) 1614 vrele(dvp); 1615 else 1616 vput(dvp); 1617 return (EXDEV); 1618 } 1619 1620 /* 1621 * Push all writes to the server, so that the attribute cache 1622 * doesn't get "out of sync" with the server. 1623 * XXX There should be a better way! 
1624 */ 1625 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc); 1626 1627 v3 = NFS_ISV3(vp); 1628 nfsstats.rpccnt[NFSPROC_LINK]++; 1629 mb = mreq = nfsm_reqhead(2 * NFSX_FH(v3) + NFSX_UNSIGNED + 1630 nfsm_rndup(cnp->cn_namelen)); 1631 nfsm_fhtom(vp, v3); 1632 nfsm_fhtom(dvp, v3); 1633 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1634 nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred); 1635 if (v3) { 1636 nfsm_postop_attr(vp, attrflag); 1637 nfsm_wcc_data(dvp, wccflag); 1638 } 1639 m_freem(mrep); 1640 nfsmout: 1641 pool_put(&namei_pool, cnp->cn_pnbuf); 1642 VTONFS(dvp)->n_flag |= NMODIFIED; 1643 if (!attrflag) 1644 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 1645 if (!wccflag) 1646 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1647 1648 VN_KNOTE(vp, NOTE_LINK); 1649 VN_KNOTE(dvp, NOTE_WRITE); 1650 vput(dvp); 1651 return (error); 1652 } 1653 1654 /* 1655 * nfs symbolic link create call 1656 */ 1657 int 1658 nfs_symlink(v) 1659 void *v; 1660 { 1661 struct vop_symlink_args *ap = v; 1662 struct vnode *dvp = ap->a_dvp; 1663 struct vattr *vap = ap->a_vap; 1664 struct componentname *cnp = ap->a_cnp; 1665 struct nfsv2_sattr *sp; 1666 u_int32_t *tl; 1667 int32_t t1; 1668 caddr_t dpos, cp2; 1669 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 1670 struct mbuf *mreq, *mrep, *md, *mb; 1671 struct vnode *newvp = (struct vnode *)0; 1672 int v3 = NFS_ISV3(dvp); 1673 1674 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 1675 slen = strlen(ap->a_target); 1676 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + 2 * NFSX_UNSIGNED + 1677 nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3)); 1678 nfsm_fhtom(dvp, v3); 1679 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1680 if (v3) 1681 nfsm_v3attrbuild(&mb, vap, 0); 1682 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 1683 if (!v3) { 1684 sp = nfsm_build(&mb, NFSX_V2SATTR); 1685 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 1686 sp->sa_uid = nfs_xdrneg1; 1687 sp->sa_gid = nfs_xdrneg1; 1688 sp->sa_size = nfs_xdrneg1; 1689 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1690 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1691 } 1692 nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred); 1693 if (v3) { 1694 if (!error) 1695 nfsm_mtofh(dvp, newvp, v3, gotvp); 1696 nfsm_wcc_data(dvp, wccflag); 1697 } 1698 m_freem(mrep); 1699 nfsmout: 1700 if (newvp) 1701 vrele(newvp); 1702 pool_put(&namei_pool, cnp->cn_pnbuf); 1703 VTONFS(dvp)->n_flag |= NMODIFIED; 1704 if (!wccflag) 1705 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1706 VN_KNOTE(dvp, NOTE_WRITE); 1707 vrele(dvp); 1708 return (error); 1709 } 1710 1711 /* 1712 * nfs make dir call 1713 */ 1714 int 1715 nfs_mkdir(v) 1716 void *v; 1717 { 1718 struct vop_mkdir_args *ap = v; 1719 struct vnode *dvp = ap->a_dvp; 1720 struct vattr *vap = ap->a_vap; 1721 struct componentname *cnp = ap->a_cnp; 1722 struct nfsv2_sattr *sp; 1723 u_int32_t *tl; 1724 int32_t t1; 1725 int len; 1726 struct nfsnode *np = (struct nfsnode *)0; 1727 struct vnode *newvp = (struct vnode *)0; 1728 caddr_t dpos, cp2; 1729 int error = 0, wccflag = NFSV3_WCCRATTR; 1730 int gotvp = 0; 1731 struct mbuf *mreq, *mrep, *md, *mb; 1732 int v3 = NFS_ISV3(dvp); 1733 1734 len = cnp->cn_namelen; 1735 nfsstats.rpccnt[NFSPROC_MKDIR]++; 1736 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED + 1737 nfsm_rndup(len) + NFSX_SATTR(v3)); 1738 nfsm_fhtom(dvp, v3); 1739 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 1740 if (v3) { 1741 nfsm_v3attrbuild(&mb, vap, 0); 1742 } else { 1743 sp = nfsm_build(&mb, NFSX_V2SATTR); 1744 sp->sa_mode = 
		    vtonfsv2_mode(VDIR, vap->va_mode);
		sp->sa_uid = nfs_xdrneg1;
		sp->sa_gid = nfs_xdrneg1;
		sp->sa_size = nfs_xdrneg1;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
	if (!error)
		nfsm_mtofh(dvp, newvp, v3, gotvp);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	m_freem(mrep);
nfsmout:
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));

	if (error == 0 && newvp == NULL) {
		error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
		    cnp->cn_proc, &np);
		if (!error) {
			newvp = NFSTOV(np);
			if (newvp->v_type != VDIR)
				error = EEXIST;
		}
	}
	if (error) {
		if (newvp)
			vrele(newvp);
	} else {
		VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK);
		if (cnp->cn_flags & MAKEENTRY)
			nfs_cache_enter(dvp, newvp, cnp);
		*ap->a_vpp = newvp;
	}
	pool_put(&namei_pool, cnp->cn_pnbuf);
	vrele(dvp);
	return (error);
}

/*
 * nfs remove directory call
 */
int
nfs_rmdir(v)
	void *v;
{
	struct vop_rmdir_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	u_int32_t *tl;
	int32_t t1;
	caddr_t dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb;
	int v3 = NFS_ISV3(dvp);

	if (dvp == vp) {
		vrele(dvp);
		vrele(dvp);
		pool_put(&namei_pool, cnp->cn_pnbuf);
		return (EINVAL);
	}
	nfsstats.rpccnt[NFSPROC_RMDIR]++;
	mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED +
	    nfsm_rndup(cnp->cn_namelen));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	m_freem(mrep);
nfsmout:
	pool_put(&namei_pool, cnp->cn_pnbuf);
	VTONFS(dvp)->n_flag |= NMODIFIED;
	if (!wccflag)
		NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp));

	VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK);
	VN_KNOTE(vp, NOTE_DELETE);

	cache_purge(vp);
	vrele(vp);
	vrele(dvp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * The readdir logic below has a big design bug.  It stores the NFS cookie in
 * the returned uio->uio_offset but does not store the verifier (it cannot).
 * Instead, the code stores the verifier in the nfsnode and applies that
 * verifier to all cookies, no matter what verifier was originally with
 * the cookie.
 *
 * From a practical standpoint, this is not a problem since almost all
 * NFS servers do not change the validity of cookies across deletes
 * and inserts.
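 *
 * To carry the cookie at all, each entry handed back to the caller is
 * wrapped in a struct nfs_dirent (below), which prepends the two 32-bit
 * XDR words of that entry's cookie to the usual struct dirent.
 * nfs_readdir() folds the cookie of the last entry it copies out back
 * into uio_offset and into the caller's cookie array, while the verifier
 * is kept only per-nfsnode in n_cookieverf.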
1849 */ 1850 1851 struct nfs_dirent { 1852 u_int32_t cookie[2]; 1853 struct dirent dirent; 1854 }; 1855 1856 #define NFS_DIRHDSIZ (sizeof (struct nfs_dirent) - (MAXNAMLEN + 1)) 1857 #define NFS_DIRENT_OVERHEAD offsetof(struct nfs_dirent, dirent) 1858 1859 /* 1860 * nfs readdir call 1861 */ 1862 int 1863 nfs_readdir(v) 1864 void *v; 1865 { 1866 struct vop_readdir_args *ap = v; 1867 struct vnode *vp = ap->a_vp; 1868 struct nfsnode *np = VTONFS(vp); 1869 struct uio *uio = ap->a_uio; 1870 int tresid, error; 1871 struct vattr vattr; 1872 u_long *cookies = NULL; 1873 int ncookies = 0, cnt; 1874 u_int64_t newoff = uio->uio_offset; 1875 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1876 struct uio readdir_uio; 1877 struct iovec readdir_iovec; 1878 struct proc * p = uio->uio_procp; 1879 int done = 0, eof = 0; 1880 struct ucred *cred = ap->a_cred; 1881 void *data; 1882 1883 if (vp->v_type != VDIR) 1884 return (EPERM); 1885 /* 1886 * First, check for hit on the EOF offset cache 1887 */ 1888 if (np->n_direofoffset != 0 && 1889 uio->uio_offset == np->n_direofoffset) { 1890 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 && 1891 timespeccmp(&np->n_mtime, &vattr.va_mtime, ==)) { 1892 nfsstats.direofcache_hits++; 1893 *ap->a_eofflag = 1; 1894 return (0); 1895 } 1896 } 1897 1898 if (uio->uio_resid < NFS_FABLKSIZE) 1899 return (EINVAL); 1900 1901 tresid = uio->uio_resid; 1902 1903 if (uio->uio_rw != UIO_READ) 1904 return (EINVAL); 1905 1906 if (ap->a_cookies) { 1907 ncookies = uio->uio_resid / 20; 1908 1909 cookies = malloc(sizeof(*cookies) * ncookies, M_TEMP, 1910 M_WAITOK); 1911 *ap->a_ncookies = ncookies; 1912 *ap->a_cookies = cookies; 1913 } 1914 1915 if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) 1916 (void)nfs_fsinfo(nmp, vp, cred, p); 1917 1918 cnt = 5; 1919 1920 data = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK); 1921 do { 1922 struct nfs_dirent *ndp = data; 1923 1924 readdir_iovec.iov_len = NFS_DIRBLKSIZ; 1925 readdir_iovec.iov_base = data; 1926 readdir_uio.uio_offset = newoff; 1927 readdir_uio.uio_iov = &readdir_iovec; 1928 readdir_uio.uio_iovcnt = 1; 1929 readdir_uio.uio_segflg = UIO_SYSSPACE; 1930 readdir_uio.uio_rw = UIO_READ; 1931 readdir_uio.uio_resid = NFS_DIRBLKSIZ; 1932 readdir_uio.uio_procp = curproc; 1933 1934 if (nmp->nm_flag & NFSMNT_RDIRPLUS) { 1935 error = nfs_readdirplusrpc(vp, &readdir_uio, cred, 1936 &eof); 1937 if (error == NFSERR_NOTSUPP) 1938 nmp->nm_flag &= ~NFSMNT_RDIRPLUS; 1939 } 1940 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) 1941 error = nfs_readdirrpc(vp, &readdir_uio, cred, &eof); 1942 1943 if (error == NFSERR_BAD_COOKIE) 1944 error = EINVAL; 1945 1946 while (error == 0 && 1947 (ap->a_cookies == NULL || ncookies != 0) && 1948 ndp < (struct nfs_dirent *)readdir_iovec.iov_base) { 1949 struct dirent *dp = &ndp->dirent; 1950 int reclen = dp->d_reclen; 1951 1952 dp->d_reclen -= NFS_DIRENT_OVERHEAD; 1953 1954 if (uio->uio_resid < dp->d_reclen) { 1955 eof = 0; 1956 done = 1; 1957 break; 1958 } 1959 1960 error = uiomove((caddr_t)dp, dp->d_reclen, uio); 1961 if (error) 1962 break; 1963 1964 newoff = fxdr_hyper(&ndp->cookie[0]); 1965 1966 if (ap->a_cookies != NULL) { 1967 *cookies = newoff; 1968 cookies++; 1969 ncookies--; 1970 } 1971 1972 ndp = (struct nfs_dirent *)((u_int8_t *)ndp + reclen); 1973 } 1974 } while (!error && !done && !eof && cnt--); 1975 1976 free(data, M_TEMP); 1977 data = NULL; 1978 1979 if (ap->a_cookies) { 1980 if (error) { 1981 free(*ap->a_cookies, M_TEMP); 1982 *ap->a_cookies = NULL; 1983 *ap->a_ncookies = 0; 1984 } else { 
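			/*
			 * ncookies now counts the unused cookie slots, so
			 * subtracting it reports only the entries that were
			 * actually filled in.
			 */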
1985 *ap->a_ncookies -= ncookies; 1986 } 1987 } 1988 1989 if (!error) 1990 uio->uio_offset = newoff; 1991 1992 if (!error && (eof || uio->uio_resid == tresid)) { 1993 nfsstats.direofcache_misses++; 1994 *ap->a_eofflag = 1; 1995 return (0); 1996 } 1997 1998 *ap->a_eofflag = 0; 1999 return (error); 2000 } 2001 2002 2003 /* 2004 * The function below stuff the cookies in after the name 2005 */ 2006 2007 /* 2008 * Readdir rpc call. 2009 */ 2010 int 2011 nfs_readdirrpc(struct vnode *vp, 2012 struct uio *uiop, 2013 struct ucred *cred, 2014 int *end_of_directory) 2015 { 2016 int len, left; 2017 struct nfs_dirent *ndp = NULL; 2018 struct dirent *dp = NULL; 2019 u_int32_t *tl; 2020 caddr_t cp; 2021 int32_t t1; 2022 caddr_t dpos, cp2; 2023 struct mbuf *mreq, *mrep, *md, *mb; 2024 nfsuint64 cookie; 2025 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2026 struct nfsnode *dnp = VTONFS(vp); 2027 u_quad_t fileno; 2028 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1; 2029 int attrflag; 2030 int v3 = NFS_ISV3(vp); 2031 2032 #ifdef DIAGNOSTIC 2033 if (uiop->uio_iovcnt != 1 || 2034 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2035 panic("nfs readdirrpc bad uio"); 2036 #endif 2037 2038 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2039 2040 /* 2041 * Loop around doing readdir rpc's of size nm_readdirsize 2042 * truncated to a multiple of NFS_READDIRBLKSIZ. 2043 * The stopping criteria is EOF or buffer full. 2044 */ 2045 while (more_dirs && bigenough) { 2046 nfsstats.rpccnt[NFSPROC_READDIR]++; 2047 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_READDIR(v3)); 2048 nfsm_fhtom(vp, v3); 2049 if (v3) { 2050 tl = nfsm_build(&mb, 5 * NFSX_UNSIGNED); 2051 *tl++ = cookie.nfsuquad[0]; 2052 *tl++ = cookie.nfsuquad[1]; 2053 if (cookie.nfsuquad[0] == 0 && 2054 cookie.nfsuquad[1] == 0) { 2055 *tl++ = 0; 2056 *tl++ = 0; 2057 } else { 2058 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2059 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2060 } 2061 } else { 2062 tl = nfsm_build(&mb, 2 * NFSX_UNSIGNED); 2063 *tl++ = cookie.nfsuquad[1]; 2064 } 2065 *tl = txdr_unsigned(nmp->nm_readdirsize); 2066 nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred); 2067 if (v3) { 2068 nfsm_postop_attr(vp, attrflag); 2069 if (!error) { 2070 nfsm_dissect(tl, u_int32_t *, 2071 2 * NFSX_UNSIGNED); 2072 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2073 dnp->n_cookieverf.nfsuquad[1] = *tl; 2074 } else { 2075 m_freem(mrep); 2076 goto nfsmout; 2077 } 2078 } 2079 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2080 more_dirs = fxdr_unsigned(int, *tl); 2081 2082 /* loop thru the dir entries, doctoring them to 4bsd form */ 2083 while (more_dirs && bigenough) { 2084 if (v3) { 2085 nfsm_dissect(tl, u_int32_t *, 2086 3 * NFSX_UNSIGNED); 2087 fileno = fxdr_hyper(tl); 2088 len = fxdr_unsigned(int, *(tl + 2)); 2089 } else { 2090 nfsm_dissect(tl, u_int32_t *, 2091 2 * NFSX_UNSIGNED); 2092 fileno = fxdr_unsigned(u_quad_t, *tl++); 2093 len = fxdr_unsigned(int, *tl); 2094 } 2095 if (len <= 0 || len > NFS_MAXNAMLEN) { 2096 error = EBADRPC; 2097 m_freem(mrep); 2098 goto nfsmout; 2099 } 2100 tlen = nfsm_rndup(len + 1); 2101 left = NFS_READDIRBLKSIZ - blksiz; 2102 if ((tlen + NFS_DIRHDSIZ) > left) { 2103 dp->d_reclen += left; 2104 uiop->uio_iov->iov_base += left; 2105 uiop->uio_iov->iov_len -= left; 2106 uiop->uio_resid -= left; 2107 blksiz = 0; 2108 } 2109 if ((tlen + NFS_DIRHDSIZ) > uiop->uio_resid) 2110 bigenough = 0; 2111 if (bigenough) { 2112 ndp = (struct nfs_dirent *) 2113 uiop->uio_iov->iov_base; 2114 dp = &ndp->dirent; 2115 dp->d_fileno = (int)fileno; 2116 dp->d_namlen = 
len; 2117 dp->d_reclen = tlen + NFS_DIRHDSIZ; 2118 dp->d_type = DT_UNKNOWN; 2119 blksiz += dp->d_reclen; 2120 if (blksiz == NFS_READDIRBLKSIZ) 2121 blksiz = 0; 2122 uiop->uio_resid -= NFS_DIRHDSIZ; 2123 uiop->uio_iov->iov_base = 2124 (char *)uiop->uio_iov->iov_base + 2125 NFS_DIRHDSIZ; 2126 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2127 nfsm_mtouio(uiop, len); 2128 cp = uiop->uio_iov->iov_base; 2129 tlen -= len; 2130 *cp = '\0'; /* null terminate */ 2131 uiop->uio_iov->iov_base += tlen; 2132 uiop->uio_iov->iov_len -= tlen; 2133 uiop->uio_resid -= tlen; 2134 } else 2135 nfsm_adv(nfsm_rndup(len)); 2136 if (v3) { 2137 nfsm_dissect(tl, u_int32_t *, 2138 3 * NFSX_UNSIGNED); 2139 } else { 2140 nfsm_dissect(tl, u_int32_t *, 2141 2 * NFSX_UNSIGNED); 2142 } 2143 if (bigenough) { 2144 if (v3) { 2145 ndp->cookie[0] = cookie.nfsuquad[0] = 2146 *tl++; 2147 } else 2148 ndp->cookie[0] = 0; 2149 2150 ndp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2151 } else if (v3) 2152 tl += 2; 2153 else 2154 tl++; 2155 more_dirs = fxdr_unsigned(int, *tl); 2156 } 2157 /* 2158 * If at end of rpc data, get the eof boolean 2159 */ 2160 if (!more_dirs) { 2161 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2162 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2163 } 2164 m_freem(mrep); 2165 } 2166 /* 2167 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2168 * by increasing d_reclen for the last record. 2169 */ 2170 if (blksiz > 0) { 2171 left = NFS_READDIRBLKSIZ - blksiz; 2172 dp->d_reclen += left; 2173 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2174 left; 2175 uiop->uio_iov->iov_len -= left; 2176 uiop->uio_resid -= left; 2177 } 2178 2179 /* 2180 * We are now either at the end of the directory or have filled the 2181 * block. 2182 */ 2183 if (bigenough) { 2184 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2185 if (end_of_directory) *end_of_directory = 1; 2186 } else { 2187 if (uiop->uio_resid > 0) 2188 printf("EEK! readdirrpc resid > 0\n"); 2189 } 2190 2191 nfsmout: 2192 return (error); 2193 } 2194 2195 /* 2196 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2197 */ 2198 int 2199 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2200 int *end_of_directory) 2201 { 2202 int len, left; 2203 struct nfs_dirent *ndirp = NULL; 2204 struct dirent *dp = NULL; 2205 u_int32_t *tl; 2206 caddr_t cp; 2207 int32_t t1; 2208 struct vnode *newvp; 2209 caddr_t dpos, cp2, dpossav1, dpossav2; 2210 struct mbuf *mreq, *mrep, *md, *mb, *mdsav1, *mdsav2; 2211 struct nameidata nami, *ndp = &nami; 2212 struct componentname *cnp = &ndp->ni_cnd; 2213 nfsuint64 cookie; 2214 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2215 struct nfsnode *dnp = VTONFS(vp), *np; 2216 nfsfh_t *fhp; 2217 u_quad_t fileno; 2218 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2219 int attrflag, fhsize; 2220 2221 #ifdef DIAGNOSTIC 2222 if (uiop->uio_iovcnt != 1 || 2223 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2224 panic("nfs readdirplusrpc bad uio"); 2225 #endif 2226 ndp->ni_dvp = vp; 2227 newvp = NULLVP; 2228 2229 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2230 2231 /* 2232 * Loop around doing readdir rpc's of size nm_readdirsize 2233 * truncated to a multiple of NFS_READDIRBLKSIZ. 2234 * The stopping criteria is EOF or buffer full. 
2235 */ 2236 while (more_dirs && bigenough) { 2237 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2238 mb = mreq = nfsm_reqhead(NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2239 nfsm_fhtom(vp, 1); 2240 tl = nfsm_build(&mb, 6 * NFSX_UNSIGNED); 2241 *tl++ = cookie.nfsuquad[0]; 2242 *tl++ = cookie.nfsuquad[1]; 2243 if (cookie.nfsuquad[0] == 0 && 2244 cookie.nfsuquad[1] == 0) { 2245 *tl++ = 0; 2246 *tl++ = 0; 2247 } else { 2248 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2249 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2250 } 2251 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2252 *tl = txdr_unsigned(nmp->nm_rsize); 2253 nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred); 2254 nfsm_postop_attr(vp, attrflag); 2255 if (error) { 2256 m_freem(mrep); 2257 goto nfsmout; 2258 } 2259 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2260 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2261 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2262 more_dirs = fxdr_unsigned(int, *tl); 2263 2264 /* loop thru the dir entries, doctoring them to 4bsd form */ 2265 while (more_dirs && bigenough) { 2266 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2267 fileno = fxdr_hyper(tl); 2268 len = fxdr_unsigned(int, *(tl + 2)); 2269 if (len <= 0 || len > NFS_MAXNAMLEN) { 2270 error = EBADRPC; 2271 m_freem(mrep); 2272 goto nfsmout; 2273 } 2274 tlen = nfsm_rndup(len + 1); 2275 left = NFS_READDIRBLKSIZ - blksiz; 2276 if ((tlen + NFS_DIRHDSIZ) > left) { 2277 dp->d_reclen += left; 2278 uiop->uio_iov->iov_base = 2279 (char *)uiop->uio_iov->iov_base + left; 2280 uiop->uio_iov->iov_len -= left; 2281 uiop->uio_resid -= left; 2282 blksiz = 0; 2283 } 2284 if ((tlen + NFS_DIRHDSIZ) > uiop->uio_resid) 2285 bigenough = 0; 2286 if (bigenough) { 2287 ndirp = (struct nfs_dirent *) 2288 uiop->uio_iov->iov_base; 2289 dp = &ndirp->dirent; 2290 dp->d_fileno = (int)fileno; 2291 dp->d_namlen = len; 2292 dp->d_reclen = tlen + NFS_DIRHDSIZ; 2293 dp->d_type = DT_UNKNOWN; 2294 blksiz += dp->d_reclen; 2295 if (blksiz == NFS_READDIRBLKSIZ) 2296 blksiz = 0; 2297 uiop->uio_resid -= NFS_DIRHDSIZ; 2298 uiop->uio_iov->iov_base = 2299 (char *)uiop->uio_iov->iov_base + 2300 NFS_DIRHDSIZ; 2301 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2302 cnp->cn_nameptr = uiop->uio_iov->iov_base; 2303 cnp->cn_namelen = len; 2304 nfsm_mtouio(uiop, len); 2305 cp = uiop->uio_iov->iov_base; 2306 tlen -= len; 2307 *cp = '\0'; 2308 uiop->uio_iov->iov_base += tlen; 2309 uiop->uio_iov->iov_len -= tlen; 2310 uiop->uio_resid -= tlen; 2311 } else 2312 nfsm_adv(nfsm_rndup(len)); 2313 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2314 if (bigenough) { 2315 ndirp->cookie[0] = cookie.nfsuquad[0] = *tl++; 2316 ndirp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2317 } else 2318 tl += 2; 2319 2320 /* 2321 * Since the attributes are before the file handle 2322 * (sigh), we must skip over the attributes and then 2323 * come back and get them. 
2324 */ 2325 attrflag = fxdr_unsigned(int, *tl); 2326 if (attrflag) { 2327 dpossav1 = dpos; 2328 mdsav1 = md; 2329 nfsm_adv(NFSX_V3FATTR); 2330 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2331 doit = fxdr_unsigned(int, *tl); 2332 if (doit) { 2333 nfsm_getfh(fhp, fhsize, 1); 2334 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2335 VREF(vp); 2336 newvp = vp; 2337 np = dnp; 2338 } else { 2339 error = nfs_nget(vp->v_mount, fhp, 2340 fhsize, &np); 2341 if (error) 2342 doit = 0; 2343 else 2344 newvp = NFSTOV(np); 2345 } 2346 } 2347 if (doit && bigenough) { 2348 dpossav2 = dpos; 2349 dpos = dpossav1; 2350 mdsav2 = md; 2351 md = mdsav1; 2352 nfsm_loadattr(newvp, (struct vattr *)0); 2353 dpos = dpossav2; 2354 md = mdsav2; 2355 dp->d_type = 2356 IFTODT(VTTOIF(np->n_vattr.va_type)); 2357 if (cnp->cn_namelen <= NCHNAMLEN) { 2358 ndp->ni_vp = newvp; 2359 cnp->cn_hash = 2360 hash32_str(cnp->cn_nameptr, 2361 HASHINIT); 2362 cache_purge(ndp->ni_dvp); 2363 nfs_cache_enter(ndp->ni_dvp, ndp->ni_vp, 2364 cnp); 2365 } 2366 } 2367 } else { 2368 /* Just skip over the file handle */ 2369 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2370 i = fxdr_unsigned(int, *tl); 2371 nfsm_adv(nfsm_rndup(i)); 2372 } 2373 if (newvp != NULLVP) { 2374 vrele(newvp); 2375 newvp = NULLVP; 2376 } 2377 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2378 more_dirs = fxdr_unsigned(int, *tl); 2379 } 2380 /* 2381 * If at end of rpc data, get the eof boolean 2382 */ 2383 if (!more_dirs) { 2384 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2385 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2386 } 2387 m_freem(mrep); 2388 } 2389 /* 2390 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2391 * by increasing d_reclen for the last record. 2392 */ 2393 if (blksiz > 0) { 2394 left = NFS_READDIRBLKSIZ - blksiz; 2395 dp->d_reclen += left; 2396 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2397 left; 2398 uiop->uio_iov->iov_len -= left; 2399 uiop->uio_resid -= left; 2400 } 2401 2402 /* 2403 * We are now either at the end of the directory or have filled the 2404 * block. 2405 */ 2406 if (bigenough) { 2407 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2408 if (end_of_directory) *end_of_directory = 1; 2409 } else { 2410 if (uiop->uio_resid > 0) 2411 printf("EEK! readdirplusrpc resid > 0\n"); 2412 } 2413 2414 nfsmout: 2415 if (newvp != NULLVP) 2416 vrele(newvp); 2417 return (error); 2418 } 2419 2420 /* 2421 * Silly rename. To make the NFS filesystem that is stateless look a little 2422 * more like the "ufs" a remove of an active vnode is translated to a rename 2423 * to a funny looking filename that is removed by nfs_inactive on the 2424 * nfsnode. There is the potential for another process on a different client 2425 * to create the same funny name between the nfs_lookitup() fails and the 2426 * nfs_rename() completes, but... 
2427 */ 2428 int 2429 nfs_sillyrename(dvp, vp, cnp) 2430 struct vnode *dvp, *vp; 2431 struct componentname *cnp; 2432 { 2433 struct sillyrename *sp; 2434 struct nfsnode *np; 2435 int error; 2436 2437 cache_purge(dvp); 2438 np = VTONFS(vp); 2439 sp = malloc(sizeof(struct sillyrename), M_NFSREQ, M_WAITOK); 2440 sp->s_cred = crdup(cnp->cn_cred); 2441 sp->s_dvp = dvp; 2442 VREF(dvp); 2443 2444 if (vp->v_type == VDIR) { 2445 #ifdef DIAGNOSTIC 2446 printf("nfs: sillyrename dir\n"); 2447 #endif 2448 error = EINVAL; 2449 goto bad; 2450 } 2451 2452 /* Fudge together a funny name */ 2453 sp->s_namlen = snprintf(sp->s_name, sizeof sp->s_name, 2454 ".nfsA%05x4.4", cnp->cn_proc->p_pid); 2455 if (sp->s_namlen > sizeof sp->s_name) 2456 sp->s_namlen = strlen(sp->s_name); 2457 2458 /* Try lookitups until we get one that isn't there */ 2459 while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2460 cnp->cn_proc, (struct nfsnode **)0) == 0) { 2461 sp->s_name[4]++; 2462 if (sp->s_name[4] > 'z') { 2463 error = EINVAL; 2464 goto bad; 2465 } 2466 } 2467 error = nfs_renameit(dvp, cnp, sp); 2468 if (error) 2469 goto bad; 2470 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2471 cnp->cn_proc, &np); 2472 np->n_sillyrename = sp; 2473 return (0); 2474 bad: 2475 vrele(sp->s_dvp); 2476 crfree(sp->s_cred); 2477 free(sp, M_NFSREQ); 2478 return (error); 2479 } 2480 2481 /* 2482 * Look up a file name and optionally either update the file handle or 2483 * allocate an nfsnode, depending on the value of npp. 2484 * npp == NULL --> just do the lookup 2485 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2486 * handled too 2487 * *npp != NULL --> update the file handle in the vnode 2488 */ 2489 int 2490 nfs_lookitup(dvp, name, len, cred, procp, npp) 2491 struct vnode *dvp; 2492 char *name; 2493 int len; 2494 struct ucred *cred; 2495 struct proc *procp; 2496 struct nfsnode **npp; 2497 { 2498 u_int32_t *tl; 2499 int32_t t1; 2500 struct vnode *newvp = (struct vnode *)0; 2501 struct nfsnode *np, *dnp = VTONFS(dvp); 2502 caddr_t dpos, cp2; 2503 int error = 0, fhlen, attrflag; 2504 struct mbuf *mreq, *mrep, *md, *mb; 2505 nfsfh_t *nfhp; 2506 int v3 = NFS_ISV3(dvp); 2507 2508 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2509 mb = mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED + 2510 nfsm_rndup(len)); 2511 nfsm_fhtom(dvp, v3); 2512 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2513 nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred); 2514 if (npp && !error) { 2515 nfsm_getfh(nfhp, fhlen, v3); 2516 if (*npp) { 2517 np = *npp; 2518 np->n_fhp = &np->n_fh; 2519 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen); 2520 np->n_fhsize = fhlen; 2521 newvp = NFSTOV(np); 2522 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2523 VREF(dvp); 2524 newvp = dvp; 2525 np = dnp; 2526 } else { 2527 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2528 if (error) { 2529 m_freem(mrep); 2530 return (error); 2531 } 2532 newvp = NFSTOV(np); 2533 } 2534 if (v3) { 2535 nfsm_postop_attr(newvp, attrflag); 2536 if (!attrflag && *npp == NULL) { 2537 m_freem(mrep); 2538 vrele(newvp); 2539 return (ENOENT); 2540 } 2541 } else 2542 nfsm_loadattr(newvp, (struct vattr *)0); 2543 } 2544 m_freem(mrep); 2545 nfsmout: 2546 if (npp && *npp == NULL) { 2547 if (error) { 2548 if (newvp) 2549 vrele(newvp); 2550 } else 2551 *npp = np; 2552 } 2553 return (error); 2554 } 2555 2556 /* 2557 * Nfs Version 3 commit rpc 2558 */ 2559 int 2560 nfs_commit(vp, offset, cnt, procp) 2561 struct vnode *vp; 2562 u_quad_t offset; 2563 int cnt; 2564 struct proc *procp; 2565 { 2566 
	u_int32_t *tl;
2567 	int32_t t1;
2568 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2569 	caddr_t dpos, cp2;
2570 	int error = 0, wccflag = NFSV3_WCCRATTR;
2571 	struct mbuf *mreq, *mrep, *md, *mb;
2572 
2573 	if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0)
2574 		return (0);
2575 	nfsstats.rpccnt[NFSPROC_COMMIT]++;
2576 	mb = mreq = nfsm_reqhead(NFSX_FH(1));
2577 	nfsm_fhtom(vp, 1);
2578 	tl = nfsm_build(&mb, 3 * NFSX_UNSIGNED);
2579 	txdr_hyper(offset, tl);
2580 	tl += 2;
2581 	*tl = txdr_unsigned(cnt);
2582 	nfsm_request(vp, NFSPROC_COMMIT, procp, VTONFS(vp)->n_wcred);
2583 	nfsm_wcc_data(vp, wccflag);
2584 	if (!error) {
2585 		nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2586 		if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
2587 		    NFSX_V3WRITEVERF)) {
2588 			bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
2589 			    NFSX_V3WRITEVERF);
2590 			error = NFSERR_STALEWRITEVERF;
2591 		}
2592 	}
2593 	m_freem(mrep);
2594 nfsmout:
2595 	return (error);
2596 }
2597 
2598 /*
2599  * Kludge City..
2600  * - make nfs_bmap() essentially a no-op that does no translation
2601  * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2602  * (Maybe I could use the process's page mapping, but I was concerned that
2603  * Kernel Write might not be enabled and also figured copyout() would do
2604  * a lot more work than bcopy() and also it currently happens in the
2605  * context of the swapper process (2).)
2606  */
2607 int
2608 nfs_bmap(v)
2609 	void *v;
2610 {
2611 	struct vop_bmap_args *ap = v;
2612 	struct vnode *vp = ap->a_vp;
2613 
2614 	if (ap->a_vpp != NULL)
2615 		*ap->a_vpp = vp;
2616 	if (ap->a_bnp != NULL)
2617 		*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2618 	return (0);
2619 }
2620 
2621 /*
2622  * Strategy routine.
2623  * For async requests when nfsiod(s) are running, queue the request by
2624  * calling nfs_asyncio(); otherwise just call nfs_doio() to do the
2625  * request.
2626  */
2627 int
2628 nfs_strategy(v)
2629 	void *v;
2630 {
2631 	struct vop_strategy_args *ap = v;
2632 	struct buf *bp = ap->a_bp;
2633 	struct proc *p;
2634 	int error = 0;
2635 
2636 	if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
2637 		panic("nfs physio/async");
2638 	if (bp->b_flags & B_ASYNC)
2639 		p = NULL;
2640 	else
2641 		p = curproc;	/* XXX */
2642 	/*
2643 	 * If the op is asynchronous and an i/o daemon is waiting,
2644 	 * queue the request, wake it up and wait for completion;
2645 	 * otherwise just do it ourselves.
2646 	 */
2647 	if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp))
2648 		error = nfs_doio(bp, p);
2649 	return (error);
2650 }
2651 
2652 /*
2653  * fsync vnode op. Just call nfs_flush() with commit == 1.
2654  */
2655 int
2656 nfs_fsync(v)
2657 	void *v;
2658 {
2659 	struct vop_fsync_args *ap = v;
2660 
2661 	return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2662 }
2663 
2664 /*
2665  * Flush all the blocks associated with a vnode.
2666  * Walk through the buffer pool and push any dirty pages
2667  * associated with the vnode.
2668  */
2669 int
2670 nfs_flush(vp, cred, waitfor, p, commit)
2671 	struct vnode *vp;
2672 	struct ucred *cred;
2673 	int waitfor;
2674 	struct proc *p;
2675 	int commit;
2676 {
2677 	struct nfsnode *np = VTONFS(vp);
2678 	struct buf *bp;
2679 	int i;
2680 	struct buf *nbp;
2681 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2682 	int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos;
2683 	int passone = 1;
2684 	u_quad_t off = (u_quad_t)-1, endoff = 0, toff;
2685 #ifndef NFS_COMMITBVECSIZ
2686 #define NFS_COMMITBVECSIZ 20
2687 #endif
2688 	struct buf *bvec[NFS_COMMITBVECSIZ];
2689 
2690 	if (nmp->nm_flag & NFSMNT_INT)
2691 		slpflag = PCATCH;
2692 	if (!commit)
2693 		passone = 0;
2694 	/*
2695 	 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2696 	 * server, but has not been committed to stable storage on the server
2697 	 * yet. On the first pass, the byte range is worked out and the commit
2698 	 * rpc is done. On the second pass, nfs_writebp() is called to do the
2699 	 * job.
2700 	 */
2701 again:
2702 	bvecpos = 0;
2703 	if (NFS_ISV3(vp) && commit) {
2704 		s = splbio();
2705 		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) {
2706 			nbp = LIST_NEXT(bp, b_vnbufs);
2707 			if (bvecpos >= NFS_COMMITBVECSIZ)
2708 				break;
2709 			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2710 			    != (B_DELWRI | B_NEEDCOMMIT))
2711 				continue;
2712 			bremfree(bp);
2713 			bp->b_flags |= B_WRITEINPROG;
2714 			buf_acquire(bp);
2715 			/*
2716 			 * A list of these buffers is kept so that the
2717 			 * second loop knows which buffers have actually
2718 			 * been committed. This is necessary, since there
2719 			 * may be a race between the commit rpc and new
2720 			 * uncommitted writes on the file.
2721 			 */
2722 			bvec[bvecpos++] = bp;
2723 			toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2724 			    bp->b_dirtyoff;
2725 			if (toff < off)
2726 				off = toff;
2727 			toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2728 			if (toff > endoff)
2729 				endoff = toff;
2730 		}
2731 		splx(s);
2732 	}
2733 	if (bvecpos > 0) {
2734 		/*
2735 		 * Commit data on the server, as required.
2736 		 */
2737 		retv = nfs_commit(vp, off, (int)(endoff - off), p);
2738 		if (retv == NFSERR_STALEWRITEVERF)
2739 			nfs_clearcommit(vp->v_mount);
2740 		/*
2741 		 * Now, either mark the blocks I/O done or mark the
2742 		 * blocks dirty, depending on whether the commit
2743 		 * succeeded.
2744 		 */
2745 		for (i = 0; i < bvecpos; i++) {
2746 			bp = bvec[i];
2747 			bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG);
2748 			if (retv)
2749 				brelse(bp);
2750 			else {
2751 				s = splbio();
2752 				buf_undirty(bp);
2753 				vp->v_numoutput++;
2754 				bp->b_flags |= B_ASYNC;
2755 				bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2756 				bp->b_dirtyoff = bp->b_dirtyend = 0;
2757 				biodone(bp);
2758 				splx(s);
2759 			}
2760 		}
2761 	}
2762 
2763 	/*
2764 	 * Start/do any write(s) that are required.
2765 */ 2766 loop: 2767 s = splbio(); 2768 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) { 2769 nbp = LIST_NEXT(bp, b_vnbufs); 2770 if (bp->b_flags & B_BUSY) { 2771 if (waitfor != MNT_WAIT || passone) 2772 continue; 2773 bp->b_flags |= B_WANTED; 2774 error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1), 2775 "nfsfsync", slptimeo); 2776 splx(s); 2777 if (error) { 2778 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) 2779 return (EINTR); 2780 if (slpflag == PCATCH) { 2781 slpflag = 0; 2782 slptimeo = 2 * hz; 2783 } 2784 } 2785 goto loop; 2786 } 2787 if ((bp->b_flags & B_DELWRI) == 0) 2788 panic("nfs_fsync: not dirty"); 2789 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) 2790 continue; 2791 bremfree(bp); 2792 if (passone || !commit) { 2793 bp->b_flags |= B_ASYNC; 2794 } else { 2795 bp->b_flags |= (B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT); 2796 } 2797 buf_acquire(bp); 2798 splx(s); 2799 VOP_BWRITE(bp); 2800 goto loop; 2801 } 2802 splx(s); 2803 if (passone) { 2804 passone = 0; 2805 goto again; 2806 } 2807 if (waitfor == MNT_WAIT) { 2808 loop2: 2809 s = splbio(); 2810 error = vwaitforio(vp, slpflag, "nfs_fsync", slptimeo); 2811 splx(s); 2812 if (error) { 2813 if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) 2814 return (EINTR); 2815 if (slpflag == PCATCH) { 2816 slpflag = 0; 2817 slptimeo = 2 * hz; 2818 } 2819 goto loop2; 2820 } 2821 2822 if (LIST_FIRST(&vp->v_dirtyblkhd) && commit) { 2823 #if 0 2824 vprint("nfs_fsync: dirty", vp); 2825 #endif 2826 goto loop; 2827 } 2828 } 2829 if (np->n_flag & NWRITEERR) { 2830 error = np->n_error; 2831 np->n_flag &= ~NWRITEERR; 2832 } 2833 return (error); 2834 } 2835 2836 /* 2837 * Return POSIX pathconf information applicable to nfs. 2838 * 2839 * The NFS V2 protocol doesn't support this, so just return EINVAL 2840 * for V2. 2841 */ 2842 /* ARGSUSED */ 2843 int 2844 nfs_pathconf(v) 2845 void *v; 2846 { 2847 #if 0 2848 struct vop_pathconf_args *ap = v; 2849 #endif 2850 2851 return (EINVAL); 2852 } 2853 2854 /* 2855 * NFS advisory byte-level locks. 2856 */ 2857 int 2858 nfs_advlock(v) 2859 void *v; 2860 { 2861 struct vop_advlock_args *ap = v; 2862 struct nfsnode *np = VTONFS(ap->a_vp); 2863 2864 return (lf_advlock(&np->n_lockf, np->n_size, ap->a_id, ap->a_op, 2865 ap->a_fl, ap->a_flags)); 2866 } 2867 2868 /* 2869 * Print out the contents of an nfsnode. 2870 */ 2871 int 2872 nfs_print(v) 2873 void *v; 2874 { 2875 struct vop_print_args *ap = v; 2876 struct vnode *vp = ap->a_vp; 2877 struct nfsnode *np = VTONFS(vp); 2878 2879 printf("tag VT_NFS, fileid %ld fsid 0x%lx", 2880 np->n_vattr.va_fileid, np->n_vattr.va_fsid); 2881 #ifdef FIFO 2882 if (vp->v_type == VFIFO) 2883 fifo_printinfo(vp); 2884 #endif 2885 printf("\n"); 2886 return (0); 2887 } 2888 2889 /* 2890 * Just call nfs_writebp() with the force argument set to 1. 2891 */ 2892 int 2893 nfs_bwrite(v) 2894 void *v; 2895 { 2896 struct vop_bwrite_args *ap = v; 2897 2898 return (nfs_writebp(ap->a_bp, 1)); 2899 } 2900 2901 /* 2902 * This is a clone of vop_generic_bwrite(), except that B_WRITEINPROG isn't set unless 2903 * the force flag is one and it also handles the B_NEEDCOMMIT flag. 
2904  */
2905 int
2906 nfs_writebp(bp, force)
2907 	struct buf *bp;
2908 	int force;
2909 {
2910 	int oldflags = bp->b_flags, retv = 1;
2911 	struct proc *p = curproc;	/* XXX */
2912 	off_t off;
2913 	size_t cnt;
2914 	int s;
2915 	struct vnode *vp;
2916 	struct nfsnode *np;
2917 
2918 	if (!(bp->b_flags & B_BUSY))
2919 		panic("bwrite: buffer is not busy???");
2920 
2921 	vp = bp->b_vp;
2922 	np = VTONFS(vp);
2923 
2924 	bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2925 
2926 	s = splbio();
2927 	buf_undirty(bp);
2928 
2929 	if ((oldflags & B_ASYNC) && !(oldflags & B_DELWRI) && p)
2930 		++p->p_stats->p_ru.ru_oublock;
2931 
2932 	bp->b_vp->v_numoutput++;
2933 	splx(s);
2934 
2935 	/*
2936 	 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not,
2937 	 * an actual write will have to be scheduled via VOP_STRATEGY().
2938 	 * If B_WRITEINPROG is already set, then push it with a write anyhow.
2939 	 */
2940 	if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) {
2941 		off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
2942 		cnt = bp->b_dirtyend - bp->b_dirtyoff;
2943 
2944 		rw_enter_write(&np->n_commitlock);
2945 		if (!(bp->b_flags & B_NEEDCOMMIT)) {
2946 			rw_exit_write(&np->n_commitlock);
2947 			return (0);
2948 		}
2949 
2950 		/*
2951 		 * If it's already been committed by somebody else,
2952 		 * bail.
2953 		 */
2954 		if (!nfs_in_committed_range(vp, bp)) {
2955 			int pushedrange = 0;
2956 			/*
2957 			 * Since we're going to do this, push as much
2958 			 * as we can.
2959 			 */
2960 
2961 			if (nfs_in_tobecommitted_range(vp, bp)) {
2962 				pushedrange = 1;
2963 				off = np->n_pushlo;
2964 				cnt = np->n_pushhi - np->n_pushlo;
2965 			}
2966 
2967 			bp->b_flags |= B_WRITEINPROG;
2968 			retv = nfs_commit(bp->b_vp, off, cnt, bp->b_proc);
2969 			bp->b_flags &= ~B_WRITEINPROG;
2970 
2971 			if (retv == 0) {
2972 				if (pushedrange)
2973 					nfs_merge_commit_ranges(vp);
2974 				else
2975 					nfs_add_committed_range(vp, bp);
2976 			}
2977 		} else
2978 			retv = 0;	/* It has already been committed. */
2979 
2980 		rw_exit_write(&np->n_commitlock);
2981 		if (!retv) {
2982 			bp->b_dirtyoff = bp->b_dirtyend = 0;
2983 			bp->b_flags &= ~B_NEEDCOMMIT;
2984 			s = splbio();
2985 			biodone(bp);
2986 			splx(s);
2987 		} else if (retv == NFSERR_STALEWRITEVERF)
2988 			nfs_clearcommit(bp->b_vp->v_mount);
2989 	}
2990 	if (retv) {
2991 		if (force)
2992 			bp->b_flags |= B_WRITEINPROG;
2993 		VOP_STRATEGY(bp);
2994 	}
2995 
2996 	if ((oldflags & B_ASYNC) == 0) {
2997 		int rtval;
2998 
2999 		bp->b_flags |= B_RAW;
3000 		rtval = biowait(bp);
3001 		if (!(oldflags & B_DELWRI) && p) {
3002 			++p->p_stats->p_ru.ru_oublock;
3003 		}
3004 		brelse(bp);
3005 		return (rtval);
3006 	}
3007 
3008 	return (0);
3009 }
3010 
3011 /*
3012  * nfs special file access vnode op.
3013  * Essentially just get vattr and then imitate iaccess() since the device is
3014  * local to the client.
3015  */
3016 int
3017 nfsspec_access(v)
3018 	void *v;
3019 {
3020 	struct vop_access_args *ap = v;
3021 	struct vattr va;
3022 	struct vnode *vp = ap->a_vp;
3023 	int error;
3024 
3025 	/*
3026 	 * Disallow write attempts on filesystems mounted read-only,
3027 	 * unless the file is a socket, fifo, or a block or character
3028 	 * device resident on the filesystem.
3029 */ 3030 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3031 switch (vp->v_type) { 3032 case VREG: 3033 case VDIR: 3034 case VLNK: 3035 return (EROFS); 3036 default: 3037 break; 3038 } 3039 } 3040 3041 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_p); 3042 if (error) 3043 return (error); 3044 3045 return (vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid, 3046 ap->a_mode, ap->a_cred)); 3047 } 3048 3049 int 3050 nfs_poll(v) 3051 void *v; 3052 { 3053 struct vop_poll_args *ap = v; 3054 3055 /* 3056 * We should really check to see if I/O is possible. 3057 */ 3058 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 3059 } 3060 3061 /* 3062 * Read wrapper for special devices. 3063 */ 3064 int 3065 nfsspec_read(v) 3066 void *v; 3067 { 3068 struct vop_read_args *ap = v; 3069 struct nfsnode *np = VTONFS(ap->a_vp); 3070 3071 /* 3072 * Set access flag. 3073 */ 3074 np->n_flag |= NACC; 3075 getnanotime(&np->n_atim); 3076 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap)); 3077 } 3078 3079 /* 3080 * Write wrapper for special devices. 3081 */ 3082 int 3083 nfsspec_write(v) 3084 void *v; 3085 { 3086 struct vop_write_args *ap = v; 3087 struct nfsnode *np = VTONFS(ap->a_vp); 3088 3089 /* 3090 * Set update flag. 3091 */ 3092 np->n_flag |= NUPD; 3093 getnanotime(&np->n_mtim); 3094 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap)); 3095 } 3096 3097 /* 3098 * Close wrapper for special devices. 3099 * 3100 * Update the times on the nfsnode then do device close. 3101 */ 3102 int 3103 nfsspec_close(v) 3104 void *v; 3105 { 3106 struct vop_close_args *ap = v; 3107 struct vnode *vp = ap->a_vp; 3108 struct nfsnode *np = VTONFS(vp); 3109 struct vattr vattr; 3110 3111 if (np->n_flag & (NACC | NUPD)) { 3112 np->n_flag |= NCHG; 3113 if (vp->v_usecount == 1 && 3114 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3115 VATTR_NULL(&vattr); 3116 if (np->n_flag & NACC) 3117 vattr.va_atime = np->n_atim; 3118 if (np->n_flag & NUPD) 3119 vattr.va_mtime = np->n_mtim; 3120 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3121 } 3122 } 3123 return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap)); 3124 } 3125 3126 #ifdef FIFO 3127 /* 3128 * Read wrapper for fifos. 3129 */ 3130 int 3131 nfsfifo_read(v) 3132 void *v; 3133 { 3134 struct vop_read_args *ap = v; 3135 extern int (**fifo_vnodeop_p)(void *); 3136 struct nfsnode *np = VTONFS(ap->a_vp); 3137 3138 /* 3139 * Set access flag. 3140 */ 3141 np->n_flag |= NACC; 3142 getnanotime(&np->n_atim); 3143 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap)); 3144 } 3145 3146 /* 3147 * Write wrapper for fifos. 3148 */ 3149 int 3150 nfsfifo_write(v) 3151 void *v; 3152 { 3153 struct vop_write_args *ap = v; 3154 extern int (**fifo_vnodeop_p)(void *); 3155 struct nfsnode *np = VTONFS(ap->a_vp); 3156 3157 /* 3158 * Set update flag. 3159 */ 3160 np->n_flag |= NUPD; 3161 getnanotime(&np->n_mtim); 3162 return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); 3163 } 3164 3165 /* 3166 * Close wrapper for fifos. 3167 * 3168 * Update the times on the nfsnode then do fifo close. 
3169  */
3170 int
3171 nfsfifo_close(v)
3172 	void *v;
3173 {
3174 	struct vop_close_args *ap = v;
3175 	struct vnode *vp = ap->a_vp;
3176 	struct nfsnode *np = VTONFS(vp);
3177 	struct vattr vattr;
3178 	extern int (**fifo_vnodeop_p)(void *);
3179 
3180 	if (np->n_flag & (NACC | NUPD)) {
3181 		if (np->n_flag & NACC) {
3182 			getnanotime(&np->n_atim);
3183 		}
3184 		if (np->n_flag & NUPD) {
3185 			getnanotime(&np->n_mtim);
3186 		}
3187 		np->n_flag |= NCHG;
3188 		if (vp->v_usecount == 1 &&
3189 		    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3190 			VATTR_NULL(&vattr);
3191 			if (np->n_flag & NACC)
3192 				vattr.va_atime = np->n_atim;
3193 			if (np->n_flag & NUPD)
3194 				vattr.va_mtime = np->n_mtim;
3195 			(void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3196 		}
3197 	}
3198 	return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap));
3199 }
3200 
3201 int
3202 nfsfifo_reclaim(void *v)
3203 {
3204 	fifo_reclaim(v);
3205 	return (nfs_reclaim(v));
3206 }
3207 #endif /* FIFO */
3208 