1 /* $OpenBSD: nfs_vnops.c,v 1.152 2014/07/12 18:43:52 tedu Exp $ */ 2 /* $NetBSD: nfs_vnops.c,v 1.62.4.1 1996/07/08 20:26:52 jtc Exp $ */ 3 4 /* 5 * Copyright (c) 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * Rick Macklem at The University of Guelph. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95 36 */ 37 38 39 /* 40 * vnode op calls for Sun NFS version 2 and 3 41 */ 42 43 #include <sys/param.h> 44 #include <sys/proc.h> 45 #include <sys/kernel.h> 46 #include <sys/systm.h> 47 #include <sys/resourcevar.h> 48 #include <sys/poll.h> 49 #include <sys/proc.h> 50 #include <sys/mount.h> 51 #include <sys/buf.h> 52 #include <sys/malloc.h> 53 #include <sys/pool.h> 54 #include <sys/mbuf.h> 55 #include <sys/conf.h> 56 #include <sys/namei.h> 57 #include <sys/vnode.h> 58 #include <sys/dirent.h> 59 #include <sys/fcntl.h> 60 #include <sys/lockf.h> 61 #include <sys/hash.h> 62 #include <sys/queue.h> 63 #include <sys/specdev.h> 64 #include <sys/unistd.h> 65 66 #include <miscfs/fifofs/fifo.h> 67 68 #include <nfs/rpcv2.h> 69 #include <nfs/nfsproto.h> 70 #include <nfs/nfs.h> 71 #include <nfs/nfsnode.h> 72 #include <nfs/nfsmount.h> 73 #include <nfs/xdr_subs.h> 74 #include <nfs/nfsm_subs.h> 75 #include <nfs/nfs_var.h> 76 77 #include <net/if.h> 78 #include <netinet/in.h> 79 80 #include <dev/rndvar.h> 81 82 void nfs_cache_enter(struct vnode *, struct vnode *, struct componentname *); 83 84 /* Global vfs data structures for nfs. 
*/ 85 struct vops nfs_vops = { 86 .vop_lookup = nfs_lookup, 87 .vop_create = nfs_create, 88 .vop_mknod = nfs_mknod, 89 .vop_open = nfs_open, 90 .vop_close = nfs_close, 91 .vop_access = nfs_access, 92 .vop_getattr = nfs_getattr, 93 .vop_setattr = nfs_setattr, 94 .vop_read = nfs_read, 95 .vop_write = nfs_write, 96 .vop_ioctl = nfs_ioctl, 97 .vop_poll = nfs_poll, 98 .vop_kqfilter = nfs_kqfilter, 99 .vop_revoke = vop_generic_revoke, 100 .vop_fsync = nfs_fsync, 101 .vop_remove = nfs_remove, 102 .vop_link = nfs_link, 103 .vop_rename = nfs_rename, 104 .vop_mkdir = nfs_mkdir, 105 .vop_rmdir = nfs_rmdir, 106 .vop_symlink = nfs_symlink, 107 .vop_readdir = nfs_readdir, 108 .vop_readlink = nfs_readlink, 109 .vop_abortop = vop_generic_abortop, 110 .vop_inactive = nfs_inactive, 111 .vop_reclaim = nfs_reclaim, 112 .vop_lock = vop_generic_lock, /* XXX: beck@ must fix this. */ 113 .vop_unlock = vop_generic_unlock, 114 .vop_bmap = nfs_bmap, 115 .vop_strategy = nfs_strategy, 116 .vop_print = nfs_print, 117 .vop_islocked = vop_generic_islocked, 118 .vop_pathconf = nfs_pathconf, 119 .vop_advlock = nfs_advlock, 120 .vop_bwrite = nfs_bwrite 121 }; 122 123 /* Special device vnode ops. */ 124 struct vops nfs_specvops = { 125 .vop_close = nfsspec_close, 126 .vop_access = nfsspec_access, 127 .vop_getattr = nfs_getattr, 128 .vop_setattr = nfs_setattr, 129 .vop_read = nfsspec_read, 130 .vop_write = nfsspec_write, 131 .vop_fsync = nfs_fsync, 132 .vop_inactive = nfs_inactive, 133 .vop_reclaim = nfs_reclaim, 134 .vop_lock = vop_generic_lock, 135 .vop_unlock = vop_generic_unlock, 136 .vop_print = nfs_print, 137 .vop_islocked = vop_generic_islocked, 138 139 /* XXX: Keep in sync with spec_vops. */ 140 .vop_lookup = vop_generic_lookup, 141 .vop_create = spec_badop, 142 .vop_mknod = spec_badop, 143 .vop_open = spec_open, 144 .vop_ioctl = spec_ioctl, 145 .vop_poll = spec_poll, 146 .vop_kqfilter = spec_kqfilter, 147 .vop_revoke = vop_generic_revoke, 148 .vop_remove = spec_badop, 149 .vop_link = spec_badop, 150 .vop_rename = spec_badop, 151 .vop_mkdir = spec_badop, 152 .vop_rmdir = spec_badop, 153 .vop_symlink = spec_badop, 154 .vop_readdir = spec_badop, 155 .vop_readlink = spec_badop, 156 .vop_abortop = spec_badop, 157 .vop_bmap = vop_generic_bmap, 158 .vop_strategy = spec_strategy, 159 .vop_pathconf = spec_pathconf, 160 .vop_advlock = spec_advlock, 161 .vop_bwrite = vop_generic_bwrite, 162 }; 163 164 #ifdef FIFO 165 struct vops nfs_fifovops = { 166 .vop_close = nfsfifo_close, 167 .vop_access = nfsspec_access, 168 .vop_getattr = nfs_getattr, 169 .vop_setattr = nfs_setattr, 170 .vop_read = nfsfifo_read, 171 .vop_write = nfsfifo_write, 172 .vop_fsync = nfs_fsync, 173 .vop_inactive = nfs_inactive, 174 .vop_reclaim = nfsfifo_reclaim, 175 .vop_lock = vop_generic_lock, 176 .vop_unlock = vop_generic_unlock, 177 .vop_print = nfs_print, 178 .vop_islocked = vop_generic_islocked, 179 .vop_bwrite = vop_generic_bwrite, 180 181 /* XXX: Keep in sync with fifo_vops. 
*/ 182 .vop_lookup = vop_generic_lookup, 183 .vop_create = fifo_badop, 184 .vop_mknod = fifo_badop, 185 .vop_open = fifo_open, 186 .vop_ioctl = fifo_ioctl, 187 .vop_poll = fifo_poll, 188 .vop_kqfilter = fifo_kqfilter, 189 .vop_revoke = vop_generic_revoke, 190 .vop_remove = fifo_badop, 191 .vop_link = fifo_badop, 192 .vop_rename = fifo_badop, 193 .vop_mkdir = fifo_badop, 194 .vop_rmdir = fifo_badop, 195 .vop_symlink = fifo_badop, 196 .vop_readdir = fifo_badop, 197 .vop_readlink = fifo_badop, 198 .vop_abortop = fifo_badop, 199 .vop_bmap = vop_generic_bmap, 200 .vop_strategy = fifo_badop, 201 .vop_pathconf = fifo_pathconf, 202 .vop_advlock = fifo_advlock, 203 }; 204 #endif /* FIFO */ 205 206 /* 207 * Global variables 208 */ 209 extern u_int32_t nfs_true, nfs_false; 210 extern u_int32_t nfs_xdrneg1; 211 extern struct nfsstats nfsstats; 212 extern nfstype nfsv3_type[9]; 213 int nfs_numasync = 0; 214 215 void 216 nfs_cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 217 { 218 struct nfsnode *np; 219 220 if (vp != NULL) { 221 np = VTONFS(vp); 222 np->n_ctime = np->n_vattr.va_ctime.tv_sec; 223 } else { 224 np = VTONFS(dvp); 225 if (!np->n_ctime) 226 np->n_ctime = np->n_vattr.va_mtime.tv_sec; 227 } 228 229 cache_enter(dvp, vp, cnp); 230 } 231 232 /* 233 * nfs null call from vfs. 234 */ 235 int 236 nfs_null(struct vnode *vp, struct ucred *cred, struct proc *procp) 237 { 238 struct nfsm_info info; 239 int error = 0; 240 241 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(0); 242 error = nfs_request(vp, NFSPROC_NULL, &info); 243 m_freem(info.nmi_mrep); 244 return (error); 245 } 246 247 /* 248 * nfs access vnode op. 249 * For nfs version 2, just return ok. File accesses may fail later. 250 * For nfs version 3, use the access rpc to check accessibility. If file modes 251 * are changed on the server, accesses might still fail later. 252 */ 253 int 254 nfs_access(void *v) 255 { 256 struct vop_access_args *ap = v; 257 struct vnode *vp = ap->a_vp; 258 u_int32_t *tl; 259 int32_t t1; 260 caddr_t cp2; 261 int error = 0, attrflag; 262 u_int32_t mode, rmode; 263 int v3 = NFS_ISV3(vp); 264 int cachevalid; 265 struct nfsm_info info; 266 267 struct nfsnode *np = VTONFS(vp); 268 269 /* 270 * Disallow write attempts on filesystems mounted read-only; 271 * unless the file is a socket, fifo, or a block or character 272 * device resident on the filesystem. 273 */ 274 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 275 switch (vp->v_type) { 276 case VREG: 277 case VDIR: 278 case VLNK: 279 return (EROFS); 280 default: 281 break; 282 } 283 } 284 285 /* 286 * Check access cache first. If a request has been made for this uid 287 * shortly before, use the cached result. 288 */ 289 cachevalid = (np->n_accstamp != -1 && 290 (time_second - np->n_accstamp) < nfs_attrtimeo(np) && 291 np->n_accuid == ap->a_cred->cr_uid); 292 293 if (cachevalid) { 294 if (!np->n_accerror) { 295 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 296 return (np->n_accerror); 297 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode) 298 return (np->n_accerror); 299 } 300 301 /* 302 * For nfs v3, do an access rpc, otherwise you are stuck emulating 303 * ufs_access() locally using the vattr. This may not be correct, 304 * since the server may apply other access criteria such as 305 * client uid-->server uid mapping that we do not know about, but 306 * this is better than just returning anything that is lying about 307 * in the cache. 
308 */ 309 if (v3) { 310 nfsstats.rpccnt[NFSPROC_ACCESS]++; 311 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED); 312 nfsm_fhtom(&info, vp, v3); 313 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 314 if (ap->a_mode & VREAD) 315 mode = NFSV3ACCESS_READ; 316 else 317 mode = 0; 318 if (vp->v_type == VDIR) { 319 if (ap->a_mode & VWRITE) 320 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 321 NFSV3ACCESS_DELETE); 322 if (ap->a_mode & VEXEC) 323 mode |= NFSV3ACCESS_LOOKUP; 324 } else { 325 if (ap->a_mode & VWRITE) 326 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 327 if (ap->a_mode & VEXEC) 328 mode |= NFSV3ACCESS_EXECUTE; 329 } 330 *tl = txdr_unsigned(mode); 331 332 info.nmi_procp = ap->a_p; 333 info.nmi_cred = ap->a_cred; 334 error = nfs_request(vp, NFSPROC_ACCESS, &info); 335 336 nfsm_postop_attr(vp, attrflag); 337 if (error) { 338 m_freem(info.nmi_mrep); 339 goto nfsmout; 340 } 341 342 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 343 rmode = fxdr_unsigned(u_int32_t, *tl); 344 /* 345 * The NFS V3 spec does not clarify whether or not 346 * the returned access bits can be a superset of 347 * the ones requested, so... 348 */ 349 if ((rmode & mode) != mode) 350 error = EACCES; 351 352 m_freem(info.nmi_mrep); 353 } else 354 return (nfsspec_access(ap)); 355 356 357 /* 358 * If we got the same result as for a previous, different request, OR 359 * it in. Don't update the timestamp in that case. 360 */ 361 if (!error || error == EACCES) { 362 if (cachevalid && np->n_accstamp != -1 && 363 error == np->n_accerror) { 364 if (!error) 365 np->n_accmode |= ap->a_mode; 366 else { 367 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 368 np->n_accmode = ap->a_mode; 369 } 370 } else { 371 np->n_accstamp = time_second; 372 np->n_accuid = ap->a_cred->cr_uid; 373 np->n_accmode = ap->a_mode; 374 np->n_accerror = error; 375 } 376 } 377 nfsmout: 378 return (error); 379 } 380 381 /* 382 * nfs open vnode op 383 * Check to see if the type is ok 384 * and that deletion is not in progress. 385 * For paged in text files, you will need to flush the page cache 386 * if consistency is lost. 387 */ 388 int 389 nfs_open(void *v) 390 { 391 struct vop_open_args *ap = v; 392 struct vnode *vp = ap->a_vp; 393 struct nfsnode *np = VTONFS(vp); 394 struct vattr vattr; 395 int error; 396 397 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 398 #ifdef DIAGNOSTIC 399 printf("open eacces vtyp=%d\n",vp->v_type); 400 #endif 401 return (EACCES); 402 } 403 404 /* 405 * Initialize read and write creds here, for swapfiles 406 * and other paths that don't set the creds themselves. 
407 */ 408 409 if (ap->a_mode & FREAD) { 410 if (np->n_rcred) { 411 crfree(np->n_rcred); 412 } 413 np->n_rcred = ap->a_cred; 414 crhold(np->n_rcred); 415 } 416 if (ap->a_mode & FWRITE) { 417 if (np->n_wcred) { 418 crfree(np->n_wcred); 419 } 420 np->n_wcred = ap->a_cred; 421 crhold(np->n_wcred); 422 } 423 424 if (np->n_flag & NMODIFIED) { 425 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 426 if (error == EINTR) 427 return (error); 428 uvm_vnp_uncache(vp); 429 NFS_INVALIDATE_ATTRCACHE(np); 430 if (vp->v_type == VDIR) 431 np->n_direofoffset = 0; 432 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 433 if (error) 434 return (error); 435 np->n_mtime = vattr.va_mtime; 436 } else { 437 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 438 if (error) 439 return (error); 440 if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) { 441 if (vp->v_type == VDIR) 442 np->n_direofoffset = 0; 443 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 444 if (error == EINTR) 445 return (error); 446 uvm_vnp_uncache(vp); 447 np->n_mtime = vattr.va_mtime; 448 } 449 } 450 /* For open/close consistency. */ 451 NFS_INVALIDATE_ATTRCACHE(np); 452 return (0); 453 } 454 455 /* 456 * nfs close vnode op 457 * What an NFS client should do upon close after writing is a debatable issue. 458 * Most NFS clients push delayed writes to the server upon close, basically for 459 * two reasons: 460 * 1 - So that any write errors may be reported back to the client process 461 * doing the close system call. By far the two most likely errors are 462 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 463 * 2 - To put a worst case upper bound on cache inconsistency between 464 * multiple clients for the file. 465 * There is also a consistency problem for Version 2 of the protocol w.r.t. 466 * not being able to tell if other clients are writing a file concurrently, 467 * since there is no way of knowing if the changed modify time in the reply 468 * is only due to the write for this client. 469 * (NFS Version 3 provides weak cache consistency data in the reply that 470 * should be sufficient to detect and handle this case.) 471 * 472 * The current code does the following: 473 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 474 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 475 * or commit them (this satisfies 1 and 2 except for the 476 * case where the server crashes after this close but 477 * before the commit RPC, which is felt to be "good 478 * enough". Changing the last argument to nfs_flush() to 479 * a 1 would force a commit operation, if it is felt a 480 * commit is necessary now. 481 */ 482 int 483 nfs_close(void *v) 484 { 485 struct vop_close_args *ap = v; 486 struct vnode *vp = ap->a_vp; 487 struct nfsnode *np = VTONFS(vp); 488 int error = 0; 489 490 if (vp->v_type == VREG) { 491 if (np->n_flag & NMODIFIED) { 492 if (NFS_ISV3(vp)) { 493 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0); 494 np->n_flag &= ~NMODIFIED; 495 } else 496 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 497 NFS_INVALIDATE_ATTRCACHE(np); 498 } 499 if (np->n_flag & NWRITEERR) { 500 np->n_flag &= ~NWRITEERR; 501 error = np->n_error; 502 } 503 } 504 return (error); 505 } 506 507 /* 508 * nfs getattr call from vfs. 
509 */ 510 int 511 nfs_getattr(void *v) 512 { 513 struct vop_getattr_args *ap = v; 514 struct vnode *vp = ap->a_vp; 515 struct nfsnode *np = VTONFS(vp); 516 struct nfsm_info info; 517 int32_t t1; 518 int error = 0; 519 520 info.nmi_v3 = NFS_ISV3(vp); 521 522 /* 523 * Update local times for special files. 524 */ 525 if (np->n_flag & (NACC | NUPD)) 526 np->n_flag |= NCHG; 527 /* 528 * First look in the cache. 529 */ 530 if (nfs_getattrcache(vp, ap->a_vap) == 0) 531 return (0); 532 533 nfsstats.rpccnt[NFSPROC_GETATTR]++; 534 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)); 535 nfsm_fhtom(&info, vp, info.nmi_v3); 536 info.nmi_procp = ap->a_p; 537 info.nmi_cred = ap->a_cred; 538 error = nfs_request(vp, NFSPROC_GETATTR, &info); 539 if (!error) 540 nfsm_loadattr(vp, ap->a_vap); 541 m_freem(info.nmi_mrep); 542 nfsmout: 543 return (error); 544 } 545 546 /* 547 * nfs setattr call. 548 */ 549 int 550 nfs_setattr(void *v) 551 { 552 struct vop_setattr_args *ap = v; 553 struct vnode *vp = ap->a_vp; 554 struct nfsnode *np = VTONFS(vp); 555 struct vattr *vap = ap->a_vap; 556 int hint = NOTE_ATTRIB; 557 int error = 0; 558 u_quad_t tsize = 0; 559 560 /* 561 * Setting of flags is not supported. 562 */ 563 if (vap->va_flags != VNOVAL) 564 return (EOPNOTSUPP); 565 566 /* 567 * Disallow write attempts if the filesystem is mounted read-only. 568 */ 569 if ((vap->va_uid != (uid_t)VNOVAL || 570 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || 571 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && 572 (vp->v_mount->mnt_flag & MNT_RDONLY)) 573 return (EROFS); 574 if (vap->va_size != VNOVAL) { 575 switch (vp->v_type) { 576 case VDIR: 577 return (EISDIR); 578 case VCHR: 579 case VBLK: 580 case VSOCK: 581 case VFIFO: 582 if (vap->va_mtime.tv_sec == VNOVAL && 583 vap->va_atime.tv_sec == VNOVAL && 584 vap->va_mode == (mode_t)VNOVAL && 585 vap->va_uid == (uid_t)VNOVAL && 586 vap->va_gid == (gid_t)VNOVAL) 587 return (0); 588 vap->va_size = VNOVAL; 589 break; 590 default: 591 /* 592 * Disallow write attempts if the filesystem is 593 * mounted read-only. 594 */ 595 if (vp->v_mount->mnt_flag & MNT_RDONLY) 596 return (EROFS); 597 if (vap->va_size == 0) 598 error = nfs_vinvalbuf(vp, 0, 599 ap->a_cred, ap->a_p); 600 else 601 error = nfs_vinvalbuf(vp, V_SAVE, 602 ap->a_cred, ap->a_p); 603 if (error) 604 return (error); 605 tsize = np->n_size; 606 np->n_size = np->n_vattr.va_size = vap->va_size; 607 uvm_vnp_setsize(vp, np->n_size); 608 }; 609 } else if ((vap->va_mtime.tv_sec != VNOVAL || 610 vap->va_atime.tv_sec != VNOVAL) && 611 vp->v_type == VREG && 612 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 613 ap->a_p)) == EINTR) 614 return (error); 615 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); 616 if (error && vap->va_size != VNOVAL) { 617 np->n_size = np->n_vattr.va_size = tsize; 618 uvm_vnp_setsize(vp, np->n_size); 619 } 620 621 if (vap->va_size != VNOVAL && vap->va_size < tsize) 622 hint |= NOTE_TRUNCATE; 623 624 VN_KNOTE(vp, hint); /* XXX setattrrpc? */ 625 626 return (error); 627 } 628 629 /* 630 * Do an nfs setattr rpc. 
631 */ 632 int 633 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred, 634 struct proc *procp) 635 { 636 struct nfsv2_sattr *sp; 637 struct nfsm_info info; 638 int32_t t1; 639 caddr_t cp2; 640 u_int32_t *tl; 641 int error = 0, wccflag = NFSV3_WCCRATTR; 642 int v3 = NFS_ISV3(vp); 643 644 info.nmi_v3 = NFS_ISV3(vp); 645 646 nfsstats.rpccnt[NFSPROC_SETATTR]++; 647 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_SATTR(v3)); 648 nfsm_fhtom(&info, vp, v3); 649 650 if (info.nmi_v3) { 651 nfsm_v3attrbuild(&info.nmi_mb, vap, 1); 652 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 653 *tl = nfs_false; 654 } else { 655 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 656 if (vap->va_mode == (mode_t)VNOVAL) 657 sp->sa_mode = nfs_xdrneg1; 658 else 659 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 660 if (vap->va_uid == (uid_t)VNOVAL) 661 sp->sa_uid = nfs_xdrneg1; 662 else 663 sp->sa_uid = txdr_unsigned(vap->va_uid); 664 if (vap->va_gid == (gid_t)VNOVAL) 665 sp->sa_gid = nfs_xdrneg1; 666 else 667 sp->sa_gid = txdr_unsigned(vap->va_gid); 668 sp->sa_size = txdr_unsigned(vap->va_size); 669 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 670 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 671 } 672 673 info.nmi_procp = procp; 674 info.nmi_cred = cred; 675 error = nfs_request(vp, NFSPROC_SETATTR, &info); 676 677 if (info.nmi_v3) 678 nfsm_wcc_data(vp, wccflag); 679 else if (error == 0) 680 nfsm_loadattr(vp, NULL); 681 682 m_freem(info.nmi_mrep); 683 nfsmout: 684 return (error); 685 } 686 687 /* 688 * nfs lookup call, one step at a time... 689 * First look in cache 690 * If not found, unlock the directory nfsnode and do the rpc 691 */ 692 int 693 nfs_lookup(void *v) 694 { 695 struct vop_lookup_args *ap = v; 696 struct componentname *cnp = ap->a_cnp; 697 struct vnode *dvp = ap->a_dvp; 698 struct vnode **vpp = ap->a_vpp; 699 struct proc *p = cnp->cn_proc; 700 struct nfsm_info info; 701 int flags; 702 struct vnode *newvp; 703 u_int32_t *tl; 704 int32_t t1; 705 struct nfsmount *nmp; 706 caddr_t cp2; 707 long len; 708 nfsfh_t *fhp; 709 struct nfsnode *np; 710 int lockparent, wantparent, error = 0, attrflag, fhsize; 711 712 info.nmi_v3 = NFS_ISV3(dvp); 713 714 cnp->cn_flags &= ~PDIRUNLOCK; 715 flags = cnp->cn_flags; 716 717 *vpp = NULLVP; 718 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 719 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 720 return (EROFS); 721 if (dvp->v_type != VDIR) 722 return (ENOTDIR); 723 lockparent = flags & LOCKPARENT; 724 wantparent = flags & (LOCKPARENT|WANTPARENT); 725 nmp = VFSTONFS(dvp->v_mount); 726 np = VTONFS(dvp); 727 728 /* 729 * Before tediously performing a linear scan of the directory, 730 * check the name cache to see if the directory/name pair 731 * we are looking for is known already. 732 * If the directory/name pair is found in the name cache, 733 * we have to ensure the directory has not changed from 734 * the time the cache entry has been created. If it has, 735 * the cache entry has to be ignored. 
736 */ 737 if ((error = cache_lookup(dvp, vpp, cnp)) >= 0) { 738 struct vattr vattr; 739 int err2; 740 741 if (error && error != ENOENT) { 742 *vpp = NULLVP; 743 return (error); 744 } 745 746 if (cnp->cn_flags & PDIRUNLOCK) { 747 err2 = vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p); 748 if (err2 != 0) { 749 *vpp = NULLVP; 750 return (err2); 751 } 752 cnp->cn_flags &= ~PDIRUNLOCK; 753 } 754 755 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_proc); 756 if (err2 != 0) { 757 if (error == 0) { 758 if (*vpp != dvp) 759 vput(*vpp); 760 else 761 vrele(*vpp); 762 } 763 *vpp = NULLVP; 764 return (err2); 765 } 766 767 if (error == ENOENT) { 768 if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred, 769 cnp->cn_proc) && vattr.va_mtime.tv_sec == 770 VTONFS(dvp)->n_ctime) 771 return (ENOENT); 772 cache_purge(dvp); 773 np->n_ctime = 0; 774 goto dorpc; 775 } 776 777 newvp = *vpp; 778 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_proc) 779 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) 780 { 781 nfsstats.lookupcache_hits++; 782 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 783 cnp->cn_flags |= SAVENAME; 784 if ((!lockparent || !(flags & ISLASTCN)) && 785 newvp != dvp) 786 VOP_UNLOCK(dvp, 0, p); 787 return (0); 788 } 789 cache_purge(newvp); 790 if (newvp != dvp) 791 vput(newvp); 792 else 793 vrele(newvp); 794 *vpp = NULLVP; 795 } 796 dorpc: 797 error = 0; 798 newvp = NULLVP; 799 nfsstats.lookupcache_misses++; 800 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 801 len = cnp->cn_namelen; 802 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 803 NFSX_UNSIGNED + nfsm_rndup(len)); 804 nfsm_fhtom(&info, dvp, info.nmi_v3); 805 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 806 807 info.nmi_procp = cnp->cn_proc; 808 info.nmi_cred = cnp->cn_cred; 809 error = nfs_request(dvp, NFSPROC_LOOKUP, &info); 810 811 if (error) { 812 if (info.nmi_v3) 813 nfsm_postop_attr(dvp, attrflag); 814 m_freem(info.nmi_mrep); 815 goto nfsmout; 816 } 817 818 nfsm_getfh(fhp, fhsize, info.nmi_v3); 819 820 /* 821 * Handle RENAME case... 822 */ 823 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) { 824 if (NFS_CMPFH(np, fhp, fhsize)) { 825 m_freem(info.nmi_mrep); 826 return (EISDIR); 827 } 828 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 829 if (error) { 830 m_freem(info.nmi_mrep); 831 return (error); 832 } 833 newvp = NFSTOV(np); 834 if (info.nmi_v3) { 835 nfsm_postop_attr(newvp, attrflag); 836 nfsm_postop_attr(dvp, attrflag); 837 } else 838 nfsm_loadattr(newvp, NULL); 839 *vpp = newvp; 840 m_freem(info.nmi_mrep); 841 cnp->cn_flags |= SAVENAME; 842 if (!lockparent) { 843 VOP_UNLOCK(dvp, 0, p); 844 cnp->cn_flags |= PDIRUNLOCK; 845 } 846 return (0); 847 } 848 849 /* 850 * The postop attr handling is duplicated for each if case, 851 * because it should be done while dvp is locked (unlocking 852 * dvp is different for each case). 
853 */ 854 855 if (NFS_CMPFH(np, fhp, fhsize)) { 856 vref(dvp); 857 newvp = dvp; 858 if (info.nmi_v3) { 859 nfsm_postop_attr(newvp, attrflag); 860 nfsm_postop_attr(dvp, attrflag); 861 } else 862 nfsm_loadattr(newvp, NULL); 863 } else if (flags & ISDOTDOT) { 864 VOP_UNLOCK(dvp, 0, p); 865 cnp->cn_flags |= PDIRUNLOCK; 866 867 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 868 if (error) { 869 if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p) == 0) 870 cnp->cn_flags &= ~PDIRUNLOCK; 871 m_freem(info.nmi_mrep); 872 return (error); 873 } 874 newvp = NFSTOV(np); 875 876 if (info.nmi_v3) { 877 nfsm_postop_attr(newvp, attrflag); 878 nfsm_postop_attr(dvp, attrflag); 879 } else 880 nfsm_loadattr(newvp, NULL); 881 882 if (lockparent && (flags & ISLASTCN)) { 883 if ((error = vn_lock(dvp, LK_EXCLUSIVE, p))) { 884 m_freem(info.nmi_mrep); 885 vput(newvp); 886 return error; 887 } 888 cnp->cn_flags &= ~PDIRUNLOCK; 889 } 890 891 } else { 892 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 893 if (error) { 894 m_freem(info.nmi_mrep); 895 return error; 896 } 897 newvp = NFSTOV(np); 898 if (info.nmi_v3) { 899 nfsm_postop_attr(newvp, attrflag); 900 nfsm_postop_attr(dvp, attrflag); 901 } else 902 nfsm_loadattr(newvp, NULL); 903 if (!lockparent || !(flags & ISLASTCN)) { 904 VOP_UNLOCK(dvp, 0, p); 905 cnp->cn_flags |= PDIRUNLOCK; 906 } 907 } 908 909 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 910 cnp->cn_flags |= SAVENAME; 911 if ((cnp->cn_flags & MAKEENTRY) && 912 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) { 913 nfs_cache_enter(dvp, newvp, cnp); 914 } 915 916 *vpp = newvp; 917 m_freem(info.nmi_mrep); 918 919 nfsmout: 920 if (error) { 921 /* 922 * We get here only because of errors returned by 923 * the RPC. Otherwise we'll have returned above 924 * (the nfsm_* macros will jump to nfsmout 925 * on error). 926 */ 927 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) && 928 cnp->cn_nameiop != CREATE) { 929 nfs_cache_enter(dvp, NULL, cnp); 930 } 931 if (newvp != NULLVP) { 932 vrele(newvp); 933 if (newvp != dvp) 934 VOP_UNLOCK(newvp, 0, p); 935 } 936 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 937 (flags & ISLASTCN) && error == ENOENT) { 938 if (dvp->v_mount->mnt_flag & MNT_RDONLY) 939 error = EROFS; 940 else 941 error = EJUSTRETURN; 942 } 943 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 944 cnp->cn_flags |= SAVENAME; 945 *vpp = NULL; 946 } 947 return (error); 948 } 949 950 /* 951 * nfs read call. 952 * Just call nfs_bioread() to do the work. 953 */ 954 int 955 nfs_read(void *v) 956 { 957 struct vop_read_args *ap = v; 958 struct vnode *vp = ap->a_vp; 959 960 if (vp->v_type != VREG) 961 return (EPERM); 962 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred)); 963 } 964 965 /* 966 * nfs readlink call 967 */ 968 int 969 nfs_readlink(void *v) 970 { 971 struct vop_readlink_args *ap = v; 972 struct vnode *vp = ap->a_vp; 973 974 if (vp->v_type != VLNK) 975 return (EPERM); 976 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred)); 977 } 978 979 /* 980 * Do a readlink rpc. 981 * Called by nfs_doio() from below the buffer cache. 
982 */ 983 int 984 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 985 { 986 struct nfsm_info info; 987 u_int32_t *tl; 988 int32_t t1; 989 caddr_t cp2; 990 int error = 0, len, attrflag; 991 992 info.nmi_v3 = NFS_ISV3(vp); 993 994 nfsstats.rpccnt[NFSPROC_READLINK]++; 995 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)); 996 nfsm_fhtom(&info, vp, info.nmi_v3); 997 998 info.nmi_procp = curproc; 999 info.nmi_cred = cred; 1000 error = nfs_request(vp, NFSPROC_READLINK, &info); 1001 1002 if (info.nmi_v3) 1003 nfsm_postop_attr(vp, attrflag); 1004 if (!error) { 1005 nfsm_strsiz(len, NFS_MAXPATHLEN); 1006 nfsm_mtouio(uiop, len); 1007 } 1008 1009 m_freem(info.nmi_mrep); 1010 1011 nfsmout: 1012 return (error); 1013 } 1014 1015 /* 1016 * nfs read rpc call 1017 * Ditto above 1018 */ 1019 int 1020 nfs_readrpc(struct vnode *vp, struct uio *uiop) 1021 { 1022 struct nfsm_info info; 1023 u_int32_t *tl; 1024 int32_t t1; 1025 caddr_t cp2; 1026 struct nfsmount *nmp; 1027 int error = 0, len, retlen, tsiz, eof, attrflag; 1028 1029 info.nmi_v3 = NFS_ISV3(vp); 1030 1031 eof = 0; 1032 1033 nmp = VFSTONFS(vp->v_mount); 1034 tsiz = uiop->uio_resid; 1035 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3) 1036 return (EFBIG); 1037 while (tsiz > 0) { 1038 nfsstats.rpccnt[NFSPROC_READ]++; 1039 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz; 1040 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1041 NFSX_UNSIGNED * 3); 1042 nfsm_fhtom(&info, vp, info.nmi_v3); 1043 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED * 3); 1044 if (info.nmi_v3) { 1045 txdr_hyper(uiop->uio_offset, tl); 1046 *(tl + 2) = txdr_unsigned(len); 1047 } else { 1048 *tl++ = txdr_unsigned(uiop->uio_offset); 1049 *tl++ = txdr_unsigned(len); 1050 *tl = 0; 1051 } 1052 1053 info.nmi_procp = curproc; 1054 info.nmi_cred = VTONFS(vp)->n_rcred; 1055 error = nfs_request(vp, NFSPROC_READ, &info); 1056 if (info.nmi_v3) 1057 nfsm_postop_attr(vp, attrflag); 1058 if (error) { 1059 m_freem(info.nmi_mrep); 1060 goto nfsmout; 1061 } 1062 1063 if (info.nmi_v3) { 1064 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1065 eof = fxdr_unsigned(int, *(tl + 1)); 1066 } else { 1067 nfsm_loadattr(vp, NULL); 1068 } 1069 1070 nfsm_strsiz(retlen, nmp->nm_rsize); 1071 nfsm_mtouio(uiop, retlen); 1072 m_freem(info.nmi_mrep); 1073 tsiz -= retlen; 1074 if (info.nmi_v3) { 1075 if (eof || retlen == 0) 1076 tsiz = 0; 1077 } else if (retlen < len) 1078 tsiz = 0; 1079 } 1080 1081 nfsmout: 1082 return (error); 1083 } 1084 1085 /* 1086 * nfs write call 1087 */ 1088 int 1089 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, int *must_commit) 1090 { 1091 struct nfsm_info info; 1092 u_int32_t *tl; 1093 int32_t t1, backup; 1094 caddr_t cp2; 1095 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1096 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit; 1097 int committed = NFSV3WRITE_FILESYNC; 1098 1099 info.nmi_v3 = NFS_ISV3(vp); 1100 1101 #ifdef DIAGNOSTIC 1102 if (uiop->uio_iovcnt != 1) 1103 panic("nfs: writerpc iovcnt > 1"); 1104 #endif 1105 *must_commit = 0; 1106 tsiz = uiop->uio_resid; 1107 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3) 1108 return (EFBIG); 1109 while (tsiz > 0) { 1110 nfsstats.rpccnt[NFSPROC_WRITE]++; 1111 len = (tsiz > nmp->nm_wsize) ? 
nmp->nm_wsize : tsiz; 1112 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 1113 + 5 * NFSX_UNSIGNED + nfsm_rndup(len)); 1114 nfsm_fhtom(&info, vp, info.nmi_v3); 1115 if (info.nmi_v3) { 1116 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED); 1117 txdr_hyper(uiop->uio_offset, tl); 1118 tl += 2; 1119 *tl++ = txdr_unsigned(len); 1120 *tl++ = txdr_unsigned(*iomode); 1121 *tl = txdr_unsigned(len); 1122 } else { 1123 u_int32_t x; 1124 1125 tl = nfsm_build(&info.nmi_mb, 4 * NFSX_UNSIGNED); 1126 /* Set both "begin" and "current" to non-garbage. */ 1127 x = txdr_unsigned((u_int32_t)uiop->uio_offset); 1128 *tl++ = x; /* "begin offset" */ 1129 *tl++ = x; /* "current offset" */ 1130 x = txdr_unsigned(len); 1131 *tl++ = x; /* total to this offset */ 1132 *tl = x; /* size of this write */ 1133 1134 } 1135 nfsm_uiotombuf(&info.nmi_mb, uiop, len); 1136 1137 info.nmi_procp = curproc; 1138 info.nmi_cred = VTONFS(vp)->n_wcred; 1139 error = nfs_request(vp, NFSPROC_WRITE, &info); 1140 if (info.nmi_v3) { 1141 wccflag = NFSV3_WCCCHK; 1142 nfsm_wcc_data(vp, wccflag); 1143 } 1144 1145 if (error) { 1146 m_freem(info.nmi_mrep); 1147 goto nfsmout; 1148 } 1149 1150 if (info.nmi_v3) { 1151 wccflag = NFSV3_WCCCHK; 1152 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED 1153 + NFSX_V3WRITEVERF); 1154 rlen = fxdr_unsigned(int, *tl++); 1155 if (rlen == 0) { 1156 error = NFSERR_IO; 1157 break; 1158 } else if (rlen < len) { 1159 backup = len - rlen; 1160 uiop->uio_iov->iov_base = 1161 (char *)uiop->uio_iov->iov_base - 1162 backup; 1163 uiop->uio_iov->iov_len += backup; 1164 uiop->uio_offset -= backup; 1165 uiop->uio_resid += backup; 1166 len = rlen; 1167 } 1168 commit = fxdr_unsigned(int, *tl++); 1169 1170 /* 1171 * Return the lowest committment level 1172 * obtained by any of the RPCs. 1173 */ 1174 if (committed == NFSV3WRITE_FILESYNC) 1175 committed = commit; 1176 else if (committed == NFSV3WRITE_DATASYNC && 1177 commit == NFSV3WRITE_UNSTABLE) 1178 committed = commit; 1179 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) { 1180 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 1181 NFSX_V3WRITEVERF); 1182 nmp->nm_flag |= NFSMNT_HASWRITEVERF; 1183 } else if (bcmp((caddr_t)tl, 1184 (caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) { 1185 *must_commit = 1; 1186 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 1187 NFSX_V3WRITEVERF); 1188 } 1189 } else { 1190 nfsm_loadattr(vp, NULL); 1191 } 1192 if (wccflag) 1193 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime; 1194 m_freem(info.nmi_mrep); 1195 tsiz -= len; 1196 } 1197 nfsmout: 1198 *iomode = committed; 1199 if (error) 1200 uiop->uio_resid = tsiz; 1201 return (error); 1202 } 1203 1204 /* 1205 * nfs mknod rpc 1206 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1207 * mode set to specify the file type and the size field for rdev. 
1208 */ 1209 int 1210 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1211 struct vattr *vap) 1212 { 1213 struct nfsv2_sattr *sp; 1214 struct nfsm_info info; 1215 u_int32_t *tl; 1216 int32_t t1; 1217 struct vnode *newvp = NULL; 1218 struct nfsnode *np = NULL; 1219 char *cp2; 1220 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1221 u_int32_t rdev; 1222 1223 info.nmi_v3 = NFS_ISV3(dvp); 1224 1225 if (vap->va_type == VCHR || vap->va_type == VBLK) 1226 rdev = txdr_unsigned(vap->va_rdev); 1227 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1228 rdev = nfs_xdrneg1; 1229 else { 1230 VOP_ABORTOP(dvp, cnp); 1231 vput(dvp); 1232 return (EOPNOTSUPP); 1233 } 1234 nfsstats.rpccnt[NFSPROC_MKNOD]++; 1235 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1236 4 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + 1237 NFSX_SATTR(info.nmi_v3)); 1238 nfsm_fhtom(&info, dvp, info.nmi_v3); 1239 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1240 1241 if (info.nmi_v3) { 1242 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 1243 *tl++ = vtonfsv3_type(vap->va_type); 1244 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1245 if (vap->va_type == VCHR || vap->va_type == VBLK) { 1246 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED); 1247 *tl++ = txdr_unsigned(major(vap->va_rdev)); 1248 *tl = txdr_unsigned(minor(vap->va_rdev)); 1249 } 1250 } else { 1251 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1252 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1253 sp->sa_uid = nfs_xdrneg1; 1254 sp->sa_gid = nfs_xdrneg1; 1255 sp->sa_size = rdev; 1256 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1257 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1258 } 1259 1260 KASSERT(cnp->cn_proc == curproc); 1261 info.nmi_procp = cnp->cn_proc; 1262 info.nmi_cred = cnp->cn_cred; 1263 error = nfs_request(dvp, NFSPROC_MKNOD, &info); 1264 if (!error) { 1265 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1266 if (!gotvp) { 1267 if (newvp) { 1268 vrele(newvp); 1269 newvp = NULL; 1270 } 1271 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1272 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1273 if (!error) 1274 newvp = NFSTOV(np); 1275 } 1276 } 1277 if (info.nmi_v3) 1278 nfsm_wcc_data(dvp, wccflag); 1279 m_freem(info.nmi_mrep); 1280 1281 nfsmout: 1282 if (error) { 1283 if (newvp) 1284 vrele(newvp); 1285 } else { 1286 if (cnp->cn_flags & MAKEENTRY) 1287 nfs_cache_enter(dvp, newvp, cnp); 1288 *vpp = newvp; 1289 } 1290 pool_put(&namei_pool, cnp->cn_pnbuf); 1291 VTONFS(dvp)->n_flag |= NMODIFIED; 1292 if (!wccflag) 1293 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1294 vrele(dvp); 1295 return (error); 1296 } 1297 1298 /* 1299 * nfs mknod vop 1300 * just call nfs_mknodrpc() to do the work. 
1301 */ 1302 int 1303 nfs_mknod(void *v) 1304 { 1305 struct vop_mknod_args *ap = v; 1306 struct vnode *newvp; 1307 int error; 1308 1309 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap); 1310 if (!error) 1311 vrele(newvp); 1312 1313 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1314 1315 return (error); 1316 } 1317 1318 int 1319 nfs_create(void *v) 1320 { 1321 struct vop_create_args *ap = v; 1322 struct vnode *dvp = ap->a_dvp; 1323 struct vattr *vap = ap->a_vap; 1324 struct componentname *cnp = ap->a_cnp; 1325 struct nfsv2_sattr *sp; 1326 struct nfsm_info info; 1327 u_int32_t *tl; 1328 int32_t t1; 1329 struct nfsnode *np = NULL; 1330 struct vnode *newvp = NULL; 1331 caddr_t cp2; 1332 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0; 1333 1334 info.nmi_v3 = NFS_ISV3(dvp); 1335 1336 /* 1337 * Oops, not for me.. 1338 */ 1339 if (vap->va_type == VSOCK) 1340 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1341 1342 if (vap->va_vaflags & VA_EXCLUSIVE) 1343 fmode |= O_EXCL; 1344 1345 again: 1346 nfsstats.rpccnt[NFSPROC_CREATE]++; 1347 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1348 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + 1349 NFSX_SATTR(info.nmi_v3)); 1350 nfsm_fhtom(&info, dvp, info.nmi_v3); 1351 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1352 if (info.nmi_v3) { 1353 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 1354 if (fmode & O_EXCL) { 1355 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE); 1356 tl = nfsm_build(&info.nmi_mb, NFSX_V3CREATEVERF); 1357 arc4random_buf(tl, sizeof(*tl) * 2); 1358 } else { 1359 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED); 1360 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1361 } 1362 } else { 1363 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1364 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1365 sp->sa_uid = nfs_xdrneg1; 1366 sp->sa_gid = nfs_xdrneg1; 1367 sp->sa_size = 0; 1368 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1369 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1370 } 1371 1372 KASSERT(cnp->cn_proc == curproc); 1373 info.nmi_procp = cnp->cn_proc; 1374 info.nmi_cred = cnp->cn_cred; 1375 error = nfs_request(dvp, NFSPROC_CREATE, &info); 1376 if (!error) { 1377 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1378 if (!gotvp) { 1379 if (newvp) { 1380 vrele(newvp); 1381 newvp = NULL; 1382 } 1383 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1384 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1385 if (!error) 1386 newvp = NFSTOV(np); 1387 } 1388 } 1389 if (info.nmi_v3) 1390 nfsm_wcc_data(dvp, wccflag); 1391 m_freem(info.nmi_mrep); 1392 1393 nfsmout: 1394 if (error) { 1395 if (info.nmi_v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) { 1396 fmode &= ~O_EXCL; 1397 goto again; 1398 } 1399 if (newvp) 1400 vrele(newvp); 1401 } else if (info.nmi_v3 && (fmode & O_EXCL)) 1402 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc); 1403 if (!error) { 1404 if (cnp->cn_flags & MAKEENTRY) 1405 nfs_cache_enter(dvp, newvp, cnp); 1406 *ap->a_vpp = newvp; 1407 } 1408 pool_put(&namei_pool, cnp->cn_pnbuf); 1409 VTONFS(dvp)->n_flag |= NMODIFIED; 1410 if (!wccflag) 1411 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1412 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1413 vrele(dvp); 1414 return (error); 1415 } 1416 1417 /* 1418 * nfs file remove call 1419 * To try and make nfs semantics closer to ufs semantics, a file that has 1420 * other processes using the vnode is renamed instead of removed and then 1421 * removed later on the last close. 
1422 * - If v_usecount > 1 1423 * If a rename is not already in the works 1424 * call nfs_sillyrename() to set it up 1425 * else 1426 * do the remove rpc 1427 */ 1428 int 1429 nfs_remove(void *v) 1430 { 1431 struct vop_remove_args *ap = v; 1432 struct vnode *vp = ap->a_vp; 1433 struct vnode *dvp = ap->a_dvp; 1434 struct componentname *cnp = ap->a_cnp; 1435 struct nfsnode *np = VTONFS(vp); 1436 int error = 0; 1437 struct vattr vattr; 1438 1439 #ifdef DIAGNOSTIC 1440 if ((cnp->cn_flags & HASBUF) == 0) 1441 panic("nfs_remove: no name"); 1442 if (vp->v_usecount < 1) 1443 panic("nfs_remove: bad v_usecount"); 1444 #endif 1445 if (vp->v_type == VDIR) 1446 error = EPERM; 1447 else if (vp->v_usecount == 1 || (np->n_sillyrename && 1448 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 && 1449 vattr.va_nlink > 1)) { 1450 /* 1451 * Purge the name cache so that the chance of a lookup for 1452 * the name succeeding while the remove is in progress is 1453 * minimized. Without node locking it can still happen, such 1454 * that an I/O op returns ESTALE, but since you get this if 1455 * another host removes the file.. 1456 */ 1457 cache_purge(vp); 1458 /* 1459 * throw away biocache buffers, mainly to avoid 1460 * unnecessary delayed writes later. 1461 */ 1462 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc); 1463 /* Do the rpc */ 1464 if (error != EINTR) 1465 error = nfs_removerpc(dvp, cnp->cn_nameptr, 1466 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc); 1467 /* 1468 * Kludge City: If the first reply to the remove rpc is lost.. 1469 * the reply to the retransmitted request will be ENOENT 1470 * since the file was in fact removed 1471 * Therefore, we cheat and return success. 1472 */ 1473 if (error == ENOENT) 1474 error = 0; 1475 } else if (!np->n_sillyrename) 1476 error = nfs_sillyrename(dvp, vp, cnp); 1477 pool_put(&namei_pool, cnp->cn_pnbuf); 1478 NFS_INVALIDATE_ATTRCACHE(np); 1479 vrele(dvp); 1480 vrele(vp); 1481 1482 VN_KNOTE(vp, NOTE_DELETE); 1483 VN_KNOTE(dvp, NOTE_WRITE); 1484 1485 return (error); 1486 } 1487 1488 /* 1489 * nfs file remove rpc called from nfs_inactive 1490 */ 1491 int 1492 nfs_removeit(struct sillyrename *sp) 1493 { 1494 /* 1495 * Make sure that the directory vnode is still valid. 1496 * XXX we should lock sp->s_dvp here. 1497 * 1498 * NFS can potentially try to nuke a silly *after* the directory 1499 * has already been pushed out on a forced unmount. Since the silly 1500 * is going to go away anyway, this is fine. 1501 */ 1502 if (sp->s_dvp->v_type == VBAD) 1503 return (0); 1504 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred, 1505 NULL)); 1506 } 1507 1508 /* 1509 * Nfs remove rpc, called from nfs_remove() and nfs_removeit(). 
1510 */ 1511 int 1512 nfs_removerpc(struct vnode *dvp, char *name, int namelen, struct ucred *cred, 1513 struct proc *proc) 1514 { 1515 struct nfsm_info info; 1516 u_int32_t *tl; 1517 int32_t t1; 1518 caddr_t cp2; 1519 int error = 0, wccflag = NFSV3_WCCRATTR; 1520 1521 info.nmi_v3 = NFS_ISV3(dvp); 1522 1523 nfsstats.rpccnt[NFSPROC_REMOVE]++; 1524 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1525 NFSX_UNSIGNED + nfsm_rndup(namelen)); 1526 nfsm_fhtom(&info, dvp, info.nmi_v3); 1527 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 1528 1529 info.nmi_procp = proc; 1530 info.nmi_cred = cred; 1531 error = nfs_request(dvp, NFSPROC_REMOVE, &info); 1532 if (info.nmi_v3) 1533 nfsm_wcc_data(dvp, wccflag); 1534 m_freem(info.nmi_mrep); 1535 1536 nfsmout: 1537 VTONFS(dvp)->n_flag |= NMODIFIED; 1538 if (!wccflag) 1539 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1540 return (error); 1541 } 1542 1543 /* 1544 * nfs file rename call 1545 */ 1546 int 1547 nfs_rename(void *v) 1548 { 1549 struct vop_rename_args *ap = v; 1550 struct vnode *fvp = ap->a_fvp; 1551 struct vnode *tvp = ap->a_tvp; 1552 struct vnode *fdvp = ap->a_fdvp; 1553 struct vnode *tdvp = ap->a_tdvp; 1554 struct componentname *tcnp = ap->a_tcnp; 1555 struct componentname *fcnp = ap->a_fcnp; 1556 int error; 1557 1558 #ifdef DIAGNOSTIC 1559 if ((tcnp->cn_flags & HASBUF) == 0 || 1560 (fcnp->cn_flags & HASBUF) == 0) 1561 panic("nfs_rename: no name"); 1562 #endif 1563 /* Check for cross-device rename */ 1564 if ((fvp->v_mount != tdvp->v_mount) || 1565 (tvp && (fvp->v_mount != tvp->v_mount))) { 1566 error = EXDEV; 1567 goto out; 1568 } 1569 1570 /* 1571 * If the tvp exists and is in use, sillyrename it before doing the 1572 * rename of the new file over it. 1573 */ 1574 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename && 1575 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { 1576 VN_KNOTE(tvp, NOTE_DELETE); 1577 vrele(tvp); 1578 tvp = NULL; 1579 } 1580 1581 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1582 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1583 tcnp->cn_proc); 1584 1585 VN_KNOTE(fdvp, NOTE_WRITE); 1586 VN_KNOTE(tdvp, NOTE_WRITE); 1587 1588 if (fvp->v_type == VDIR) { 1589 if (tvp != NULL && tvp->v_type == VDIR) 1590 cache_purge(tdvp); 1591 cache_purge(fdvp); 1592 } 1593 out: 1594 if (tdvp == tvp) 1595 vrele(tdvp); 1596 else 1597 vput(tdvp); 1598 if (tvp) 1599 vput(tvp); 1600 vrele(fdvp); 1601 vrele(fvp); 1602 /* 1603 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1604 */ 1605 if (error == ENOENT) 1606 error = 0; 1607 return (error); 1608 } 1609 1610 /* 1611 * nfs file rename rpc called from nfs_remove() above 1612 */ 1613 int 1614 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, 1615 struct sillyrename *sp) 1616 { 1617 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, 1618 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curproc)); 1619 } 1620 1621 /* 1622 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 
1623 */ 1624 int 1625 nfs_renamerpc(struct vnode *fdvp, char *fnameptr, int fnamelen, 1626 struct vnode *tdvp, char *tnameptr, int tnamelen, struct ucred *cred, 1627 struct proc *proc) 1628 { 1629 struct nfsm_info info; 1630 u_int32_t *tl; 1631 int32_t t1; 1632 caddr_t cp2; 1633 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 1634 1635 info.nmi_v3 = NFS_ISV3(fdvp); 1636 1637 nfsstats.rpccnt[NFSPROC_RENAME]++; 1638 info.nmi_mb = info.nmi_mreq = nfsm_reqhead((NFSX_FH(info.nmi_v3) + 1639 NFSX_UNSIGNED) * 2 + nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen)); 1640 nfsm_fhtom(&info, fdvp, info.nmi_v3); 1641 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 1642 nfsm_fhtom(&info, tdvp, info.nmi_v3); 1643 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 1644 1645 info.nmi_procp = proc; 1646 info.nmi_cred = cred; 1647 error = nfs_request(fdvp, NFSPROC_RENAME, &info); 1648 if (info.nmi_v3) { 1649 nfsm_wcc_data(fdvp, fwccflag); 1650 nfsm_wcc_data(tdvp, twccflag); 1651 } 1652 m_freem(info.nmi_mrep); 1653 1654 nfsmout: 1655 VTONFS(fdvp)->n_flag |= NMODIFIED; 1656 VTONFS(tdvp)->n_flag |= NMODIFIED; 1657 if (!fwccflag) 1658 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp)); 1659 if (!twccflag) 1660 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp)); 1661 return (error); 1662 } 1663 1664 /* 1665 * nfs hard link create call 1666 */ 1667 int 1668 nfs_link(void *v) 1669 { 1670 struct vop_link_args *ap = v; 1671 struct vnode *vp = ap->a_vp; 1672 struct vnode *dvp = ap->a_dvp; 1673 struct componentname *cnp = ap->a_cnp; 1674 struct nfsm_info info; 1675 u_int32_t *tl; 1676 int32_t t1; 1677 caddr_t cp2; 1678 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 1679 1680 info.nmi_v3 = NFS_ISV3(vp); 1681 1682 if (dvp->v_mount != vp->v_mount) { 1683 pool_put(&namei_pool, cnp->cn_pnbuf); 1684 vput(dvp); 1685 return (EXDEV); 1686 } 1687 1688 /* 1689 * Push all writes to the server, so that the attribute cache 1690 * doesn't get "out of sync" with the server. 1691 * XXX There should be a better way! 
1692 */ 1693 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc); 1694 1695 nfsstats.rpccnt[NFSPROC_LINK]++; 1696 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(2 * NFSX_FH(info.nmi_v3) + 1697 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1698 nfsm_fhtom(&info, vp, info.nmi_v3); 1699 nfsm_fhtom(&info, dvp, info.nmi_v3); 1700 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1701 1702 info.nmi_procp = cnp->cn_proc; 1703 info.nmi_cred = cnp->cn_cred; 1704 error = nfs_request(vp, NFSPROC_LINK, &info); 1705 if (info.nmi_v3) { 1706 nfsm_postop_attr(vp, attrflag); 1707 nfsm_wcc_data(dvp, wccflag); 1708 } 1709 m_freem(info.nmi_mrep); 1710 nfsmout: 1711 pool_put(&namei_pool, cnp->cn_pnbuf); 1712 VTONFS(dvp)->n_flag |= NMODIFIED; 1713 if (!attrflag) 1714 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 1715 if (!wccflag) 1716 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1717 1718 VN_KNOTE(vp, NOTE_LINK); 1719 VN_KNOTE(dvp, NOTE_WRITE); 1720 vput(dvp); 1721 return (error); 1722 } 1723 1724 /* 1725 * nfs symbolic link create call 1726 */ 1727 int 1728 nfs_symlink(void *v) 1729 { 1730 struct vop_symlink_args *ap = v; 1731 struct vnode *dvp = ap->a_dvp; 1732 struct vattr *vap = ap->a_vap; 1733 struct componentname *cnp = ap->a_cnp; 1734 struct nfsv2_sattr *sp; 1735 struct nfsm_info info; 1736 u_int32_t *tl; 1737 int32_t t1; 1738 caddr_t cp2; 1739 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 1740 struct vnode *newvp = NULL; 1741 1742 info.nmi_v3 = NFS_ISV3(dvp); 1743 1744 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 1745 slen = strlen(ap->a_target); 1746 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1747 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + 1748 NFSX_SATTR(info.nmi_v3)); 1749 nfsm_fhtom(&info, dvp, info.nmi_v3); 1750 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1751 if (info.nmi_v3) 1752 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1753 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 1754 if (!info.nmi_v3) { 1755 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1756 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 1757 sp->sa_uid = nfs_xdrneg1; 1758 sp->sa_gid = nfs_xdrneg1; 1759 sp->sa_size = nfs_xdrneg1; 1760 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1761 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1762 } 1763 1764 info.nmi_procp = cnp->cn_proc; 1765 info.nmi_cred = cnp->cn_cred; 1766 error = nfs_request(dvp, NFSPROC_SYMLINK, &info); 1767 if (info.nmi_v3) { 1768 if (!error) 1769 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1770 nfsm_wcc_data(dvp, wccflag); 1771 } 1772 m_freem(info.nmi_mrep); 1773 1774 nfsmout: 1775 if (newvp) 1776 vrele(newvp); 1777 pool_put(&namei_pool, cnp->cn_pnbuf); 1778 VTONFS(dvp)->n_flag |= NMODIFIED; 1779 if (!wccflag) 1780 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1781 VN_KNOTE(dvp, NOTE_WRITE); 1782 vrele(dvp); 1783 return (error); 1784 } 1785 1786 /* 1787 * nfs make dir call 1788 */ 1789 int 1790 nfs_mkdir(void *v) 1791 { 1792 struct vop_mkdir_args *ap = v; 1793 struct vnode *dvp = ap->a_dvp; 1794 struct vattr *vap = ap->a_vap; 1795 struct componentname *cnp = ap->a_cnp; 1796 struct nfsv2_sattr *sp; 1797 struct nfsm_info info; 1798 u_int32_t *tl; 1799 int32_t t1; 1800 int len; 1801 struct nfsnode *np = NULL; 1802 struct vnode *newvp = NULL; 1803 caddr_t cp2; 1804 int error = 0, wccflag = NFSV3_WCCRATTR; 1805 int gotvp = 0; 1806 1807 info.nmi_v3 = NFS_ISV3(dvp); 1808 1809 len = cnp->cn_namelen; 1810 nfsstats.rpccnt[NFSPROC_MKDIR]++; 1811 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1812 
NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(info.nmi_v3)); 1813 nfsm_fhtom(&info, dvp, info.nmi_v3); 1814 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 1815 1816 if (info.nmi_v3) { 1817 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1818 } else { 1819 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1820 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 1821 sp->sa_uid = nfs_xdrneg1; 1822 sp->sa_gid = nfs_xdrneg1; 1823 sp->sa_size = nfs_xdrneg1; 1824 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1825 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1826 } 1827 1828 info.nmi_procp = cnp->cn_proc; 1829 info.nmi_cred = cnp->cn_cred; 1830 error = nfs_request(dvp, NFSPROC_MKDIR, &info); 1831 if (!error) 1832 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1833 if (info.nmi_v3) 1834 nfsm_wcc_data(dvp, wccflag); 1835 m_freem(info.nmi_mrep); 1836 1837 nfsmout: 1838 VTONFS(dvp)->n_flag |= NMODIFIED; 1839 if (!wccflag) 1840 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1841 1842 if (error == 0 && newvp == NULL) { 1843 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 1844 cnp->cn_proc, &np); 1845 if (!error) { 1846 newvp = NFSTOV(np); 1847 if (newvp->v_type != VDIR) 1848 error = EEXIST; 1849 } 1850 } 1851 if (error) { 1852 if (newvp) 1853 vrele(newvp); 1854 } else { 1855 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK); 1856 if (cnp->cn_flags & MAKEENTRY) 1857 nfs_cache_enter(dvp, newvp, cnp); 1858 *ap->a_vpp = newvp; 1859 } 1860 pool_put(&namei_pool, cnp->cn_pnbuf); 1861 vrele(dvp); 1862 return (error); 1863 } 1864 1865 /* 1866 * nfs remove directory call 1867 */ 1868 int 1869 nfs_rmdir(void *v) 1870 { 1871 struct vop_rmdir_args *ap = v; 1872 struct vnode *vp = ap->a_vp; 1873 struct vnode *dvp = ap->a_dvp; 1874 struct componentname *cnp = ap->a_cnp; 1875 struct nfsm_info info; 1876 u_int32_t *tl; 1877 int32_t t1; 1878 caddr_t cp2; 1879 int error = 0, wccflag = NFSV3_WCCRATTR; 1880 1881 info.nmi_v3 = NFS_ISV3(dvp); 1882 1883 if (dvp == vp) { 1884 vrele(dvp); 1885 vrele(dvp); 1886 pool_put(&namei_pool, cnp->cn_pnbuf); 1887 return (EINVAL); 1888 } 1889 1890 nfsstats.rpccnt[NFSPROC_RMDIR]++; 1891 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1892 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1893 nfsm_fhtom(&info, dvp, info.nmi_v3); 1894 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1895 1896 info.nmi_procp = cnp->cn_proc; 1897 info.nmi_cred = cnp->cn_cred; 1898 error = nfs_request(dvp, NFSPROC_RMDIR, &info); 1899 if (info.nmi_v3) 1900 nfsm_wcc_data(dvp, wccflag); 1901 m_freem(info.nmi_mrep); 1902 1903 nfsmout: 1904 pool_put(&namei_pool, cnp->cn_pnbuf); 1905 VTONFS(dvp)->n_flag |= NMODIFIED; 1906 if (!wccflag) 1907 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1908 1909 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK); 1910 VN_KNOTE(vp, NOTE_DELETE); 1911 1912 cache_purge(vp); 1913 vrele(vp); 1914 vrele(dvp); 1915 /* 1916 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 1917 */ 1918 if (error == ENOENT) 1919 error = 0; 1920 return (error); 1921 } 1922 1923 1924 /* 1925 * The readdir logic below has a big design bug. It stores the NFS cookie in 1926 * the returned uio->uio_offset but does not store the verifier (it cannot). 1927 * Instead, the code stores the verifier in the nfsnode and applies that 1928 * verifies to all cookies, no matter what verifier was originally with 1929 * the cookie. 1930 * 1931 * From a practical standpoint, this is not a problem since almost all 1932 * NFS servers do not change the validity of cookies across deletes 1933 * and inserts. 
1934 */ 1935 1936 struct nfs_dirent { 1937 u_int32_t cookie[2]; 1938 struct dirent dirent; 1939 }; 1940 1941 #define NFS_DIRHDSIZ (sizeof (struct nfs_dirent) - (MAXNAMLEN + 1)) 1942 #define NFS_DIRENT_OVERHEAD offsetof(struct nfs_dirent, dirent) 1943 1944 /* 1945 * nfs readdir call 1946 */ 1947 int 1948 nfs_readdir(void *v) 1949 { 1950 struct vop_readdir_args *ap = v; 1951 struct vnode *vp = ap->a_vp; 1952 struct nfsnode *np = VTONFS(vp); 1953 struct uio *uio = ap->a_uio; 1954 int tresid, error = 0; 1955 struct vattr vattr; 1956 int cnt; 1957 u_int64_t newoff = uio->uio_offset; 1958 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1959 struct uio readdir_uio; 1960 struct iovec readdir_iovec; 1961 struct proc * p = uio->uio_procp; 1962 int done = 0, eof = 0; 1963 struct ucred *cred = ap->a_cred; 1964 void *data; 1965 1966 if (vp->v_type != VDIR) 1967 return (EPERM); 1968 /* 1969 * First, check for hit on the EOF offset cache 1970 */ 1971 if (np->n_direofoffset != 0 && 1972 uio->uio_offset == np->n_direofoffset) { 1973 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 && 1974 timespeccmp(&np->n_mtime, &vattr.va_mtime, ==)) { 1975 nfsstats.direofcache_hits++; 1976 *ap->a_eofflag = 1; 1977 return (0); 1978 } 1979 } 1980 1981 if (uio->uio_resid < NFS_FABLKSIZE) 1982 return (EINVAL); 1983 1984 tresid = uio->uio_resid; 1985 1986 if (uio->uio_rw != UIO_READ) 1987 return (EINVAL); 1988 1989 if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) 1990 (void)nfs_fsinfo(nmp, vp, cred, p); 1991 1992 cnt = 5; 1993 1994 data = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK); 1995 do { 1996 struct nfs_dirent *ndp = data; 1997 1998 readdir_iovec.iov_len = NFS_DIRBLKSIZ; 1999 readdir_iovec.iov_base = data; 2000 readdir_uio.uio_offset = newoff; 2001 readdir_uio.uio_iov = &readdir_iovec; 2002 readdir_uio.uio_iovcnt = 1; 2003 readdir_uio.uio_segflg = UIO_SYSSPACE; 2004 readdir_uio.uio_rw = UIO_READ; 2005 readdir_uio.uio_resid = NFS_DIRBLKSIZ; 2006 readdir_uio.uio_procp = curproc; 2007 2008 if (nmp->nm_flag & NFSMNT_RDIRPLUS) { 2009 error = nfs_readdirplusrpc(vp, &readdir_uio, cred, 2010 &eof); 2011 if (error == NFSERR_NOTSUPP) 2012 nmp->nm_flag &= ~NFSMNT_RDIRPLUS; 2013 } 2014 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) 2015 error = nfs_readdirrpc(vp, &readdir_uio, cred, &eof); 2016 2017 if (error == NFSERR_BAD_COOKIE) 2018 error = EINVAL; 2019 2020 while (error == 0 && 2021 ndp < (struct nfs_dirent *)readdir_iovec.iov_base) { 2022 struct dirent *dp = &ndp->dirent; 2023 int reclen = dp->d_reclen; 2024 2025 dp->d_reclen -= NFS_DIRENT_OVERHEAD; 2026 dp->d_off = fxdr_hyper(&ndp->cookie[0]); 2027 2028 if (uio->uio_resid < dp->d_reclen) { 2029 eof = 0; 2030 done = 1; 2031 break; 2032 } 2033 2034 if ((error = uiomove(dp, dp->d_reclen, uio))) 2035 break; 2036 2037 newoff = fxdr_hyper(&ndp->cookie[0]); 2038 2039 ndp = (struct nfs_dirent *)((u_int8_t *)ndp + reclen); 2040 } 2041 } while (!error && !done && !eof && cnt--); 2042 2043 free(data, M_TEMP, 0); 2044 data = NULL; 2045 2046 uio->uio_offset = newoff; 2047 2048 if (!error && (eof || uio->uio_resid == tresid)) { 2049 nfsstats.direofcache_misses++; 2050 *ap->a_eofflag = 1; 2051 return (0); 2052 } 2053 2054 *ap->a_eofflag = 0; 2055 return (error); 2056 } 2057 2058 2059 /* 2060 * The function below stuff the cookies in after the name 2061 */ 2062 2063 /* 2064 * Readdir rpc call. 
2065 */ 2066 int 2067 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2068 int *end_of_directory) 2069 { 2070 int len, left; 2071 struct nfs_dirent *ndp = NULL; 2072 struct dirent *dp = NULL; 2073 struct nfsm_info info; 2074 u_int32_t *tl; 2075 caddr_t cp; 2076 int32_t t1; 2077 caddr_t cp2; 2078 nfsuint64 cookie; 2079 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2080 struct nfsnode *dnp = VTONFS(vp); 2081 u_quad_t fileno; 2082 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1; 2083 int attrflag; 2084 2085 info.nmi_v3 = NFS_ISV3(vp); 2086 2087 #ifdef DIAGNOSTIC 2088 if (uiop->uio_iovcnt != 1 || 2089 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2090 panic("nfs readdirrpc bad uio"); 2091 #endif 2092 2093 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2094 2095 /* 2096 * Loop around doing readdir rpc's of size nm_readdirsize 2097 * truncated to a multiple of NFS_READDIRBLKSIZ. 2098 * The stopping criteria is EOF or buffer full. 2099 */ 2100 while (more_dirs && bigenough) { 2101 nfsstats.rpccnt[NFSPROC_READDIR]++; 2102 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 2103 + NFSX_READDIR(info.nmi_v3)); 2104 nfsm_fhtom(&info, vp, info.nmi_v3); 2105 if (info.nmi_v3) { 2106 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED); 2107 *tl++ = cookie.nfsuquad[0]; 2108 *tl++ = cookie.nfsuquad[1]; 2109 if (cookie.nfsuquad[0] == 0 && 2110 cookie.nfsuquad[1] == 0) { 2111 *tl++ = 0; 2112 *tl++ = 0; 2113 } else { 2114 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2115 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2116 } 2117 } else { 2118 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED); 2119 *tl++ = cookie.nfsuquad[1]; 2120 } 2121 *tl = txdr_unsigned(nmp->nm_readdirsize); 2122 2123 info.nmi_procp = uiop->uio_procp; 2124 info.nmi_cred = cred; 2125 error = nfs_request(vp, NFSPROC_READDIR, &info); 2126 if (info.nmi_v3) 2127 nfsm_postop_attr(vp, attrflag); 2128 2129 if (error) { 2130 m_freem(info.nmi_mrep); 2131 goto nfsmout; 2132 } 2133 2134 if (info.nmi_v3) { 2135 nfsm_dissect(tl, u_int32_t *, 2136 2 * NFSX_UNSIGNED); 2137 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2138 dnp->n_cookieverf.nfsuquad[1] = *tl; 2139 } 2140 2141 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2142 more_dirs = fxdr_unsigned(int, *tl); 2143 2144 /* loop thru the dir entries, doctoring them to dirent form */ 2145 while (more_dirs && bigenough) { 2146 if (info.nmi_v3) { 2147 nfsm_dissect(tl, u_int32_t *, 2148 3 * NFSX_UNSIGNED); 2149 fileno = fxdr_hyper(tl); 2150 len = fxdr_unsigned(int, *(tl + 2)); 2151 } else { 2152 nfsm_dissect(tl, u_int32_t *, 2153 2 * NFSX_UNSIGNED); 2154 fileno = fxdr_unsigned(u_quad_t, *tl++); 2155 len = fxdr_unsigned(int, *tl); 2156 } 2157 if (len <= 0 || len > NFS_MAXNAMLEN) { 2158 error = EBADRPC; 2159 m_freem(info.nmi_mrep); 2160 goto nfsmout; 2161 } 2162 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD; 2163 left = NFS_READDIRBLKSIZ - blksiz; 2164 if (tlen > left) { 2165 dp->d_reclen += left; 2166 uiop->uio_iov->iov_base += left; 2167 uiop->uio_iov->iov_len -= left; 2168 uiop->uio_resid -= left; 2169 blksiz = 0; 2170 } 2171 if (tlen > uiop->uio_resid) 2172 bigenough = 0; 2173 if (bigenough) { 2174 ndp = (struct nfs_dirent *) 2175 uiop->uio_iov->iov_base; 2176 dp = &ndp->dirent; 2177 dp->d_fileno = fileno; 2178 dp->d_namlen = len; 2179 dp->d_reclen = tlen; 2180 dp->d_type = DT_UNKNOWN; 2181 blksiz += tlen; 2182 if (blksiz == NFS_READDIRBLKSIZ) 2183 blksiz = 0; 2184 uiop->uio_resid -= NFS_DIRHDSIZ; 2185 uiop->uio_iov->iov_base = 2186 (char *)uiop->uio_iov->iov_base + 2187 
NFS_DIRHDSIZ; 2188 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2189 nfsm_mtouio(uiop, len); 2190 cp = uiop->uio_iov->iov_base; 2191 tlen -= NFS_DIRHDSIZ + len; 2192 *cp = '\0'; /* null terminate */ 2193 uiop->uio_iov->iov_base += tlen; 2194 uiop->uio_iov->iov_len -= tlen; 2195 uiop->uio_resid -= tlen; 2196 } else 2197 nfsm_adv(nfsm_rndup(len)); 2198 if (info.nmi_v3) { 2199 nfsm_dissect(tl, u_int32_t *, 2200 3 * NFSX_UNSIGNED); 2201 } else { 2202 nfsm_dissect(tl, u_int32_t *, 2203 2 * NFSX_UNSIGNED); 2204 } 2205 if (bigenough) { 2206 if (info.nmi_v3) { 2207 ndp->cookie[0] = cookie.nfsuquad[0] = 2208 *tl++; 2209 } else 2210 ndp->cookie[0] = 0; 2211 2212 ndp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2213 } else if (info.nmi_v3) 2214 tl += 2; 2215 else 2216 tl++; 2217 more_dirs = fxdr_unsigned(int, *tl); 2218 } 2219 /* 2220 * If at end of rpc data, get the eof boolean 2221 */ 2222 if (!more_dirs) { 2223 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2224 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2225 } 2226 m_freem(info.nmi_mrep); 2227 } 2228 /* 2229 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2230 * by increasing d_reclen for the last record. 2231 */ 2232 if (blksiz > 0) { 2233 left = NFS_READDIRBLKSIZ - blksiz; 2234 dp->d_reclen += left; 2235 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2236 left; 2237 uiop->uio_iov->iov_len -= left; 2238 uiop->uio_resid -= left; 2239 } 2240 2241 /* 2242 * We are now either at the end of the directory or have filled the 2243 * block. 2244 */ 2245 if (bigenough) { 2246 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2247 if (end_of_directory) *end_of_directory = 1; 2248 } else { 2249 if (uiop->uio_resid > 0) 2250 printf("EEK! readdirrpc resid > 0\n"); 2251 } 2252 2253 nfsmout: 2254 return (error); 2255 } 2256 2257 /* 2258 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2259 */ 2260 int 2261 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2262 int *end_of_directory) 2263 { 2264 int len, left; 2265 struct nfs_dirent *ndirp = NULL; 2266 struct dirent *dp = NULL; 2267 struct nfsm_info info; 2268 u_int32_t *tl; 2269 caddr_t cp; 2270 int32_t t1; 2271 struct vnode *newvp; 2272 caddr_t cp2, dpossav1, dpossav2; 2273 struct mbuf *mdsav1, *mdsav2; 2274 struct nameidata nami, *ndp = &nami; 2275 struct componentname *cnp = &ndp->ni_cnd; 2276 nfsuint64 cookie; 2277 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2278 struct nfsnode *dnp = VTONFS(vp), *np; 2279 nfsfh_t *fhp; 2280 u_quad_t fileno; 2281 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2282 int attrflag, fhsize; 2283 2284 #ifdef DIAGNOSTIC 2285 if (uiop->uio_iovcnt != 1 || 2286 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2287 panic("nfs readdirplusrpc bad uio"); 2288 #endif 2289 ndp->ni_dvp = vp; 2290 newvp = NULLVP; 2291 2292 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2293 2294 /* 2295 * Loop around doing readdir rpc's of size nm_readdirsize 2296 * truncated to a multiple of NFS_READDIRBLKSIZ. 2297 * The stopping criteria is EOF or buffer full. 
2298 */ 2299 while (more_dirs && bigenough) { 2300 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2301 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2302 nfsm_fhtom(&info, vp, 1); 2303 tl = nfsm_build(&info.nmi_mb, 6 * NFSX_UNSIGNED); 2304 *tl++ = cookie.nfsuquad[0]; 2305 *tl++ = cookie.nfsuquad[1]; 2306 if (cookie.nfsuquad[0] == 0 && 2307 cookie.nfsuquad[1] == 0) { 2308 *tl++ = 0; 2309 *tl++ = 0; 2310 } else { 2311 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2312 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2313 } 2314 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2315 *tl = txdr_unsigned(nmp->nm_rsize); 2316 2317 info.nmi_procp = uiop->uio_procp; 2318 info.nmi_cred = cred; 2319 error = nfs_request(vp, NFSPROC_READDIRPLUS, &info); 2320 nfsm_postop_attr(vp, attrflag); 2321 if (error) { 2322 m_freem(info.nmi_mrep); 2323 goto nfsmout; 2324 } 2325 2326 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2327 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2328 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2329 more_dirs = fxdr_unsigned(int, *tl); 2330 2331 /* loop thru the dir entries, doctoring them to 4bsd form */ 2332 while (more_dirs && bigenough) { 2333 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2334 fileno = fxdr_hyper(tl); 2335 len = fxdr_unsigned(int, *(tl + 2)); 2336 if (len <= 0 || len > NFS_MAXNAMLEN) { 2337 error = EBADRPC; 2338 m_freem(info.nmi_mrep); 2339 goto nfsmout; 2340 } 2341 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD; 2342 left = NFS_READDIRBLKSIZ - blksiz; 2343 if (tlen > left) { 2344 dp->d_reclen += left; 2345 uiop->uio_iov->iov_base = 2346 (char *)uiop->uio_iov->iov_base + left; 2347 uiop->uio_iov->iov_len -= left; 2348 uiop->uio_resid -= left; 2349 blksiz = 0; 2350 } 2351 if (tlen > uiop->uio_resid) 2352 bigenough = 0; 2353 if (bigenough) { 2354 ndirp = (struct nfs_dirent *) 2355 uiop->uio_iov->iov_base; 2356 dp = &ndirp->dirent; 2357 dp->d_fileno = fileno; 2358 dp->d_namlen = len; 2359 dp->d_reclen = tlen; 2360 dp->d_type = DT_UNKNOWN; 2361 blksiz += tlen; 2362 if (blksiz == NFS_READDIRBLKSIZ) 2363 blksiz = 0; 2364 uiop->uio_resid -= NFS_DIRHDSIZ; 2365 uiop->uio_iov->iov_base = 2366 (char *)uiop->uio_iov->iov_base + 2367 NFS_DIRHDSIZ; 2368 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2369 cnp->cn_nameptr = uiop->uio_iov->iov_base; 2370 cnp->cn_namelen = len; 2371 nfsm_mtouio(uiop, len); 2372 cp = uiop->uio_iov->iov_base; 2373 tlen -= NFS_DIRHDSIZ + len; 2374 *cp = '\0'; 2375 uiop->uio_iov->iov_base += tlen; 2376 uiop->uio_iov->iov_len -= tlen; 2377 uiop->uio_resid -= tlen; 2378 } else 2379 nfsm_adv(nfsm_rndup(len)); 2380 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2381 if (bigenough) { 2382 ndirp->cookie[0] = cookie.nfsuquad[0] = *tl++; 2383 ndirp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2384 } else 2385 tl += 2; 2386 2387 /* 2388 * Since the attributes are before the file handle 2389 * (sigh), we must skip over the attributes and then 2390 * come back and get them. 
2391 */ 2392 attrflag = fxdr_unsigned(int, *tl); 2393 if (attrflag) { 2394 dpossav1 = info.nmi_dpos; 2395 mdsav1 = info.nmi_md; 2396 nfsm_adv(NFSX_V3FATTR); 2397 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2398 doit = fxdr_unsigned(int, *tl); 2399 if (doit) { 2400 nfsm_getfh(fhp, fhsize, 1); 2401 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2402 vref(vp); 2403 newvp = vp; 2404 np = dnp; 2405 } else { 2406 error = nfs_nget(vp->v_mount, 2407 fhp, fhsize, &np); 2408 if (error) 2409 doit = 0; 2410 else 2411 newvp = NFSTOV(np); 2412 } 2413 } 2414 if (doit && bigenough) { 2415 dpossav2 = info.nmi_dpos; 2416 info.nmi_dpos = dpossav1; 2417 mdsav2 = info.nmi_md; 2418 info.nmi_md = mdsav1; 2419 nfsm_loadattr(newvp, NULL); 2420 info.nmi_dpos = dpossav2; 2421 info.nmi_md = mdsav2; 2422 dp->d_type = IFTODT( 2423 VTTOIF(np->n_vattr.va_type)); 2424 if (cnp->cn_namelen <= 2425 NAMECACHE_MAXLEN) { 2426 ndp->ni_vp = newvp; 2427 cache_purge(ndp->ni_dvp); 2428 nfs_cache_enter(ndp->ni_dvp, 2429 ndp->ni_vp, cnp); 2430 } 2431 } 2432 } else { 2433 /* Just skip over the file handle */ 2434 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2435 i = fxdr_unsigned(int, *tl); 2436 nfsm_adv(nfsm_rndup(i)); 2437 } 2438 if (newvp != NULLVP) { 2439 vrele(newvp); 2440 newvp = NULLVP; 2441 } 2442 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2443 more_dirs = fxdr_unsigned(int, *tl); 2444 } 2445 /* 2446 * If at end of rpc data, get the eof boolean 2447 */ 2448 if (!more_dirs) { 2449 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2450 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2451 } 2452 m_freem(info.nmi_mrep); 2453 } 2454 /* 2455 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2456 * by increasing d_reclen for the last record. 2457 */ 2458 if (blksiz > 0) { 2459 left = NFS_READDIRBLKSIZ - blksiz; 2460 dp->d_reclen += left; 2461 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2462 left; 2463 uiop->uio_iov->iov_len -= left; 2464 uiop->uio_resid -= left; 2465 } 2466 2467 /* 2468 * We are now either at the end of the directory or have filled the 2469 * block. 2470 */ 2471 if (bigenough) { 2472 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2473 if (end_of_directory) *end_of_directory = 1; 2474 } else { 2475 if (uiop->uio_resid > 0) 2476 printf("EEK! readdirplusrpc resid > 0\n"); 2477 } 2478 2479 nfsmout: 2480 if (newvp != NULLVP) 2481 vrele(newvp); 2482 return (error); 2483 } 2484 2485 /* 2486 * Silly rename. To make the NFS filesystem that is stateless look a little 2487 * more like the "ufs" a remove of an active vnode is translated to a rename 2488 * to a funny looking filename that is removed by nfs_inactive on the 2489 * nfsnode. There is the potential for another process on a different client 2490 * to create the same funny name between the nfs_lookitup() fails and the 2491 * nfs_rename() completes, but... 
2492 */ 2493 int 2494 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 2495 { 2496 struct sillyrename *sp; 2497 struct nfsnode *np; 2498 int error; 2499 2500 cache_purge(dvp); 2501 np = VTONFS(vp); 2502 sp = malloc(sizeof(struct sillyrename), M_NFSREQ, M_WAITOK); 2503 sp->s_cred = crdup(cnp->cn_cred); 2504 sp->s_dvp = dvp; 2505 vref(dvp); 2506 2507 if (vp->v_type == VDIR) { 2508 #ifdef DIAGNOSTIC 2509 printf("nfs: sillyrename dir\n"); 2510 #endif 2511 error = EINVAL; 2512 goto bad; 2513 } 2514 2515 /* Try lookitups until we get one that isn't there */ 2516 while (1) { 2517 /* Fudge together a funny name */ 2518 u_int32_t rnd[2]; 2519 2520 arc4random_buf(&rnd, sizeof rnd); 2521 sp->s_namlen = snprintf(sp->s_name, sizeof sp->s_name, 2522 ".nfs%08X%08X", rnd[0], rnd[1]); 2523 if (sp->s_namlen > sizeof sp->s_name) 2524 sp->s_namlen = strlen(sp->s_name); 2525 2526 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2527 cnp->cn_proc, NULL)) 2528 break; 2529 } 2530 2531 error = nfs_renameit(dvp, cnp, sp); 2532 if (error) 2533 goto bad; 2534 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2535 cnp->cn_proc, &np); 2536 np->n_sillyrename = sp; 2537 return (0); 2538 bad: 2539 vrele(sp->s_dvp); 2540 crfree(sp->s_cred); 2541 free(sp, M_NFSREQ, 0); 2542 return (error); 2543 } 2544 2545 /* 2546 * Look up a file name and optionally either update the file handle or 2547 * allocate an nfsnode, depending on the value of npp. 2548 * npp == NULL --> just do the lookup 2549 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2550 * handled too 2551 * *npp != NULL --> update the file handle in the vnode 2552 */ 2553 int 2554 nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred, 2555 struct proc *procp, struct nfsnode **npp) 2556 { 2557 struct nfsm_info info; 2558 u_int32_t *tl; 2559 int32_t t1; 2560 struct vnode *newvp = NULL; 2561 struct nfsnode *np, *dnp = VTONFS(dvp); 2562 caddr_t cp2; 2563 int error = 0, fhlen, attrflag; 2564 nfsfh_t *nfhp; 2565 2566 info.nmi_v3 = NFS_ISV3(dvp); 2567 2568 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2569 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + NFSX_UNSIGNED + 2570 nfsm_rndup(len)); 2571 nfsm_fhtom(&info, dvp, info.nmi_v3); 2572 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2573 2574 info.nmi_procp = procp; 2575 info.nmi_cred = cred; 2576 error = nfs_request(dvp, NFSPROC_LOOKUP, &info); 2577 if (error && !info.nmi_v3) { 2578 m_freem(info.nmi_mrep); 2579 goto nfsmout; 2580 } 2581 2582 if (npp && !error) { 2583 nfsm_getfh(nfhp, fhlen, info.nmi_v3); 2584 if (*npp) { 2585 np = *npp; 2586 np->n_fhp = &np->n_fh; 2587 bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen); 2588 np->n_fhsize = fhlen; 2589 newvp = NFSTOV(np); 2590 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2591 vref(dvp); 2592 newvp = dvp; 2593 np = dnp; 2594 } else { 2595 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2596 if (error) { 2597 m_freem(info.nmi_mrep); 2598 return (error); 2599 } 2600 newvp = NFSTOV(np); 2601 } 2602 if (info.nmi_v3) { 2603 nfsm_postop_attr(newvp, attrflag); 2604 if (!attrflag && *npp == NULL) { 2605 m_freem(info.nmi_mrep); 2606 vrele(newvp); 2607 return (ENOENT); 2608 } 2609 } else 2610 nfsm_loadattr(newvp, NULL); 2611 } 2612 m_freem(info.nmi_mrep); 2613 nfsmout: 2614 if (npp && *npp == NULL) { 2615 if (error) { 2616 if (newvp) 2617 vrele(newvp); 2618 } else 2619 *npp = np; 2620 } 2621 return (error); 2622 } 2623 2624 /* 2625 * Nfs Version 3 commit rpc 2626 */ 2627 int 2628 nfs_commit(struct 
vnode *vp, u_quad_t offset, int cnt, struct proc *procp) 2629 { 2630 struct nfsm_info info; 2631 u_int32_t *tl; 2632 int32_t t1; 2633 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2634 caddr_t cp2; 2635 int error = 0, wccflag = NFSV3_WCCRATTR; 2636 2637 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) 2638 return (0); 2639 nfsstats.rpccnt[NFSPROC_COMMIT]++; 2640 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1)); 2641 nfsm_fhtom(&info, vp, 1); 2642 2643 tl = nfsm_build(&info.nmi_mb, 3 * NFSX_UNSIGNED); 2644 txdr_hyper(offset, tl); 2645 tl += 2; 2646 *tl = txdr_unsigned(cnt); 2647 2648 info.nmi_procp = procp; 2649 info.nmi_cred = VTONFS(vp)->n_wcred; 2650 error = nfs_request(vp, NFSPROC_COMMIT, &info); 2651 nfsm_wcc_data(vp, wccflag); 2652 2653 if (!error) { 2654 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF); 2655 if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl, 2656 NFSX_V3WRITEVERF)) { 2657 bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf, 2658 NFSX_V3WRITEVERF); 2659 error = NFSERR_STALEWRITEVERF; 2660 } 2661 } 2662 m_freem(info.nmi_mrep); 2663 2664 nfsmout: 2665 return (error); 2666 } 2667 2668 /* 2669 * Kludge City.. 2670 * - make nfs_bmap() essentially a no-op that does no translation 2671 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc 2672 * (Maybe I could use the process's page mapping, but I was concerned that 2673 * Kernel Write might not be enabled and also figured copyout() would do 2674 * a lot more work than bcopy() and also it currently happens in the 2675 * context of the swapper process (2).) 2676 */ 2677 int 2678 nfs_bmap(void *v) 2679 { 2680 struct vop_bmap_args *ap = v; 2681 struct vnode *vp = ap->a_vp; 2682 2683 if (ap->a_vpp != NULL) 2684 *ap->a_vpp = vp; 2685 if (ap->a_bnp != NULL) 2686 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize); 2687 return (0); 2688 } 2689 2690 /* 2691 * Strategy routine. 2692 * For async requests when nfsiod(s) are running, queue the request by 2693 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the 2694 * request. 2695 */ 2696 int 2697 nfs_strategy(void *v) 2698 { 2699 struct vop_strategy_args *ap = v; 2700 struct buf *bp = ap->a_bp; 2701 struct proc *p; 2702 int error = 0; 2703 2704 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC)) 2705 panic("nfs physio/async"); 2706 if (bp->b_flags & B_ASYNC) 2707 p = NULL; 2708 else 2709 p = curproc; /* XXX */ 2710 /* 2711 * If the op is asynchronous and an i/o daemon is waiting, 2712 * queue the request, wake it up and wait for completion; 2713 * otherwise just do it ourselves. 2714 */ 2715 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp, 0)) 2716 error = nfs_doio(bp, p); 2717 return (error); 2718 } 2719 2720 /* 2721 * fsync vnode op. Just call nfs_flush() with commit == 1. 2722 */ 2723 int 2724 nfs_fsync(void *v) 2725 { 2726 struct vop_fsync_args *ap = v; 2727 2728 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1)); 2729 } 2730 2731 /* 2732 * Flush all the blocks associated with a vnode. 2733 * Walk through the buffer pool and push any dirty pages 2734 * associated with the vnode.
2735 */ 2736 int 2737 nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct proc *p, 2738 int commit) 2739 { 2740 struct nfsnode *np = VTONFS(vp); 2741 struct buf *bp; 2742 int i; 2743 struct buf *nbp; 2744 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2745 int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos; 2746 int passone = 1; 2747 u_quad_t off = (u_quad_t)-1, endoff = 0, toff; 2748 #ifndef NFS_COMMITBVECSIZ 2749 #define NFS_COMMITBVECSIZ 20 2750 #endif 2751 struct buf *bvec[NFS_COMMITBVECSIZ]; 2752 2753 if (nmp->nm_flag & NFSMNT_INT) 2754 slpflag = PCATCH; 2755 if (!commit) 2756 passone = 0; 2757 /* 2758 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the 2759 * server, but has not been committed to stable storage on the server 2760 * yet. On the first pass, the byte range is worked out and the commit 2761 * rpc is done. On the second pass, nfs_writebp() is called to do the 2762 * job. 2763 */ 2764 again: 2765 bvecpos = 0; 2766 if (NFS_ISV3(vp) && commit) { 2767 s = splbio(); 2768 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) { 2769 if (bvecpos >= NFS_COMMITBVECSIZ) 2770 break; 2771 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT)) 2772 != (B_DELWRI | B_NEEDCOMMIT)) { 2773 nbp = LIST_NEXT(bp, b_vnbufs); 2774 continue; 2775 } 2776 bremfree(bp); 2777 bp->b_flags |= B_WRITEINPROG; 2778 buf_acquire(bp); 2779 nbp = LIST_NEXT(bp, b_vnbufs); 2780 2781 /* 2782 * A list of these buffers is kept so that the 2783 * second loop knows which buffers have actually 2784 * been committed. This is necessary, since there 2785 * may be a race between the commit rpc and new 2786 * uncommitted writes on the file. 2787 */ 2788 bvec[bvecpos++] = bp; 2789 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2790 bp->b_dirtyoff; 2791 if (toff < off) 2792 off = toff; 2793 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff); 2794 if (toff > endoff) 2795 endoff = toff; 2796 } 2797 splx(s); 2798 } 2799 if (bvecpos > 0) { 2800 /* 2801 * Commit data on the server, as required. 2802 */ 2803 bcstats.pendingwrites++; 2804 bcstats.numwrites++; 2805 retv = nfs_commit(vp, off, (int)(endoff - off), p); 2806 if (retv == NFSERR_STALEWRITEVERF) 2807 nfs_clearcommit(vp->v_mount); 2808 /* 2809 * Now, either mark the blocks I/O done or mark the 2810 * blocks dirty, depending on whether the commit 2811 * succeeded. 2812 */ 2813 for (i = 0; i < bvecpos; i++) { 2814 bp = bvec[i]; 2815 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG); 2816 if (retv) { 2817 if (i == 0) 2818 bcstats.pendingwrites--; 2819 brelse(bp); 2820 } else { 2821 if (i > 0) 2822 bcstats.pendingwrites++; 2823 s = splbio(); 2824 buf_undirty(bp); 2825 vp->v_numoutput++; 2826 bp->b_flags |= B_ASYNC; 2827 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR); 2828 bp->b_dirtyoff = bp->b_dirtyend = 0; 2829 biodone(bp); 2830 splx(s); 2831 } 2832 } 2833 } 2834 2835 /* 2836 * Start/do any write(s) that are required.
2837 */ 2838 loop: 2839 s = splbio(); 2840 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp != NULL; bp = nbp) { 2841 nbp = LIST_NEXT(bp, b_vnbufs); 2842 if (bp->b_flags & B_BUSY) { 2843 if (waitfor != MNT_WAIT || passone) 2844 continue; 2845 bp->b_flags |= B_WANTED; 2846 error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1), 2847 "nfsfsync", slptimeo); 2848 splx(s); 2849 if (error) { 2850 if (nfs_sigintr(nmp, NULL, p)) 2851 return (EINTR); 2852 if (slpflag == PCATCH) { 2853 slpflag = 0; 2854 slptimeo = 2 * hz; 2855 } 2856 } 2857 goto loop; 2858 } 2859 if ((bp->b_flags & B_DELWRI) == 0) 2860 panic("nfs_fsync: not dirty"); 2861 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) 2862 continue; 2863 bremfree(bp); 2864 if (passone || !commit) { 2865 bp->b_flags |= B_ASYNC; 2866 } else { 2867 bp->b_flags |= (B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT); 2868 } 2869 buf_acquire(bp); 2870 splx(s); 2871 VOP_BWRITE(bp); 2872 goto loop; 2873 } 2874 splx(s); 2875 if (passone) { 2876 passone = 0; 2877 goto again; 2878 } 2879 if (waitfor == MNT_WAIT) { 2880 loop2: 2881 s = splbio(); 2882 error = vwaitforio(vp, slpflag, "nfs_fsync", slptimeo); 2883 splx(s); 2884 if (error) { 2885 if (nfs_sigintr(nmp, NULL, p)) 2886 return (EINTR); 2887 if (slpflag == PCATCH) { 2888 slpflag = 0; 2889 slptimeo = 2 * hz; 2890 } 2891 goto loop2; 2892 } 2893 2894 if (LIST_FIRST(&vp->v_dirtyblkhd) && commit) { 2895 #if 0 2896 vprint("nfs_fsync: dirty", vp); 2897 #endif 2898 goto loop; 2899 } 2900 } 2901 if (np->n_flag & NWRITEERR) { 2902 error = np->n_error; 2903 np->n_flag &= ~NWRITEERR; 2904 } 2905 return (error); 2906 } 2907 2908 /* 2909 * Return POSIX pathconf information applicable to nfs. 2910 * Fake it. For v3 we could ask the server, but such code 2911 * hasn't been written yet. 2912 */ 2913 /* ARGSUSED */ 2914 int 2915 nfs_pathconf(void *v) 2916 { 2917 struct vop_pathconf_args *ap = v; 2918 struct nfsmount *nmp = VFSTONFS(ap->a_vp->v_mount); 2919 int error = 0; 2920 2921 switch (ap->a_name) { 2922 case _PC_LINK_MAX: 2923 *ap->a_retval = LINK_MAX; 2924 break; 2925 case _PC_NAME_MAX: 2926 *ap->a_retval = NAME_MAX; 2927 break; 2928 case _PC_CHOWN_RESTRICTED: 2929 *ap->a_retval = 1; 2930 break; 2931 case _PC_NO_TRUNC: 2932 *ap->a_retval = 1; 2933 break; 2934 case _PC_ALLOC_SIZE_MIN: 2935 *ap->a_retval = NFS_FABLKSIZE; 2936 break; 2937 case _PC_FILESIZEBITS: 2938 *ap->a_retval = 64; 2939 break; 2940 case _PC_REC_INCR_XFER_SIZE: 2941 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize); 2942 break; 2943 case _PC_REC_MAX_XFER_SIZE: 2944 *ap->a_retval = -1; /* means ``unlimited'' */ 2945 break; 2946 case _PC_REC_MIN_XFER_SIZE: 2947 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize); 2948 break; 2949 case _PC_REC_XFER_ALIGN: 2950 *ap->a_retval = PAGE_SIZE; 2951 break; 2952 case _PC_SYMLINK_MAX: 2953 *ap->a_retval = MAXPATHLEN; 2954 break; 2955 case _PC_2_SYMLINKS: 2956 *ap->a_retval = 1; 2957 break; 2958 case _PC_TIMESTAMP_RESOLUTION: 2959 *ap->a_retval = NFS_ISV3(ap->a_vp) ? 1 : 1000; 2960 break; 2961 default: 2962 error = EINVAL; 2963 break; 2964 } 2965 2966 return (error); 2967 } 2968 2969 /* 2970 * NFS advisory byte-level locks. 2971 */ 2972 int 2973 nfs_advlock(void *v) 2974 { 2975 struct vop_advlock_args *ap = v; 2976 struct nfsnode *np = VTONFS(ap->a_vp); 2977 2978 return (lf_advlock(&np->n_lockf, np->n_size, ap->a_id, ap->a_op, 2979 ap->a_fl, ap->a_flags)); 2980 } 2981 2982 /* 2983 * Print out the contents of an nfsnode. 
2984 */ 2985 int 2986 nfs_print(void *v) 2987 { 2988 struct vop_print_args *ap = v; 2989 struct vnode *vp = ap->a_vp; 2990 struct nfsnode *np = VTONFS(vp); 2991 2992 printf("tag VT_NFS, fileid %lld fsid 0x%lx", 2993 np->n_vattr.va_fileid, np->n_vattr.va_fsid); 2994 #ifdef FIFO 2995 if (vp->v_type == VFIFO) 2996 fifo_printinfo(vp); 2997 #endif 2998 printf("\n"); 2999 return (0); 3000 } 3001 3002 /* 3003 * Just call nfs_writebp() with the force argument set to 1. 3004 */ 3005 int 3006 nfs_bwrite(void *v) 3007 { 3008 struct vop_bwrite_args *ap = v; 3009 3010 return (nfs_writebp(ap->a_bp, 1)); 3011 } 3012 3013 /* 3014 * This is a clone of vop_generic_bwrite(), except that B_WRITEINPROG isn't set unless 3015 * the force flag is one and it also handles the B_NEEDCOMMIT flag. 3016 */ 3017 int 3018 nfs_writebp(struct buf *bp, int force) 3019 { 3020 int oldflags = bp->b_flags, retv = 1; 3021 struct proc *p = curproc; /* XXX */ 3022 off_t off; 3023 size_t cnt; 3024 int s; 3025 struct vnode *vp; 3026 struct nfsnode *np; 3027 3028 if(!(bp->b_flags & B_BUSY)) 3029 panic("bwrite: buffer is not busy???"); 3030 3031 vp = bp->b_vp; 3032 np = VTONFS(vp); 3033 3034 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR); 3035 3036 s = splbio(); 3037 buf_undirty(bp); 3038 3039 if ((oldflags & B_ASYNC) && !(oldflags & B_DELWRI) && p) 3040 ++p->p_ru.ru_oublock; 3041 3042 bp->b_vp->v_numoutput++; 3043 splx(s); 3044 3045 /* 3046 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not, 3047 * an actual write will have to be scheduled via VOP_STRATEGY(). 3048 * If B_WRITEINPROG is already set, then push it with a write anyhow. 3049 */ 3050 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) { 3051 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; 3052 cnt = bp->b_dirtyend - bp->b_dirtyoff; 3053 3054 rw_enter_write(&np->n_commitlock); 3055 if (!(bp->b_flags & B_NEEDCOMMIT)) { 3056 rw_exit_write(&np->n_commitlock); 3057 return (0); 3058 } 3059 3060 /* 3061 * If it's already been committed by somebody else, 3062 * bail. 3063 */ 3064 if (!nfs_in_committed_range(vp, bp)) { 3065 int pushedrange = 0; 3066 /* 3067 * Since we're going to do this, push as much 3068 * as we can. 3069 */ 3070 3071 if (nfs_in_tobecommitted_range(vp, bp)) { 3072 pushedrange = 1; 3073 off = np->n_pushlo; 3074 cnt = np->n_pushhi - np->n_pushlo; 3075 } 3076 3077 bp->b_flags |= B_WRITEINPROG; 3078 bcstats.pendingwrites++; 3079 bcstats.numwrites++; 3080 retv = nfs_commit(bp->b_vp, off, cnt, curproc); 3081 bp->b_flags &= ~B_WRITEINPROG; 3082 3083 if (retv == 0) { 3084 if (pushedrange) 3085 nfs_merge_commit_ranges(vp); 3086 else 3087 nfs_add_committed_range(vp, bp); 3088 } else 3089 bcstats.pendingwrites--; 3090 } else 3091 retv = 0; /* It has already been committed. */ 3092 3093 rw_exit_write(&np->n_commitlock); 3094 if (!retv) { 3095 bp->b_dirtyoff = bp->b_dirtyend = 0; 3096 bp->b_flags &= ~B_NEEDCOMMIT; 3097 s = splbio(); 3098 biodone(bp); 3099 splx(s); 3100 } else if (retv == NFSERR_STALEWRITEVERF) 3101 nfs_clearcommit(bp->b_vp->v_mount); 3102 } 3103 if (retv) { 3104 if (force) 3105 bp->b_flags |= B_WRITEINPROG; 3106 VOP_STRATEGY(bp); 3107 } 3108 3109 if( (oldflags & B_ASYNC) == 0) { 3110 int rtval; 3111 3112 bp->b_flags |= B_RAW; 3113 rtval = biowait(bp); 3114 if (!(oldflags & B_DELWRI) && p) { 3115 ++p->p_ru.ru_oublock; 3116 } 3117 brelse(bp); 3118 return (rtval); 3119 } 3120 3121 return (0); 3122 } 3123 3124 /* 3125 * nfs special file access vnode op.
3126 * Essentially just get vattr and then imitate iaccess() since the device is 3127 * local to the client. 3128 */ 3129 int 3130 nfsspec_access(void *v) 3131 { 3132 struct vop_access_args *ap = v; 3133 struct vattr va; 3134 struct vnode *vp = ap->a_vp; 3135 int error; 3136 3137 /* 3138 * Disallow write attempts on filesystems mounted read-only; 3139 * unless the file is a socket, fifo, or a block or character 3140 * device resident on the filesystem. 3141 */ 3142 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3143 switch (vp->v_type) { 3144 case VREG: 3145 case VDIR: 3146 case VLNK: 3147 return (EROFS); 3148 default: 3149 break; 3150 } 3151 } 3152 3153 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_p); 3154 if (error) 3155 return (error); 3156 3157 return (vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid, 3158 ap->a_mode, ap->a_cred)); 3159 } 3160 3161 int 3162 nfs_poll(void *v) 3163 { 3164 struct vop_poll_args *ap = v; 3165 3166 /* 3167 * We should really check to see if I/O is possible. 3168 */ 3169 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 3170 } 3171 3172 /* 3173 * Read wrapper for special devices. 3174 */ 3175 int 3176 nfsspec_read(void *v) 3177 { 3178 struct vop_read_args *ap = v; 3179 struct nfsnode *np = VTONFS(ap->a_vp); 3180 3181 /* 3182 * Set access flag. 3183 */ 3184 np->n_flag |= NACC; 3185 getnanotime(&np->n_atim); 3186 return (spec_read(ap)); 3187 } 3188 3189 /* 3190 * Write wrapper for special devices. 3191 */ 3192 int 3193 nfsspec_write(void *v) 3194 { 3195 struct vop_write_args *ap = v; 3196 struct nfsnode *np = VTONFS(ap->a_vp); 3197 3198 /* 3199 * Set update flag. 3200 */ 3201 np->n_flag |= NUPD; 3202 getnanotime(&np->n_mtim); 3203 return (spec_write(ap)); 3204 } 3205 3206 /* 3207 * Close wrapper for special devices. 3208 * 3209 * Update the times on the nfsnode then do device close. 3210 */ 3211 int 3212 nfsspec_close(void *v) 3213 { 3214 struct vop_close_args *ap = v; 3215 struct vnode *vp = ap->a_vp; 3216 struct nfsnode *np = VTONFS(vp); 3217 struct vattr vattr; 3218 3219 if (np->n_flag & (NACC | NUPD)) { 3220 np->n_flag |= NCHG; 3221 if (vp->v_usecount == 1 && 3222 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3223 VATTR_NULL(&vattr); 3224 if (np->n_flag & NACC) 3225 vattr.va_atime = np->n_atim; 3226 if (np->n_flag & NUPD) 3227 vattr.va_mtime = np->n_mtim; 3228 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3229 } 3230 } 3231 return (spec_close(ap)); 3232 } 3233 3234 #ifdef FIFO 3235 /* 3236 * Read wrapper for fifos. 3237 */ 3238 int 3239 nfsfifo_read(void *v) 3240 { 3241 struct vop_read_args *ap = v; 3242 struct nfsnode *np = VTONFS(ap->a_vp); 3243 3244 /* 3245 * Set access flag. 3246 */ 3247 np->n_flag |= NACC; 3248 getnanotime(&np->n_atim); 3249 return (fifo_read(ap)); 3250 } 3251 3252 /* 3253 * Write wrapper for fifos. 3254 */ 3255 int 3256 nfsfifo_write(void *v) 3257 { 3258 struct vop_write_args *ap = v; 3259 struct nfsnode *np = VTONFS(ap->a_vp); 3260 3261 /* 3262 * Set update flag. 3263 */ 3264 np->n_flag |= NUPD; 3265 getnanotime(&np->n_mtim); 3266 return (fifo_write(ap)); 3267 } 3268 3269 /* 3270 * Close wrapper for fifos. 3271 * 3272 * Update the times on the nfsnode then do fifo close. 
3273 */ 3274 int 3275 nfsfifo_close(void *v) 3276 { 3277 struct vop_close_args *ap = v; 3278 struct vnode *vp = ap->a_vp; 3279 struct nfsnode *np = VTONFS(vp); 3280 struct vattr vattr; 3281 3282 if (np->n_flag & (NACC | NUPD)) { 3283 if (np->n_flag & NACC) { 3284 getnanotime(&np->n_atim); 3285 } 3286 if (np->n_flag & NUPD) { 3287 getnanotime(&np->n_mtim); 3288 } 3289 np->n_flag |= NCHG; 3290 if (vp->v_usecount == 1 && 3291 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3292 VATTR_NULL(&vattr); 3293 if (np->n_flag & NACC) 3294 vattr.va_atime = np->n_atim; 3295 if (np->n_flag & NUPD) 3296 vattr.va_mtime = np->n_mtim; 3297 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3298 } 3299 } 3300 return (fifo_close(ap)); 3301 } 3302 3303 int 3304 nfsfifo_reclaim(void *v) 3305 { 3306 fifo_reclaim(v); 3307 return (nfs_reclaim(v)); 3308 } 3309 #endif /* FIFO */ 3310