1 /* $OpenBSD: nfs_vnops.c,v 1.182 2020/01/20 23:21:56 claudio Exp $ */ 2 /* $NetBSD: nfs_vnops.c,v 1.62.4.1 1996/07/08 20:26:52 jtc Exp $ */ 3 4 /* 5 * Copyright (c) 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * Rick Macklem at The University of Guelph. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95 36 */ 37 38 39 /* 40 * vnode op calls for Sun NFS version 2 and 3 41 */ 42 43 #include <sys/param.h> 44 #include <sys/kernel.h> 45 #include <sys/systm.h> 46 #include <sys/resourcevar.h> 47 #include <sys/poll.h> 48 #include <sys/proc.h> 49 #include <sys/mount.h> 50 #include <sys/buf.h> 51 #include <sys/malloc.h> 52 #include <sys/pool.h> 53 #include <sys/mbuf.h> 54 #include <sys/conf.h> 55 #include <sys/namei.h> 56 #include <sys/vnode.h> 57 #include <sys/lock.h> 58 #include <sys/dirent.h> 59 #include <sys/fcntl.h> 60 #include <sys/lockf.h> 61 #include <sys/queue.h> 62 #include <sys/specdev.h> 63 #include <sys/unistd.h> 64 65 #include <miscfs/fifofs/fifo.h> 66 67 #include <nfs/rpcv2.h> 68 #include <nfs/nfsproto.h> 69 #include <nfs/nfs.h> 70 #include <nfs/nfsnode.h> 71 #include <nfs/nfsmount.h> 72 #include <nfs/xdr_subs.h> 73 #include <nfs/nfsm_subs.h> 74 #include <nfs/nfs_var.h> 75 76 #include <uvm/uvm_extern.h> 77 78 #include <netinet/in.h> 79 80 int nfs_access(void *); 81 int nfs_advlock(void *); 82 int nfs_bmap(void *); 83 int nfs_bwrite(void *); 84 int nfs_close(void *); 85 int nfs_commit(struct vnode *, u_quad_t, int, struct proc *); 86 int nfs_create(void *); 87 int nfs_flush(struct vnode *, struct ucred *, int, struct proc *, int); 88 int nfs_fsync(void *); 89 int nfs_getattr(void *); 90 int nfs_getreq(struct nfsrv_descript *, struct nfsd *, int); 91 int nfs_islocked(void *); 92 int nfs_link(void *); 93 int nfs_lock(void *); 94 int nfs_lookitup(struct vnode *, char *, int, struct ucred *, struct proc *, 95 struct nfsnode **); 96 int nfs_lookup(void *); 97 int nfs_mkdir(void *); 98 int nfs_mknod(void *); 99 int nfs_mknodrpc(struct vnode *, struct vnode **, struct componentname *, 100 struct vattr *); 101 int nfs_null(struct vnode *, struct ucred *, struct proc *); 102 int nfs_open(void *); 103 int nfs_pathconf(void *); 104 int nfs_poll(void *); 105 int nfs_print(void *); 106 int nfs_read(void *); 107 int nfs_readdir(void *); 108 int nfs_readdirplusrpc(struct vnode *, struct uio *, struct ucred *, int *, 109 struct proc *); 110 int nfs_readdirrpc(struct vnode *, struct uio *, struct ucred *, int *); 111 int nfs_remove(void *); 112 int nfs_removerpc(struct vnode *, char *, int, struct ucred *, struct proc *); 113 int nfs_rename(void *); 114 int nfs_renameit(struct vnode *, struct componentname *, struct sillyrename *); 115 int nfs_renamerpc(struct vnode *, char *, int, struct vnode *, char *, int, 116 struct ucred *, struct proc *); 117 int nfs_rmdir(void *); 118 int nfs_setattr(void *); 119 int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *, 120 struct proc *); 121 int nfs_sillyrename(struct vnode *, struct vnode *, 122 struct componentname *); 123 int nfs_strategy(void *); 124 int nfs_symlink(void *); 125 int nfs_unlock(void *); 126 127 void nfs_cache_enter(struct vnode *, struct vnode *, struct componentname *); 128 129 int nfsfifo_close(void *); 130 int nfsfifo_read(void *); 131 int nfsfifo_reclaim(void *); 132 int nfsfifo_write(void *); 133 134 int nfsspec_access(void *); 135 int nfsspec_close(void *); 136 int nfsspec_read(void *); 137 int nfsspec_write(void *); 138 139 /* Global vfs data structures for nfs. 
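 * These tables are what the VOP_*() wrappers dispatch through; as an
 * illustrative (not literal) call chain, a lookup on an NFS vnode is
 * roughly
 *
 *	VOP_LOOKUP(dvp, &vp, cnp)
 *	    -> dvp->v_op->vop_lookup  ==  nfs_lookup(&args)
 *
 * nfs_specvops and nfs_fifovops below replace the table for special
 * device and FIFO vnodes that live on an NFS mount.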
*/ 140 const struct vops nfs_vops = { 141 .vop_lookup = nfs_lookup, 142 .vop_create = nfs_create, 143 .vop_mknod = nfs_mknod, 144 .vop_open = nfs_open, 145 .vop_close = nfs_close, 146 .vop_access = nfs_access, 147 .vop_getattr = nfs_getattr, 148 .vop_setattr = nfs_setattr, 149 .vop_read = nfs_read, 150 .vop_write = nfs_write, 151 .vop_ioctl = nfs_ioctl, 152 .vop_poll = nfs_poll, 153 .vop_kqfilter = nfs_kqfilter, 154 .vop_revoke = vop_generic_revoke, 155 .vop_fsync = nfs_fsync, 156 .vop_remove = nfs_remove, 157 .vop_link = nfs_link, 158 .vop_rename = nfs_rename, 159 .vop_mkdir = nfs_mkdir, 160 .vop_rmdir = nfs_rmdir, 161 .vop_symlink = nfs_symlink, 162 .vop_readdir = nfs_readdir, 163 .vop_readlink = nfs_readlink, 164 .vop_abortop = vop_generic_abortop, 165 .vop_inactive = nfs_inactive, 166 .vop_reclaim = nfs_reclaim, 167 .vop_lock = nfs_lock, 168 .vop_unlock = nfs_unlock, 169 .vop_bmap = nfs_bmap, 170 .vop_strategy = nfs_strategy, 171 .vop_print = nfs_print, 172 .vop_islocked = nfs_islocked, 173 .vop_pathconf = nfs_pathconf, 174 .vop_advlock = nfs_advlock, 175 .vop_bwrite = nfs_bwrite 176 }; 177 178 /* Special device vnode ops. */ 179 const struct vops nfs_specvops = { 180 .vop_close = nfsspec_close, 181 .vop_access = nfsspec_access, 182 .vop_getattr = nfs_getattr, 183 .vop_setattr = nfs_setattr, 184 .vop_read = nfsspec_read, 185 .vop_write = nfsspec_write, 186 .vop_fsync = nfs_fsync, 187 .vop_inactive = nfs_inactive, 188 .vop_reclaim = nfs_reclaim, 189 .vop_lock = nfs_lock, 190 .vop_unlock = nfs_unlock, 191 .vop_print = nfs_print, 192 .vop_islocked = nfs_islocked, 193 194 /* XXX: Keep in sync with spec_vops. */ 195 .vop_lookup = vop_generic_lookup, 196 .vop_create = spec_badop, 197 .vop_mknod = spec_badop, 198 .vop_open = spec_open, 199 .vop_ioctl = spec_ioctl, 200 .vop_poll = spec_poll, 201 .vop_kqfilter = spec_kqfilter, 202 .vop_revoke = vop_generic_revoke, 203 .vop_remove = spec_badop, 204 .vop_link = spec_badop, 205 .vop_rename = spec_badop, 206 .vop_mkdir = spec_badop, 207 .vop_rmdir = spec_badop, 208 .vop_symlink = spec_badop, 209 .vop_readdir = spec_badop, 210 .vop_readlink = spec_badop, 211 .vop_abortop = spec_badop, 212 .vop_bmap = vop_generic_bmap, 213 .vop_strategy = spec_strategy, 214 .vop_pathconf = spec_pathconf, 215 .vop_advlock = spec_advlock, 216 .vop_bwrite = vop_generic_bwrite, 217 }; 218 219 #ifdef FIFO 220 const struct vops nfs_fifovops = { 221 .vop_close = nfsfifo_close, 222 .vop_access = nfsspec_access, 223 .vop_getattr = nfs_getattr, 224 .vop_setattr = nfs_setattr, 225 .vop_read = nfsfifo_read, 226 .vop_write = nfsfifo_write, 227 .vop_fsync = nfs_fsync, 228 .vop_inactive = nfs_inactive, 229 .vop_reclaim = nfsfifo_reclaim, 230 .vop_lock = nfs_lock, 231 .vop_unlock = nfs_unlock, 232 .vop_print = nfs_print, 233 .vop_islocked = nfs_islocked, 234 .vop_bwrite = vop_generic_bwrite, 235 236 /* XXX: Keep in sync with fifo_vops. 
*/ 237 .vop_lookup = vop_generic_lookup, 238 .vop_create = fifo_badop, 239 .vop_mknod = fifo_badop, 240 .vop_open = fifo_open, 241 .vop_ioctl = fifo_ioctl, 242 .vop_poll = fifo_poll, 243 .vop_kqfilter = fifo_kqfilter, 244 .vop_revoke = vop_generic_revoke, 245 .vop_remove = fifo_badop, 246 .vop_link = fifo_badop, 247 .vop_rename = fifo_badop, 248 .vop_mkdir = fifo_badop, 249 .vop_rmdir = fifo_badop, 250 .vop_symlink = fifo_badop, 251 .vop_readdir = fifo_badop, 252 .vop_readlink = fifo_badop, 253 .vop_abortop = fifo_badop, 254 .vop_bmap = vop_generic_bmap, 255 .vop_strategy = fifo_badop, 256 .vop_pathconf = fifo_pathconf, 257 .vop_advlock = fifo_advlock, 258 }; 259 #endif /* FIFO */ 260 261 /* 262 * Global variables 263 */ 264 extern u_int32_t nfs_true, nfs_false; 265 extern u_int32_t nfs_xdrneg1; 266 extern struct nfsstats nfsstats; 267 extern nfstype nfsv3_type[9]; 268 int nfs_numasync = 0; 269 270 void 271 nfs_cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 272 { 273 struct nfsnode *np; 274 275 if (vp != NULL) { 276 np = VTONFS(vp); 277 np->n_ctime = np->n_vattr.va_ctime.tv_sec; 278 } else { 279 np = VTONFS(dvp); 280 if (!np->n_ctime) 281 np->n_ctime = np->n_vattr.va_mtime.tv_sec; 282 } 283 284 cache_enter(dvp, vp, cnp); 285 } 286 287 /* 288 * nfs null call from vfs. 289 */ 290 int 291 nfs_null(struct vnode *vp, struct ucred *cred, struct proc *procp) 292 { 293 struct nfsm_info info; 294 int error = 0; 295 296 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(0); 297 error = nfs_request(vp, NFSPROC_NULL, &info); 298 m_freem(info.nmi_mrep); 299 return (error); 300 } 301 302 /* 303 * nfs access vnode op. 304 * For nfs version 2, just return ok. File accesses may fail later. 305 * For nfs version 3, use the access rpc to check accessibility. If file modes 306 * are changed on the server, accesses might still fail later. 307 */ 308 int 309 nfs_access(void *v) 310 { 311 struct vop_access_args *ap = v; 312 struct vnode *vp = ap->a_vp; 313 u_int32_t *tl; 314 int32_t t1; 315 caddr_t cp2; 316 int error = 0, attrflag; 317 u_int32_t mode, rmode; 318 int v3 = NFS_ISV3(vp); 319 int cachevalid; 320 struct nfsm_info info; 321 322 struct nfsnode *np = VTONFS(vp); 323 324 /* 325 * Disallow write attempts on filesystems mounted read-only; 326 * unless the file is a socket, fifo, or a block or character 327 * device resident on the filesystem. 328 */ 329 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 330 switch (vp->v_type) { 331 case VREG: 332 case VDIR: 333 case VLNK: 334 return (EROFS); 335 default: 336 break; 337 } 338 } 339 340 /* 341 * Check access cache first. If a request has been made for this uid 342 * shortly before, use the cached result. 343 */ 344 cachevalid = (np->n_accstamp != -1 && 345 (time_second - np->n_accstamp) < nfs_attrtimeo(np) && 346 np->n_accuid == ap->a_cred->cr_uid); 347 348 if (cachevalid) { 349 if (!np->n_accerror) { 350 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 351 return (np->n_accerror); 352 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode) 353 return (np->n_accerror); 354 } 355 356 /* 357 * For nfs v3, do an access rpc, otherwise you are stuck emulating 358 * ufs_access() locally using the vattr. This may not be correct, 359 * since the server may apply other access criteria such as 360 * client uid-->server uid mapping that we do not know about, but 361 * this is better than just returning anything that is lying about 362 * in the cache. 
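 * As a concrete illustration of the mapping done below: a VWRITE|VEXEC
 * check on a directory goes out as
 *
 *	NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | NFSV3ACCESS_DELETE |
 *	NFSV3ACCESS_LOOKUP
 *
 * and the check insists that every requested bit comes back, since the
 * reply may well be a superset of what was asked for.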
363 */ 364 if (v3) { 365 nfsstats.rpccnt[NFSPROC_ACCESS]++; 366 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED); 367 nfsm_fhtom(&info, vp, v3); 368 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 369 if (ap->a_mode & VREAD) 370 mode = NFSV3ACCESS_READ; 371 else 372 mode = 0; 373 if (vp->v_type == VDIR) { 374 if (ap->a_mode & VWRITE) 375 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 376 NFSV3ACCESS_DELETE); 377 if (ap->a_mode & VEXEC) 378 mode |= NFSV3ACCESS_LOOKUP; 379 } else { 380 if (ap->a_mode & VWRITE) 381 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 382 if (ap->a_mode & VEXEC) 383 mode |= NFSV3ACCESS_EXECUTE; 384 } 385 *tl = txdr_unsigned(mode); 386 387 info.nmi_procp = ap->a_p; 388 info.nmi_cred = ap->a_cred; 389 error = nfs_request(vp, NFSPROC_ACCESS, &info); 390 391 nfsm_postop_attr(vp, attrflag); 392 if (error) { 393 m_freem(info.nmi_mrep); 394 goto nfsmout; 395 } 396 397 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 398 rmode = fxdr_unsigned(u_int32_t, *tl); 399 /* 400 * The NFS V3 spec does not clarify whether or not 401 * the returned access bits can be a superset of 402 * the ones requested, so... 403 */ 404 if ((rmode & mode) != mode) 405 error = EACCES; 406 407 m_freem(info.nmi_mrep); 408 } else 409 return (nfsspec_access(ap)); 410 411 412 /* 413 * If we got the same result as for a previous, different request, OR 414 * it in. Don't update the timestamp in that case. 415 */ 416 if (!error || error == EACCES) { 417 if (cachevalid && np->n_accstamp != -1 && 418 error == np->n_accerror) { 419 if (!error) 420 np->n_accmode |= ap->a_mode; 421 else { 422 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 423 np->n_accmode = ap->a_mode; 424 } 425 } else { 426 np->n_accstamp = time_second; 427 np->n_accuid = ap->a_cred->cr_uid; 428 np->n_accmode = ap->a_mode; 429 np->n_accerror = error; 430 } 431 } 432 nfsmout: 433 return (error); 434 } 435 436 /* 437 * nfs open vnode op 438 * Check to see if the type is ok 439 * and that deletion is not in progress. 440 * For paged in text files, you will need to flush the page cache 441 * if consistency is lost. 442 */ 443 int 444 nfs_open(void *v) 445 { 446 struct vop_open_args *ap = v; 447 struct vnode *vp = ap->a_vp; 448 struct nfsnode *np = VTONFS(vp); 449 struct vattr vattr; 450 int error; 451 452 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 453 #ifdef DIAGNOSTIC 454 printf("open eacces vtyp=%d\n",vp->v_type); 455 #endif 456 return (EACCES); 457 } 458 459 /* 460 * Initialize read and write creds here, for swapfiles 461 * and other paths that don't set the creds themselves. 
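 * The credentials cached here are the ones nfs_readrpc() and
 * nfs_writerpc() later attach to their RPCs (np->n_rcred and
 * np->n_wcred), so they have to be pinned down at open time.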
462 */ 463 464 if (ap->a_mode & FREAD) { 465 if (np->n_rcred) { 466 crfree(np->n_rcred); 467 } 468 np->n_rcred = ap->a_cred; 469 crhold(np->n_rcred); 470 } 471 if (ap->a_mode & FWRITE) { 472 if (np->n_wcred) { 473 crfree(np->n_wcred); 474 } 475 np->n_wcred = ap->a_cred; 476 crhold(np->n_wcred); 477 } 478 479 if (np->n_flag & NMODIFIED) { 480 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 481 if (error == EINTR) 482 return (error); 483 uvm_vnp_uncache(vp); 484 NFS_INVALIDATE_ATTRCACHE(np); 485 if (vp->v_type == VDIR) 486 np->n_direofoffset = 0; 487 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 488 if (error) 489 return (error); 490 np->n_mtime = vattr.va_mtime; 491 } else { 492 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 493 if (error) 494 return (error); 495 if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) { 496 if (vp->v_type == VDIR) 497 np->n_direofoffset = 0; 498 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 499 if (error == EINTR) 500 return (error); 501 uvm_vnp_uncache(vp); 502 np->n_mtime = vattr.va_mtime; 503 } 504 } 505 /* For open/close consistency. */ 506 NFS_INVALIDATE_ATTRCACHE(np); 507 return (0); 508 } 509 510 /* 511 * nfs close vnode op 512 * What an NFS client should do upon close after writing is a debatable issue. 513 * Most NFS clients push delayed writes to the server upon close, basically for 514 * two reasons: 515 * 1 - So that any write errors may be reported back to the client process 516 * doing the close system call. By far the two most likely errors are 517 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 518 * 2 - To put a worst case upper bound on cache inconsistency between 519 * multiple clients for the file. 520 * There is also a consistency problem for Version 2 of the protocol w.r.t. 521 * not being able to tell if other clients are writing a file concurrently, 522 * since there is no way of knowing if the changed modify time in the reply 523 * is only due to the write for this client. 524 * (NFS Version 3 provides weak cache consistency data in the reply that 525 * should be sufficient to detect and handle this case.) 526 * 527 * The current code does the following: 528 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 529 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 530 * or commit them (this satisfies 1 and 2 except for the 531 * case where the server crashes after this close but 532 * before the commit RPC, which is felt to be "good 533 * enough". Changing the last argument to nfs_flush() to 534 * a 1 would force a commit operation, if it is felt a 535 * commit is necessary now. 536 */ 537 int 538 nfs_close(void *v) 539 { 540 struct vop_close_args *ap = v; 541 struct vnode *vp = ap->a_vp; 542 struct nfsnode *np = VTONFS(vp); 543 int error = 0; 544 545 if (vp->v_type == VREG) { 546 if (np->n_flag & NMODIFIED) { 547 if (NFS_ISV3(vp)) { 548 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0); 549 np->n_flag &= ~NMODIFIED; 550 } else 551 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 552 NFS_INVALIDATE_ATTRCACHE(np); 553 } 554 if (np->n_flag & NWRITEERR) { 555 np->n_flag &= ~NWRITEERR; 556 error = np->n_error; 557 } 558 } 559 return (error); 560 } 561 562 /* 563 * nfs getattr call from vfs. 
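 * The cached attributes (nfs_getattrcache()) are consulted first; a
 * GETATTR RPC is only sent on a cache miss, e.g. right after
 * NFS_INVALIDATE_ATTRCACHE() has been applied to the nfsnode.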
564 */ 565 int 566 nfs_getattr(void *v) 567 { 568 struct vop_getattr_args *ap = v; 569 struct vnode *vp = ap->a_vp; 570 struct nfsnode *np = VTONFS(vp); 571 struct nfsm_info info; 572 int32_t t1; 573 int error = 0; 574 575 info.nmi_v3 = NFS_ISV3(vp); 576 577 /* 578 * Update local times for special files. 579 */ 580 if (np->n_flag & (NACC | NUPD)) 581 np->n_flag |= NCHG; 582 /* 583 * First look in the cache. 584 */ 585 if (nfs_getattrcache(vp, ap->a_vap) == 0) 586 return (0); 587 588 nfsstats.rpccnt[NFSPROC_GETATTR]++; 589 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)); 590 nfsm_fhtom(&info, vp, info.nmi_v3); 591 info.nmi_procp = ap->a_p; 592 info.nmi_cred = ap->a_cred; 593 error = nfs_request(vp, NFSPROC_GETATTR, &info); 594 if (!error) 595 nfsm_loadattr(vp, ap->a_vap); 596 m_freem(info.nmi_mrep); 597 nfsmout: 598 return (error); 599 } 600 601 /* 602 * nfs setattr call. 603 */ 604 int 605 nfs_setattr(void *v) 606 { 607 struct vop_setattr_args *ap = v; 608 struct vnode *vp = ap->a_vp; 609 struct nfsnode *np = VTONFS(vp); 610 struct vattr *vap = ap->a_vap; 611 int hint = NOTE_ATTRIB; 612 int error = 0; 613 u_quad_t tsize = 0; 614 615 /* 616 * Setting of flags is not supported. 617 */ 618 if (vap->va_flags != VNOVAL) 619 return (EOPNOTSUPP); 620 621 /* 622 * Disallow write attempts if the filesystem is mounted read-only. 623 */ 624 if ((vap->va_uid != (uid_t)VNOVAL || 625 vap->va_gid != (gid_t)VNOVAL || 626 vap->va_atime.tv_nsec != VNOVAL || 627 vap->va_mtime.tv_nsec != VNOVAL || 628 vap->va_mode != (mode_t)VNOVAL) && 629 (vp->v_mount->mnt_flag & MNT_RDONLY)) 630 return (EROFS); 631 if (vap->va_size != VNOVAL) { 632 switch (vp->v_type) { 633 case VDIR: 634 return (EISDIR); 635 case VCHR: 636 case VBLK: 637 case VSOCK: 638 case VFIFO: 639 if (vap->va_mtime.tv_nsec == VNOVAL && 640 vap->va_atime.tv_nsec == VNOVAL && 641 vap->va_mode == (mode_t)VNOVAL && 642 vap->va_uid == (uid_t)VNOVAL && 643 vap->va_gid == (gid_t)VNOVAL) 644 return (0); 645 vap->va_size = VNOVAL; 646 break; 647 default: 648 /* 649 * Disallow write attempts if the filesystem is 650 * mounted read-only. 651 */ 652 if (vp->v_mount->mnt_flag & MNT_RDONLY) 653 return (EROFS); 654 if (vap->va_size == 0) 655 error = nfs_vinvalbuf(vp, 0, 656 ap->a_cred, ap->a_p); 657 else 658 error = nfs_vinvalbuf(vp, V_SAVE, 659 ap->a_cred, ap->a_p); 660 if (error) 661 return (error); 662 tsize = np->n_size; 663 np->n_size = np->n_vattr.va_size = vap->va_size; 664 uvm_vnp_setsize(vp, np->n_size); 665 }; 666 } else if ((vap->va_mtime.tv_nsec != VNOVAL || 667 vap->va_atime.tv_nsec != VNOVAL) && 668 vp->v_type == VREG && 669 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 670 ap->a_p)) == EINTR) 671 return (error); 672 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); 673 if (error && vap->va_size != VNOVAL) { 674 np->n_size = np->n_vattr.va_size = tsize; 675 uvm_vnp_setsize(vp, np->n_size); 676 } 677 678 if (vap->va_size != VNOVAL && vap->va_size < tsize) 679 hint |= NOTE_TRUNCATE; 680 681 VN_KNOTE(vp, hint); /* XXX setattrrpc? */ 682 683 return (error); 684 } 685 686 /* 687 * Do an nfs setattr rpc. 
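 * For v3 the attributes are encoded with nfsm_v3attrbuild(), which
 * carries a per-field "set it" flag.  v2 has no such flags, so fields
 * that should be left untouched are sent as the all-ones XDR value,
 * roughly (sketch of the v2 branch below):
 *
 *	sp->sa_mode = (vap->va_mode == VNOVAL) ?
 *	    nfs_xdrneg1 : vtonfsv2_mode(vp->v_type, vap->va_mode);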
688 */ 689 int 690 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred, 691 struct proc *procp) 692 { 693 struct nfsv2_sattr *sp; 694 struct nfsm_info info; 695 int32_t t1; 696 caddr_t cp2; 697 u_int32_t *tl; 698 int error = 0, wccflag = NFSV3_WCCRATTR; 699 int v3 = NFS_ISV3(vp); 700 701 info.nmi_v3 = NFS_ISV3(vp); 702 703 nfsstats.rpccnt[NFSPROC_SETATTR]++; 704 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_SATTR(v3)); 705 nfsm_fhtom(&info, vp, v3); 706 707 if (info.nmi_v3) { 708 nfsm_v3attrbuild(&info.nmi_mb, vap, 1); 709 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 710 *tl = nfs_false; 711 } else { 712 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 713 if (vap->va_mode == (mode_t)VNOVAL) 714 sp->sa_mode = nfs_xdrneg1; 715 else 716 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 717 if (vap->va_uid == (uid_t)VNOVAL) 718 sp->sa_uid = nfs_xdrneg1; 719 else 720 sp->sa_uid = txdr_unsigned(vap->va_uid); 721 if (vap->va_gid == (gid_t)VNOVAL) 722 sp->sa_gid = nfs_xdrneg1; 723 else 724 sp->sa_gid = txdr_unsigned(vap->va_gid); 725 sp->sa_size = txdr_unsigned(vap->va_size); 726 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 727 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 728 } 729 730 info.nmi_procp = procp; 731 info.nmi_cred = cred; 732 error = nfs_request(vp, NFSPROC_SETATTR, &info); 733 734 if (info.nmi_v3) 735 nfsm_wcc_data(vp, wccflag); 736 else if (error == 0) 737 nfsm_loadattr(vp, NULL); 738 739 m_freem(info.nmi_mrep); 740 nfsmout: 741 return (error); 742 } 743 744 /* 745 * nfs lookup call, one step at a time... 746 * First look in cache 747 * If not found, unlock the directory nfsnode and do the rpc 748 */ 749 int 750 nfs_lookup(void *v) 751 { 752 struct vop_lookup_args *ap = v; 753 struct componentname *cnp = ap->a_cnp; 754 struct vnode *dvp = ap->a_dvp; 755 struct vnode **vpp = ap->a_vpp; 756 struct nfsm_info info; 757 int flags; 758 struct vnode *newvp; 759 u_int32_t *tl; 760 int32_t t1; 761 struct nfsmount *nmp; 762 caddr_t cp2; 763 long len; 764 nfsfh_t *fhp; 765 struct nfsnode *np; 766 int lockparent, wantparent, error = 0, attrflag, fhsize; 767 768 info.nmi_v3 = NFS_ISV3(dvp); 769 770 cnp->cn_flags &= ~PDIRUNLOCK; 771 flags = cnp->cn_flags; 772 773 *vpp = NULLVP; 774 newvp = NULLVP; 775 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 776 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 777 return (EROFS); 778 if (dvp->v_type != VDIR) 779 return (ENOTDIR); 780 lockparent = flags & LOCKPARENT; 781 wantparent = flags & (LOCKPARENT|WANTPARENT); 782 nmp = VFSTONFS(dvp->v_mount); 783 np = VTONFS(dvp); 784 785 /* 786 * Before tediously performing a linear scan of the directory, 787 * check the name cache to see if the directory/name pair 788 * we are looking for is known already. 789 * If the directory/name pair is found in the name cache, 790 * we have to ensure the directory has not changed from 791 * the time the cache entry has been created. If it has, 792 * the cache entry has to be ignored. 
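 * Concretely: nfs_cache_enter() records the entry's ctime (or the
 * directory's mtime, for a negative entry) in n_ctime, and the code
 * below only trusts a cache hit after a fresh VOP_GETATTR() shows that
 * timestamp is still current.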
793 */ 794 if ((error = cache_lookup(dvp, vpp, cnp)) >= 0) { 795 struct vattr vattr; 796 int err2; 797 798 if (error && error != ENOENT) { 799 *vpp = NULLVP; 800 return (error); 801 } 802 803 if (cnp->cn_flags & PDIRUNLOCK) { 804 err2 = vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); 805 if (err2 != 0) { 806 *vpp = NULLVP; 807 return (err2); 808 } 809 cnp->cn_flags &= ~PDIRUNLOCK; 810 } 811 812 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_proc); 813 if (err2 != 0) { 814 if (error == 0) { 815 if (*vpp != dvp) 816 vput(*vpp); 817 else 818 vrele(*vpp); 819 } 820 *vpp = NULLVP; 821 return (err2); 822 } 823 824 if (error == ENOENT) { 825 if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred, 826 cnp->cn_proc) && vattr.va_mtime.tv_sec == 827 VTONFS(dvp)->n_ctime) 828 return (ENOENT); 829 cache_purge(dvp); 830 np->n_ctime = 0; 831 goto dorpc; 832 } 833 834 newvp = *vpp; 835 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_proc) 836 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) 837 { 838 nfsstats.lookupcache_hits++; 839 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 840 cnp->cn_flags |= SAVENAME; 841 if ((!lockparent || !(flags & ISLASTCN)) && 842 newvp != dvp) { 843 VOP_UNLOCK(dvp); 844 cnp->cn_flags |= PDIRUNLOCK; 845 } 846 return (0); 847 } 848 cache_purge(newvp); 849 if (newvp != dvp) 850 vput(newvp); 851 else 852 vrele(newvp); 853 *vpp = NULLVP; 854 } 855 dorpc: 856 error = 0; 857 newvp = NULLVP; 858 nfsstats.lookupcache_misses++; 859 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 860 len = cnp->cn_namelen; 861 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 862 NFSX_UNSIGNED + nfsm_rndup(len)); 863 nfsm_fhtom(&info, dvp, info.nmi_v3); 864 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 865 866 info.nmi_procp = cnp->cn_proc; 867 info.nmi_cred = cnp->cn_cred; 868 error = nfs_request(dvp, NFSPROC_LOOKUP, &info); 869 870 if (error) { 871 if (info.nmi_v3) 872 nfsm_postop_attr(dvp, attrflag); 873 m_freem(info.nmi_mrep); 874 goto nfsmout; 875 } 876 877 nfsm_getfh(fhp, fhsize, info.nmi_v3); 878 879 /* 880 * Handle RENAME case... 881 */ 882 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) { 883 if (NFS_CMPFH(np, fhp, fhsize)) { 884 m_freem(info.nmi_mrep); 885 return (EISDIR); 886 } 887 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 888 if (error) { 889 m_freem(info.nmi_mrep); 890 return (error); 891 } 892 newvp = NFSTOV(np); 893 if (info.nmi_v3) { 894 nfsm_postop_attr(newvp, attrflag); 895 nfsm_postop_attr(dvp, attrflag); 896 } else 897 nfsm_loadattr(newvp, NULL); 898 *vpp = newvp; 899 m_freem(info.nmi_mrep); 900 cnp->cn_flags |= SAVENAME; 901 if (!lockparent) { 902 VOP_UNLOCK(dvp); 903 cnp->cn_flags |= PDIRUNLOCK; 904 } 905 return (0); 906 } 907 908 /* 909 * The postop attr handling is duplicated for each if case, 910 * because it should be done while dvp is locked (unlocking 911 * dvp is different for each case). 
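 * The three cases are: the name resolves to dvp itself (same file
 * handle), a ".." lookup (dvp is unlocked before the parent nfsnode is
 * fetched), and the ordinary case.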
912 */ 913 914 if (NFS_CMPFH(np, fhp, fhsize)) { 915 vref(dvp); 916 newvp = dvp; 917 if (info.nmi_v3) { 918 nfsm_postop_attr(newvp, attrflag); 919 nfsm_postop_attr(dvp, attrflag); 920 } else 921 nfsm_loadattr(newvp, NULL); 922 } else if (flags & ISDOTDOT) { 923 VOP_UNLOCK(dvp); 924 cnp->cn_flags |= PDIRUNLOCK; 925 926 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 927 if (error) { 928 if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY) == 0) 929 cnp->cn_flags &= ~PDIRUNLOCK; 930 m_freem(info.nmi_mrep); 931 return (error); 932 } 933 newvp = NFSTOV(np); 934 935 if (info.nmi_v3) { 936 nfsm_postop_attr(newvp, attrflag); 937 nfsm_postop_attr(dvp, attrflag); 938 } else 939 nfsm_loadattr(newvp, NULL); 940 941 if (lockparent && (flags & ISLASTCN)) { 942 if ((error = vn_lock(dvp, LK_EXCLUSIVE))) { 943 m_freem(info.nmi_mrep); 944 vput(newvp); 945 return error; 946 } 947 cnp->cn_flags &= ~PDIRUNLOCK; 948 } 949 950 } else { 951 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 952 if (error) { 953 m_freem(info.nmi_mrep); 954 return error; 955 } 956 newvp = NFSTOV(np); 957 if (info.nmi_v3) { 958 nfsm_postop_attr(newvp, attrflag); 959 nfsm_postop_attr(dvp, attrflag); 960 } else 961 nfsm_loadattr(newvp, NULL); 962 if (!lockparent || !(flags & ISLASTCN)) { 963 VOP_UNLOCK(dvp); 964 cnp->cn_flags |= PDIRUNLOCK; 965 } 966 } 967 968 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 969 cnp->cn_flags |= SAVENAME; 970 if ((cnp->cn_flags & MAKEENTRY) && 971 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) { 972 nfs_cache_enter(dvp, newvp, cnp); 973 } 974 975 *vpp = newvp; 976 m_freem(info.nmi_mrep); 977 978 nfsmout: 979 if (error) { 980 /* 981 * We get here only because of errors returned by 982 * the RPC. Otherwise we'll have returned above 983 * (the nfsm_* macros will jump to nfsmout 984 * on error). 985 */ 986 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) && 987 cnp->cn_nameiop != CREATE) { 988 nfs_cache_enter(dvp, NULL, cnp); 989 } 990 if (newvp != NULLVP) { 991 if (newvp != dvp) 992 vput(newvp); 993 else 994 vrele(newvp); 995 } 996 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 997 (flags & ISLASTCN) && error == ENOENT) { 998 if (dvp->v_mount->mnt_flag & MNT_RDONLY) 999 error = EROFS; 1000 else 1001 error = EJUSTRETURN; 1002 } 1003 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 1004 cnp->cn_flags |= SAVENAME; 1005 *vpp = NULL; 1006 } 1007 return (error); 1008 } 1009 1010 /* 1011 * nfs read call. 1012 * Just call nfs_bioread() to do the work. 1013 */ 1014 int 1015 nfs_read(void *v) 1016 { 1017 struct vop_read_args *ap = v; 1018 struct vnode *vp = ap->a_vp; 1019 1020 if (vp->v_type != VREG) 1021 return (EPERM); 1022 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred)); 1023 } 1024 1025 /* 1026 * nfs readlink call 1027 */ 1028 int 1029 nfs_readlink(void *v) 1030 { 1031 struct vop_readlink_args *ap = v; 1032 struct vnode *vp = ap->a_vp; 1033 1034 if (vp->v_type != VLNK) 1035 return (EPERM); 1036 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred)); 1037 } 1038 1039 /* 1040 * Lock an inode. 1041 */ 1042 int 1043 nfs_lock(void *v) 1044 { 1045 struct vop_lock_args *ap = v; 1046 struct vnode *vp = ap->a_vp; 1047 1048 return rrw_enter(&VTONFS(vp)->n_lock, ap->a_flags & LK_RWFLAGS); 1049 } 1050 1051 /* 1052 * Unlock an inode. 1053 */ 1054 int 1055 nfs_unlock(void *v) 1056 { 1057 struct vop_unlock_args *ap = v; 1058 struct vnode *vp = ap->a_vp; 1059 1060 rrw_exit(&VTONFS(vp)->n_lock); 1061 return 0; 1062 } 1063 1064 /* 1065 * Check for a locked inode. 
1066 */ 1067 int 1068 nfs_islocked(void *v) 1069 { 1070 struct vop_islocked_args *ap = v; 1071 1072 return rrw_status(&VTONFS(ap->a_vp)->n_lock); 1073 } 1074 1075 /* 1076 * Do a readlink rpc. 1077 * Called by nfs_doio() from below the buffer cache. 1078 */ 1079 int 1080 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 1081 { 1082 struct nfsm_info info; 1083 u_int32_t *tl; 1084 int32_t t1; 1085 caddr_t cp2; 1086 int error = 0, len, attrflag; 1087 1088 info.nmi_v3 = NFS_ISV3(vp); 1089 1090 nfsstats.rpccnt[NFSPROC_READLINK]++; 1091 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)); 1092 nfsm_fhtom(&info, vp, info.nmi_v3); 1093 1094 info.nmi_procp = curproc; 1095 info.nmi_cred = cred; 1096 error = nfs_request(vp, NFSPROC_READLINK, &info); 1097 1098 if (info.nmi_v3) 1099 nfsm_postop_attr(vp, attrflag); 1100 if (!error) { 1101 nfsm_strsiz(len, NFS_MAXPATHLEN); 1102 nfsm_mtouio(uiop, len); 1103 } 1104 1105 m_freem(info.nmi_mrep); 1106 1107 nfsmout: 1108 return (error); 1109 } 1110 1111 /* 1112 * nfs read rpc call 1113 * Ditto above 1114 */ 1115 int 1116 nfs_readrpc(struct vnode *vp, struct uio *uiop) 1117 { 1118 struct nfsm_info info; 1119 u_int32_t *tl; 1120 int32_t t1; 1121 caddr_t cp2; 1122 struct nfsmount *nmp; 1123 int error = 0, len, retlen, tsiz, eof, attrflag; 1124 1125 info.nmi_v3 = NFS_ISV3(vp); 1126 1127 eof = 0; 1128 1129 nmp = VFSTONFS(vp->v_mount); 1130 tsiz = uiop->uio_resid; 1131 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3) 1132 return (EFBIG); 1133 while (tsiz > 0) { 1134 nfsstats.rpccnt[NFSPROC_READ]++; 1135 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz; 1136 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1137 NFSX_UNSIGNED * 3); 1138 nfsm_fhtom(&info, vp, info.nmi_v3); 1139 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED * 3); 1140 if (info.nmi_v3) { 1141 txdr_hyper(uiop->uio_offset, tl); 1142 *(tl + 2) = txdr_unsigned(len); 1143 } else { 1144 *tl++ = txdr_unsigned(uiop->uio_offset); 1145 *tl++ = txdr_unsigned(len); 1146 *tl = 0; 1147 } 1148 1149 info.nmi_procp = curproc; 1150 info.nmi_cred = VTONFS(vp)->n_rcred; 1151 error = nfs_request(vp, NFSPROC_READ, &info); 1152 if (info.nmi_v3) 1153 nfsm_postop_attr(vp, attrflag); 1154 if (error) { 1155 m_freem(info.nmi_mrep); 1156 goto nfsmout; 1157 } 1158 1159 if (info.nmi_v3) { 1160 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1161 eof = fxdr_unsigned(int, *(tl + 1)); 1162 } else { 1163 nfsm_loadattr(vp, NULL); 1164 } 1165 1166 nfsm_strsiz(retlen, nmp->nm_rsize); 1167 nfsm_mtouio(uiop, retlen); 1168 m_freem(info.nmi_mrep); 1169 tsiz -= retlen; 1170 if (info.nmi_v3) { 1171 if (eof || retlen == 0) 1172 tsiz = 0; 1173 } else if (retlen < len) 1174 tsiz = 0; 1175 } 1176 1177 nfsmout: 1178 return (error); 1179 } 1180 1181 /* 1182 * nfs write call 1183 */ 1184 int 1185 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, int *must_commit) 1186 { 1187 struct nfsm_info info; 1188 u_int32_t *tl; 1189 int32_t t1, backup; 1190 caddr_t cp2; 1191 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1192 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit; 1193 int committed = NFSV3WRITE_FILESYNC; 1194 1195 info.nmi_v3 = NFS_ISV3(vp); 1196 1197 #ifdef DIAGNOSTIC 1198 if (uiop->uio_iovcnt != 1) 1199 panic("nfs: writerpc iovcnt > 1"); 1200 #endif 1201 *must_commit = 0; 1202 tsiz = uiop->uio_resid; 1203 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3) 1204 return (EFBIG); 1205 while (tsiz > 0) { 1206 nfsstats.rpccnt[NFSPROC_WRITE]++; 1207 len = (tsiz > 
nmp->nm_wsize) ? nmp->nm_wsize : tsiz; 1208 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 1209 + 5 * NFSX_UNSIGNED + nfsm_rndup(len)); 1210 nfsm_fhtom(&info, vp, info.nmi_v3); 1211 if (info.nmi_v3) { 1212 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED); 1213 txdr_hyper(uiop->uio_offset, tl); 1214 tl += 2; 1215 *tl++ = txdr_unsigned(len); 1216 *tl++ = txdr_unsigned(*iomode); 1217 *tl = txdr_unsigned(len); 1218 } else { 1219 u_int32_t x; 1220 1221 tl = nfsm_build(&info.nmi_mb, 4 * NFSX_UNSIGNED); 1222 /* Set both "begin" and "current" to non-garbage. */ 1223 x = txdr_unsigned((u_int32_t)uiop->uio_offset); 1224 *tl++ = x; /* "begin offset" */ 1225 *tl++ = x; /* "current offset" */ 1226 x = txdr_unsigned(len); 1227 *tl++ = x; /* total to this offset */ 1228 *tl = x; /* size of this write */ 1229 1230 } 1231 nfsm_uiotombuf(&info.nmi_mb, uiop, len); 1232 1233 info.nmi_procp = curproc; 1234 info.nmi_cred = VTONFS(vp)->n_wcred; 1235 error = nfs_request(vp, NFSPROC_WRITE, &info); 1236 if (info.nmi_v3) { 1237 wccflag = NFSV3_WCCCHK; 1238 nfsm_wcc_data(vp, wccflag); 1239 } 1240 1241 if (error) { 1242 m_freem(info.nmi_mrep); 1243 goto nfsmout; 1244 } 1245 1246 if (info.nmi_v3) { 1247 wccflag = NFSV3_WCCCHK; 1248 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED 1249 + NFSX_V3WRITEVERF); 1250 rlen = fxdr_unsigned(int, *tl++); 1251 if (rlen <= 0) { 1252 error = NFSERR_IO; 1253 break; 1254 } else if (rlen < len) { 1255 backup = len - rlen; 1256 uiop->uio_iov->iov_base = 1257 (char *)uiop->uio_iov->iov_base - 1258 backup; 1259 uiop->uio_iov->iov_len += backup; 1260 uiop->uio_offset -= backup; 1261 uiop->uio_resid += backup; 1262 len = rlen; 1263 } 1264 commit = fxdr_unsigned(int, *tl++); 1265 1266 /* 1267 * Return the lowest committment level 1268 * obtained by any of the RPCs. 1269 */ 1270 if (committed == NFSV3WRITE_FILESYNC) 1271 committed = commit; 1272 else if (committed == NFSV3WRITE_DATASYNC && 1273 commit == NFSV3WRITE_UNSTABLE) 1274 committed = commit; 1275 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) { 1276 bcopy(tl, nmp->nm_verf, 1277 NFSX_V3WRITEVERF); 1278 nmp->nm_flag |= NFSMNT_HASWRITEVERF; 1279 } else if (bcmp(tl, 1280 nmp->nm_verf, NFSX_V3WRITEVERF)) { 1281 *must_commit = 1; 1282 bcopy(tl, nmp->nm_verf, 1283 NFSX_V3WRITEVERF); 1284 } 1285 } else { 1286 nfsm_loadattr(vp, NULL); 1287 } 1288 if (wccflag) 1289 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime; 1290 m_freem(info.nmi_mrep); 1291 tsiz -= len; 1292 } 1293 nfsmout: 1294 *iomode = committed; 1295 if (error) 1296 uiop->uio_resid = tsiz; 1297 return (error); 1298 } 1299 1300 /* 1301 * nfs mknod rpc 1302 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1303 * mode set to specify the file type and the size field for rdev. 
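 * e.g. a character device goes out over v2 roughly as (sketch of the
 * v2 branch below)
 *
 *	sp->sa_mode = vtonfsv2_mode(VCHR, vap->va_mode);
 *	sp->sa_size = rdev;
 *
 * whereas v3 has a real MKNOD procedure that carries the major and
 * minor numbers explicitly.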
1304 */ 1305 int 1306 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1307 struct vattr *vap) 1308 { 1309 struct nfsv2_sattr *sp; 1310 struct nfsm_info info; 1311 u_int32_t *tl; 1312 int32_t t1; 1313 struct vnode *newvp = NULL; 1314 struct nfsnode *np = NULL; 1315 char *cp2; 1316 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1317 u_int32_t rdev; 1318 1319 info.nmi_v3 = NFS_ISV3(dvp); 1320 1321 if (vap->va_type == VCHR || vap->va_type == VBLK) 1322 rdev = txdr_unsigned(vap->va_rdev); 1323 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1324 rdev = nfs_xdrneg1; 1325 else { 1326 VOP_ABORTOP(dvp, cnp); 1327 return (EOPNOTSUPP); 1328 } 1329 nfsstats.rpccnt[NFSPROC_MKNOD]++; 1330 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1331 4 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + 1332 NFSX_SATTR(info.nmi_v3)); 1333 nfsm_fhtom(&info, dvp, info.nmi_v3); 1334 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1335 1336 if (info.nmi_v3) { 1337 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 1338 *tl++ = vtonfsv3_type(vap->va_type); 1339 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1340 if (vap->va_type == VCHR || vap->va_type == VBLK) { 1341 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED); 1342 *tl++ = txdr_unsigned(major(vap->va_rdev)); 1343 *tl = txdr_unsigned(minor(vap->va_rdev)); 1344 } 1345 } else { 1346 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1347 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1348 sp->sa_uid = nfs_xdrneg1; 1349 sp->sa_gid = nfs_xdrneg1; 1350 sp->sa_size = rdev; 1351 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1352 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1353 } 1354 1355 KASSERT(cnp->cn_proc == curproc); 1356 info.nmi_procp = cnp->cn_proc; 1357 info.nmi_cred = cnp->cn_cred; 1358 error = nfs_request(dvp, NFSPROC_MKNOD, &info); 1359 if (!error) { 1360 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1361 if (!gotvp) { 1362 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1363 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1364 if (!error) 1365 newvp = NFSTOV(np); 1366 } 1367 } 1368 if (info.nmi_v3) 1369 nfsm_wcc_data(dvp, wccflag); 1370 m_freem(info.nmi_mrep); 1371 1372 nfsmout: 1373 if (error) { 1374 if (newvp) 1375 vput(newvp); 1376 } else { 1377 if (cnp->cn_flags & MAKEENTRY) 1378 nfs_cache_enter(dvp, newvp, cnp); 1379 *vpp = newvp; 1380 } 1381 pool_put(&namei_pool, cnp->cn_pnbuf); 1382 VTONFS(dvp)->n_flag |= NMODIFIED; 1383 if (!wccflag) 1384 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1385 return (error); 1386 } 1387 1388 /* 1389 * nfs mknod vop 1390 * just call nfs_mknodrpc() to do the work. 1391 */ 1392 int 1393 nfs_mknod(void *v) 1394 { 1395 struct vop_mknod_args *ap = v; 1396 struct vnode *newvp; 1397 int error; 1398 1399 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap); 1400 if (!error) 1401 vput(newvp); 1402 1403 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1404 1405 return (error); 1406 } 1407 1408 int 1409 nfs_create(void *v) 1410 { 1411 struct vop_create_args *ap = v; 1412 struct vnode *dvp = ap->a_dvp; 1413 struct vattr *vap = ap->a_vap; 1414 struct componentname *cnp = ap->a_cnp; 1415 struct nfsv2_sattr *sp; 1416 struct nfsm_info info; 1417 u_int32_t *tl; 1418 int32_t t1; 1419 struct nfsnode *np = NULL; 1420 struct vnode *newvp = NULL; 1421 caddr_t cp2; 1422 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0; 1423 1424 info.nmi_v3 = NFS_ISV3(dvp); 1425 1426 /* 1427 * Oops, not for me.. 
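 * (Sockets are handed to nfs_mknodrpc(); everything else goes through
 * the CREATE request built below, using the v3 exclusive-create
 * verifier when VA_EXCLUSIVE was requested.)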
1428 */ 1429 if (vap->va_type == VSOCK) 1430 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1431 1432 if (vap->va_vaflags & VA_EXCLUSIVE) 1433 fmode |= O_EXCL; 1434 1435 again: 1436 nfsstats.rpccnt[NFSPROC_CREATE]++; 1437 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1438 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + 1439 NFSX_SATTR(info.nmi_v3)); 1440 nfsm_fhtom(&info, dvp, info.nmi_v3); 1441 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1442 if (info.nmi_v3) { 1443 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 1444 if (fmode & O_EXCL) { 1445 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE); 1446 tl = nfsm_build(&info.nmi_mb, NFSX_V3CREATEVERF); 1447 arc4random_buf(tl, sizeof(*tl) * 2); 1448 } else { 1449 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED); 1450 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1451 } 1452 } else { 1453 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1454 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1455 sp->sa_uid = nfs_xdrneg1; 1456 sp->sa_gid = nfs_xdrneg1; 1457 sp->sa_size = 0; 1458 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1459 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1460 } 1461 1462 KASSERT(cnp->cn_proc == curproc); 1463 info.nmi_procp = cnp->cn_proc; 1464 info.nmi_cred = cnp->cn_cred; 1465 error = nfs_request(dvp, NFSPROC_CREATE, &info); 1466 if (!error) { 1467 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1468 if (!gotvp) { 1469 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1470 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1471 if (!error) 1472 newvp = NFSTOV(np); 1473 } 1474 } 1475 if (info.nmi_v3) 1476 nfsm_wcc_data(dvp, wccflag); 1477 m_freem(info.nmi_mrep); 1478 1479 nfsmout: 1480 if (error) { 1481 if (newvp) { 1482 vput(newvp); 1483 newvp = NULL; 1484 } 1485 if (info.nmi_v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) { 1486 fmode &= ~O_EXCL; 1487 goto again; 1488 } 1489 } else if (info.nmi_v3 && (fmode & O_EXCL)) 1490 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc); 1491 if (!error) { 1492 if (cnp->cn_flags & MAKEENTRY) 1493 nfs_cache_enter(dvp, newvp, cnp); 1494 *ap->a_vpp = newvp; 1495 } 1496 pool_put(&namei_pool, cnp->cn_pnbuf); 1497 VTONFS(dvp)->n_flag |= NMODIFIED; 1498 if (!wccflag) 1499 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1500 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1501 return (error); 1502 } 1503 1504 /* 1505 * nfs file remove call 1506 * To try and make nfs semantics closer to ufs semantics, a file that has 1507 * other processes using the vnode is renamed instead of removed and then 1508 * removed later on the last close. 1509 * - If v_usecount > 1 1510 * If a rename is not already in the works 1511 * call nfs_sillyrename() to set it up 1512 * else 1513 * do the remove rpc 1514 */ 1515 int 1516 nfs_remove(void *v) 1517 { 1518 struct vop_remove_args *ap = v; 1519 struct vnode *vp = ap->a_vp; 1520 struct vnode *dvp = ap->a_dvp; 1521 struct componentname *cnp = ap->a_cnp; 1522 struct nfsnode *np = VTONFS(vp); 1523 int error = 0; 1524 struct vattr vattr; 1525 1526 #ifdef DIAGNOSTIC 1527 if ((cnp->cn_flags & HASBUF) == 0) 1528 panic("nfs_remove: no name"); 1529 if (vp->v_usecount < 1) 1530 panic("nfs_remove: bad v_usecount"); 1531 #endif 1532 if (vp->v_type == VDIR) 1533 error = EPERM; 1534 else if (vp->v_usecount == 1 || (np->n_sillyrename && 1535 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 && 1536 vattr.va_nlink > 1)) { 1537 /* 1538 * Purge the name cache so that the chance of a lookup for 1539 * the name succeeding while the remove is in progress is 1540 * minimized. 
Without node locking it can still happen, such 1541 * that an I/O op returns ESTALE, but since you get this if 1542 * another host removes the file.. 1543 */ 1544 cache_purge(vp); 1545 /* 1546 * throw away biocache buffers, mainly to avoid 1547 * unnecessary delayed writes later. 1548 */ 1549 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc); 1550 /* Do the rpc */ 1551 if (error != EINTR) 1552 error = nfs_removerpc(dvp, cnp->cn_nameptr, 1553 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc); 1554 /* 1555 * Kludge City: If the first reply to the remove rpc is lost.. 1556 * the reply to the retransmitted request will be ENOENT 1557 * since the file was in fact removed 1558 * Therefore, we cheat and return success. 1559 */ 1560 if (error == ENOENT) 1561 error = 0; 1562 } else if (!np->n_sillyrename) 1563 error = nfs_sillyrename(dvp, vp, cnp); 1564 pool_put(&namei_pool, cnp->cn_pnbuf); 1565 NFS_INVALIDATE_ATTRCACHE(np); 1566 VN_KNOTE(vp, NOTE_DELETE); 1567 VN_KNOTE(dvp, NOTE_WRITE); 1568 if (vp == dvp) 1569 vrele(vp); 1570 else 1571 vput(vp); 1572 vput(dvp); 1573 return (error); 1574 } 1575 1576 /* 1577 * nfs file remove rpc called from nfs_inactive 1578 */ 1579 int 1580 nfs_removeit(struct sillyrename *sp) 1581 { 1582 KASSERT(VOP_ISLOCKED(sp->s_dvp)); 1583 /* 1584 * Make sure that the directory vnode is still valid. 1585 * 1586 * NFS can potentially try to nuke a silly *after* the directory 1587 * has already been pushed out on a forced unmount. Since the silly 1588 * is going to go away anyway, this is fine. 1589 */ 1590 if (sp->s_dvp->v_type == VBAD) 1591 return (0); 1592 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred, 1593 NULL)); 1594 } 1595 1596 /* 1597 * Nfs remove rpc, called from nfs_remove() and nfs_removeit(). 1598 */ 1599 int 1600 nfs_removerpc(struct vnode *dvp, char *name, int namelen, struct ucred *cred, 1601 struct proc *proc) 1602 { 1603 struct nfsm_info info; 1604 u_int32_t *tl; 1605 int32_t t1; 1606 caddr_t cp2; 1607 int error = 0, wccflag = NFSV3_WCCRATTR; 1608 1609 info.nmi_v3 = NFS_ISV3(dvp); 1610 1611 nfsstats.rpccnt[NFSPROC_REMOVE]++; 1612 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1613 NFSX_UNSIGNED + nfsm_rndup(namelen)); 1614 nfsm_fhtom(&info, dvp, info.nmi_v3); 1615 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 1616 1617 info.nmi_procp = proc; 1618 info.nmi_cred = cred; 1619 error = nfs_request(dvp, NFSPROC_REMOVE, &info); 1620 if (info.nmi_v3) 1621 nfsm_wcc_data(dvp, wccflag); 1622 m_freem(info.nmi_mrep); 1623 1624 nfsmout: 1625 VTONFS(dvp)->n_flag |= NMODIFIED; 1626 if (!wccflag) 1627 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1628 return (error); 1629 } 1630 1631 /* 1632 * nfs file rename call 1633 */ 1634 int 1635 nfs_rename(void *v) 1636 { 1637 struct vop_rename_args *ap = v; 1638 struct vnode *fvp = ap->a_fvp; 1639 struct vnode *tvp = ap->a_tvp; 1640 struct vnode *fdvp = ap->a_fdvp; 1641 struct vnode *tdvp = ap->a_tdvp; 1642 struct componentname *tcnp = ap->a_tcnp; 1643 struct componentname *fcnp = ap->a_fcnp; 1644 int error; 1645 1646 #ifdef DIAGNOSTIC 1647 if ((tcnp->cn_flags & HASBUF) == 0 || 1648 (fcnp->cn_flags & HASBUF) == 0) 1649 panic("nfs_rename: no name"); 1650 #endif 1651 /* Check for cross-device rename */ 1652 if ((fvp->v_mount != tdvp->v_mount) || 1653 (tvp && (fvp->v_mount != tvp->v_mount))) { 1654 error = EXDEV; 1655 goto out; 1656 } 1657 1658 /* 1659 * If the tvp exists and is in use, sillyrename it before doing the 1660 * rename of the new file over it. 
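 * nfs_sillyrename() renames the busy file to a temporary ".nfs..."
 * style name on the server rather than removing it; the leftover is
 * cleaned up later through nfs_removeit() once the vnode goes
 * inactive, so whoever still has it open keeps working.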
1661 */ 1662 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename && 1663 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { 1664 VN_KNOTE(tvp, NOTE_DELETE); 1665 vput(tvp); 1666 tvp = NULL; 1667 } 1668 1669 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1670 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1671 tcnp->cn_proc); 1672 1673 VN_KNOTE(fdvp, NOTE_WRITE); 1674 VN_KNOTE(tdvp, NOTE_WRITE); 1675 1676 if (fvp->v_type == VDIR) { 1677 if (tvp != NULL && tvp->v_type == VDIR) 1678 cache_purge(tdvp); 1679 cache_purge(fdvp); 1680 } 1681 out: 1682 if (tdvp == tvp) 1683 vrele(tdvp); 1684 else 1685 vput(tdvp); 1686 if (tvp) 1687 vput(tvp); 1688 vrele(fdvp); 1689 vrele(fvp); 1690 /* 1691 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1692 */ 1693 if (error == ENOENT) 1694 error = 0; 1695 return (error); 1696 } 1697 1698 /* 1699 * nfs file rename rpc called from nfs_remove() above 1700 */ 1701 int 1702 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, 1703 struct sillyrename *sp) 1704 { 1705 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, 1706 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curproc)); 1707 } 1708 1709 /* 1710 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 1711 */ 1712 int 1713 nfs_renamerpc(struct vnode *fdvp, char *fnameptr, int fnamelen, 1714 struct vnode *tdvp, char *tnameptr, int tnamelen, struct ucred *cred, 1715 struct proc *proc) 1716 { 1717 struct nfsm_info info; 1718 u_int32_t *tl; 1719 int32_t t1; 1720 caddr_t cp2; 1721 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 1722 1723 info.nmi_v3 = NFS_ISV3(fdvp); 1724 1725 nfsstats.rpccnt[NFSPROC_RENAME]++; 1726 info.nmi_mb = info.nmi_mreq = nfsm_reqhead((NFSX_FH(info.nmi_v3) + 1727 NFSX_UNSIGNED) * 2 + nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen)); 1728 nfsm_fhtom(&info, fdvp, info.nmi_v3); 1729 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 1730 nfsm_fhtom(&info, tdvp, info.nmi_v3); 1731 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 1732 1733 info.nmi_procp = proc; 1734 info.nmi_cred = cred; 1735 error = nfs_request(fdvp, NFSPROC_RENAME, &info); 1736 if (info.nmi_v3) { 1737 nfsm_wcc_data(fdvp, fwccflag); 1738 nfsm_wcc_data(tdvp, twccflag); 1739 } 1740 m_freem(info.nmi_mrep); 1741 1742 nfsmout: 1743 VTONFS(fdvp)->n_flag |= NMODIFIED; 1744 VTONFS(tdvp)->n_flag |= NMODIFIED; 1745 if (!fwccflag) 1746 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp)); 1747 if (!twccflag) 1748 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp)); 1749 return (error); 1750 } 1751 1752 /* 1753 * nfs hard link create call 1754 */ 1755 int 1756 nfs_link(void *v) 1757 { 1758 struct vop_link_args *ap = v; 1759 struct vnode *vp = ap->a_vp; 1760 struct vnode *dvp = ap->a_dvp; 1761 struct componentname *cnp = ap->a_cnp; 1762 struct nfsm_info info; 1763 u_int32_t *tl; 1764 int32_t t1; 1765 caddr_t cp2; 1766 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 1767 1768 info.nmi_v3 = NFS_ISV3(vp); 1769 1770 if (dvp->v_mount != vp->v_mount) { 1771 pool_put(&namei_pool, cnp->cn_pnbuf); 1772 vput(dvp); 1773 return (EXDEV); 1774 } 1775 error = vn_lock(vp, LK_EXCLUSIVE); 1776 if (error != 0) { 1777 VOP_ABORTOP(dvp, cnp); 1778 vput(dvp); 1779 return (error); 1780 } 1781 1782 /* 1783 * Push all writes to the server, so that the attribute cache 1784 * doesn't get "out of sync" with the server. 1785 * XXX There should be a better way! 
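 * (Flushing first means the post-op attributes in the LINK reply
 * describe the file after any locally delayed writes, so the size and
 * times loaded into the attribute cache are not stale.)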
1786 */ 1787 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc); 1788 1789 nfsstats.rpccnt[NFSPROC_LINK]++; 1790 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(2 * NFSX_FH(info.nmi_v3) + 1791 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1792 nfsm_fhtom(&info, vp, info.nmi_v3); 1793 nfsm_fhtom(&info, dvp, info.nmi_v3); 1794 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1795 1796 info.nmi_procp = cnp->cn_proc; 1797 info.nmi_cred = cnp->cn_cred; 1798 error = nfs_request(vp, NFSPROC_LINK, &info); 1799 if (info.nmi_v3) { 1800 nfsm_postop_attr(vp, attrflag); 1801 nfsm_wcc_data(dvp, wccflag); 1802 } 1803 m_freem(info.nmi_mrep); 1804 nfsmout: 1805 pool_put(&namei_pool, cnp->cn_pnbuf); 1806 VTONFS(dvp)->n_flag |= NMODIFIED; 1807 if (!attrflag) 1808 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 1809 if (!wccflag) 1810 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1811 1812 VN_KNOTE(vp, NOTE_LINK); 1813 VN_KNOTE(dvp, NOTE_WRITE); 1814 VOP_UNLOCK(vp); 1815 vput(dvp); 1816 return (error); 1817 } 1818 1819 /* 1820 * nfs symbolic link create call 1821 */ 1822 int 1823 nfs_symlink(void *v) 1824 { 1825 struct vop_symlink_args *ap = v; 1826 struct vnode *dvp = ap->a_dvp; 1827 struct vattr *vap = ap->a_vap; 1828 struct componentname *cnp = ap->a_cnp; 1829 struct nfsv2_sattr *sp; 1830 struct nfsm_info info; 1831 u_int32_t *tl; 1832 int32_t t1; 1833 caddr_t cp2; 1834 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 1835 struct vnode *newvp = NULL; 1836 1837 info.nmi_v3 = NFS_ISV3(dvp); 1838 1839 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 1840 slen = strlen(ap->a_target); 1841 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1842 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + 1843 NFSX_SATTR(info.nmi_v3)); 1844 nfsm_fhtom(&info, dvp, info.nmi_v3); 1845 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1846 if (info.nmi_v3) 1847 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1848 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 1849 if (!info.nmi_v3) { 1850 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1851 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 1852 sp->sa_uid = nfs_xdrneg1; 1853 sp->sa_gid = nfs_xdrneg1; 1854 sp->sa_size = nfs_xdrneg1; 1855 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1856 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1857 } 1858 1859 info.nmi_procp = cnp->cn_proc; 1860 info.nmi_cred = cnp->cn_cred; 1861 error = nfs_request(dvp, NFSPROC_SYMLINK, &info); 1862 if (info.nmi_v3) { 1863 if (!error) 1864 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1865 nfsm_wcc_data(dvp, wccflag); 1866 } 1867 m_freem(info.nmi_mrep); 1868 1869 nfsmout: 1870 if (newvp) 1871 vput(newvp); 1872 pool_put(&namei_pool, cnp->cn_pnbuf); 1873 VTONFS(dvp)->n_flag |= NMODIFIED; 1874 if (!wccflag) 1875 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1876 VN_KNOTE(dvp, NOTE_WRITE); 1877 vput(dvp); 1878 return (error); 1879 } 1880 1881 /* 1882 * nfs make dir call 1883 */ 1884 int 1885 nfs_mkdir(void *v) 1886 { 1887 struct vop_mkdir_args *ap = v; 1888 struct vnode *dvp = ap->a_dvp; 1889 struct vattr *vap = ap->a_vap; 1890 struct componentname *cnp = ap->a_cnp; 1891 struct nfsv2_sattr *sp; 1892 struct nfsm_info info; 1893 u_int32_t *tl; 1894 int32_t t1; 1895 int len; 1896 struct nfsnode *np = NULL; 1897 struct vnode *newvp = NULL; 1898 caddr_t cp2; 1899 int error = 0, wccflag = NFSV3_WCCRATTR; 1900 int gotvp = 0; 1901 1902 info.nmi_v3 = NFS_ISV3(dvp); 1903 1904 len = cnp->cn_namelen; 1905 nfsstats.rpccnt[NFSPROC_MKDIR]++; 1906 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 
+ 1907 NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(info.nmi_v3)); 1908 nfsm_fhtom(&info, dvp, info.nmi_v3); 1909 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 1910 1911 if (info.nmi_v3) { 1912 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1913 } else { 1914 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1915 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 1916 sp->sa_uid = nfs_xdrneg1; 1917 sp->sa_gid = nfs_xdrneg1; 1918 sp->sa_size = nfs_xdrneg1; 1919 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1920 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1921 } 1922 1923 info.nmi_procp = cnp->cn_proc; 1924 info.nmi_cred = cnp->cn_cred; 1925 error = nfs_request(dvp, NFSPROC_MKDIR, &info); 1926 if (!error) 1927 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1928 if (info.nmi_v3) 1929 nfsm_wcc_data(dvp, wccflag); 1930 m_freem(info.nmi_mrep); 1931 1932 nfsmout: 1933 VTONFS(dvp)->n_flag |= NMODIFIED; 1934 if (!wccflag) 1935 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1936 1937 if (error == 0 && newvp == NULL) { 1938 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 1939 cnp->cn_proc, &np); 1940 if (!error) { 1941 newvp = NFSTOV(np); 1942 if (newvp->v_type != VDIR) 1943 error = EEXIST; 1944 } 1945 } 1946 if (error) { 1947 if (newvp) 1948 vput(newvp); 1949 } else { 1950 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK); 1951 if (cnp->cn_flags & MAKEENTRY) 1952 nfs_cache_enter(dvp, newvp, cnp); 1953 *ap->a_vpp = newvp; 1954 } 1955 pool_put(&namei_pool, cnp->cn_pnbuf); 1956 vput(dvp); 1957 return (error); 1958 } 1959 1960 /* 1961 * nfs remove directory call 1962 */ 1963 int 1964 nfs_rmdir(void *v) 1965 { 1966 struct vop_rmdir_args *ap = v; 1967 struct vnode *vp = ap->a_vp; 1968 struct vnode *dvp = ap->a_dvp; 1969 struct componentname *cnp = ap->a_cnp; 1970 struct nfsm_info info; 1971 u_int32_t *tl; 1972 int32_t t1; 1973 caddr_t cp2; 1974 int error = 0, wccflag = NFSV3_WCCRATTR; 1975 1976 info.nmi_v3 = NFS_ISV3(dvp); 1977 1978 nfsstats.rpccnt[NFSPROC_RMDIR]++; 1979 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1980 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1981 nfsm_fhtom(&info, dvp, info.nmi_v3); 1982 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1983 1984 info.nmi_procp = cnp->cn_proc; 1985 info.nmi_cred = cnp->cn_cred; 1986 error = nfs_request(dvp, NFSPROC_RMDIR, &info); 1987 if (info.nmi_v3) 1988 nfsm_wcc_data(dvp, wccflag); 1989 m_freem(info.nmi_mrep); 1990 1991 nfsmout: 1992 pool_put(&namei_pool, cnp->cn_pnbuf); 1993 VTONFS(dvp)->n_flag |= NMODIFIED; 1994 if (!wccflag) 1995 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1996 1997 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK); 1998 VN_KNOTE(vp, NOTE_DELETE); 1999 2000 cache_purge(vp); 2001 vput(vp); 2002 vput(dvp); 2003 /* 2004 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2005 */ 2006 if (error == ENOENT) 2007 error = 0; 2008 return (error); 2009 } 2010 2011 2012 /* 2013 * The readdir logic below has a big design bug. It stores the NFS cookie in 2014 * the returned uio->uio_offset but does not store the verifier (it cannot). 2015 * Instead, the code stores the verifier in the nfsnode and applies that 2016 * verifies to all cookies, no matter what verifier was originally with 2017 * the cookie. 2018 * 2019 * From a practical standpoint, this is not a problem since almost all 2020 * NFS servers do not change the validity of cookies across deletes 2021 * and inserts. 
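 * The cookie itself is smuggled up to nfs_readdir() via the struct
 * nfs_dirent below: every entry produced by the RPC code is prefixed
 * with its 64-bit cookie, and nfs_readdir() copies that value into
 * d_off and uio_offset (fxdr_hyper(&ndp->cookie[0])) so the next call
 * knows where to resume.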
2022 */
2023
2024 struct nfs_dirent {
2025 u_int32_t cookie[2];
2026 struct dirent dirent;
2027 };
2028
2029 #define NFS_DIRHDSIZ (sizeof (struct nfs_dirent) - (MAXNAMLEN + 1))
2030 #define NFS_DIRENT_OVERHEAD offsetof(struct nfs_dirent, dirent)
2031
2032 /*
2033 * nfs readdir call
2034 */
2035 int
2036 nfs_readdir(void *v)
2037 {
2038 struct vop_readdir_args *ap = v;
2039 struct vnode *vp = ap->a_vp;
2040 struct nfsnode *np = VTONFS(vp);
2041 struct uio *uio = ap->a_uio;
2042 int tresid, error = 0;
2043 struct vattr vattr;
2044 int cnt;
2045 u_int64_t newoff = uio->uio_offset;
2046 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2047 struct uio readdir_uio;
2048 struct iovec readdir_iovec;
2049 struct proc * p = uio->uio_procp;
2050 int done = 0, eof = 0;
2051 struct ucred *cred = ap->a_cred;
2052 void *data;
2053
2054 if (vp->v_type != VDIR)
2055 return (EPERM);
2056 /*
2057 * First, check for hit on the EOF offset cache
2058 */
2059 if (np->n_direofoffset != 0 &&
2060 uio->uio_offset == np->n_direofoffset) {
2061 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
2062 timespeccmp(&np->n_mtime, &vattr.va_mtime, ==)) {
2063 nfsstats.direofcache_hits++;
2064 *ap->a_eofflag = 1;
2065 return (0);
2066 }
2067 }
2068
2069 if (uio->uio_resid < NFS_FABLKSIZE)
2070 return (EINVAL);
2071
2072 tresid = uio->uio_resid;
2073
2074 if (uio->uio_rw != UIO_READ)
2075 return (EINVAL);
2076
2077 if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
2078 (void)nfs_fsinfo(nmp, vp, cred, p);
2079
2080 cnt = 5;
2081
2082 /* M_ZERO to avoid leaking kernel data in dirent padding */
2083 data = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK|M_ZERO);
2084 do {
2085 struct nfs_dirent *ndp = data;
2086
2087 readdir_iovec.iov_len = NFS_DIRBLKSIZ;
2088 readdir_iovec.iov_base = data;
2089 readdir_uio.uio_offset = newoff;
2090 readdir_uio.uio_iov = &readdir_iovec;
2091 readdir_uio.uio_iovcnt = 1;
2092 readdir_uio.uio_segflg = UIO_SYSSPACE;
2093 readdir_uio.uio_rw = UIO_READ;
2094 readdir_uio.uio_resid = NFS_DIRBLKSIZ;
2095 readdir_uio.uio_procp = curproc;
2096
2097 if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
2098 error = nfs_readdirplusrpc(vp, &readdir_uio, cred,
2099 &eof, p);
2100 if (error == NFSERR_NOTSUPP)
2101 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
2102 }
2103 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
2104 error = nfs_readdirrpc(vp, &readdir_uio, cred, &eof);
2105
2106 if (error == NFSERR_BAD_COOKIE)
2107 error = EINVAL;
2108
2109 while (error == 0 &&
2110 ndp < (struct nfs_dirent *)readdir_iovec.iov_base) {
2111 struct dirent *dp = &ndp->dirent;
2112 int reclen = dp->d_reclen;
2113
2114 dp->d_reclen -= NFS_DIRENT_OVERHEAD;
2115 dp->d_off = fxdr_hyper(&ndp->cookie[0]);
2116
2117 if (uio->uio_resid < dp->d_reclen) {
2118 eof = 0;
2119 done = 1;
2120 break;
2121 }
2122
2123 if ((error = uiomove(dp, dp->d_reclen, uio)))
2124 break;
2125
2126 newoff = fxdr_hyper(&ndp->cookie[0]);
2127
2128 ndp = (struct nfs_dirent *)((u_int8_t *)ndp + reclen);
2129 }
2130 } while (!error && !done && !eof && cnt--);
2131
2132 free(data, M_TEMP, NFS_DIRBLKSIZ);
2133 data = NULL;
2134
2135 uio->uio_offset = newoff;
2136
2137 if (!error && (eof || uio->uio_resid == tresid)) {
2138 nfsstats.direofcache_misses++;
2139 *ap->a_eofflag = 1;
2140 return (0);
2141 }
2142
2143 *ap->a_eofflag = 0;
2144 return (error);
2145 }
2146
2147
2148 /*
2149 * The function below stuffs the cookies in after the name
2150 */
2151
2152 /*
2153 * Readdir rpc call.
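* For v3 the request carries the directory file handle, the 64-bit cookie,
* the cookie verifier cached in the nfsnode and the reply size limit
* (nm_readdirsize); a v2 request carries only a 32-bit cookie and the size.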
2154 */ 2155 int 2156 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2157 int *end_of_directory) 2158 { 2159 int len, left; 2160 struct nfs_dirent *ndp = NULL; 2161 struct dirent *dp = NULL; 2162 struct nfsm_info info; 2163 u_int32_t *tl; 2164 caddr_t cp; 2165 int32_t t1; 2166 caddr_t cp2; 2167 nfsuint64 cookie; 2168 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2169 struct nfsnode *dnp = VTONFS(vp); 2170 u_quad_t fileno; 2171 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1; 2172 int attrflag; 2173 2174 info.nmi_v3 = NFS_ISV3(vp); 2175 2176 #ifdef DIAGNOSTIC 2177 if (uiop->uio_iovcnt != 1 || 2178 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2179 panic("nfs readdirrpc bad uio"); 2180 #endif 2181 2182 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2183 2184 /* 2185 * Loop around doing readdir rpc's of size nm_readdirsize 2186 * truncated to a multiple of NFS_READDIRBLKSIZ. 2187 * The stopping criteria is EOF or buffer full. 2188 */ 2189 while (more_dirs && bigenough) { 2190 nfsstats.rpccnt[NFSPROC_READDIR]++; 2191 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 2192 + NFSX_READDIR(info.nmi_v3)); 2193 nfsm_fhtom(&info, vp, info.nmi_v3); 2194 if (info.nmi_v3) { 2195 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED); 2196 *tl++ = cookie.nfsuquad[0]; 2197 *tl++ = cookie.nfsuquad[1]; 2198 if (cookie.nfsuquad[0] == 0 && 2199 cookie.nfsuquad[1] == 0) { 2200 *tl++ = 0; 2201 *tl++ = 0; 2202 } else { 2203 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2204 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2205 } 2206 } else { 2207 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED); 2208 *tl++ = cookie.nfsuquad[1]; 2209 } 2210 *tl = txdr_unsigned(nmp->nm_readdirsize); 2211 2212 info.nmi_procp = uiop->uio_procp; 2213 info.nmi_cred = cred; 2214 error = nfs_request(vp, NFSPROC_READDIR, &info); 2215 if (info.nmi_v3) 2216 nfsm_postop_attr(vp, attrflag); 2217 2218 if (error) { 2219 m_freem(info.nmi_mrep); 2220 goto nfsmout; 2221 } 2222 2223 if (info.nmi_v3) { 2224 nfsm_dissect(tl, u_int32_t *, 2225 2 * NFSX_UNSIGNED); 2226 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2227 dnp->n_cookieverf.nfsuquad[1] = *tl; 2228 } 2229 2230 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2231 more_dirs = fxdr_unsigned(int, *tl); 2232 2233 /* loop thru the dir entries, doctoring them to dirent form */ 2234 while (more_dirs && bigenough) { 2235 if (info.nmi_v3) { 2236 nfsm_dissect(tl, u_int32_t *, 2237 3 * NFSX_UNSIGNED); 2238 fileno = fxdr_hyper(tl); 2239 len = fxdr_unsigned(int, *(tl + 2)); 2240 } else { 2241 nfsm_dissect(tl, u_int32_t *, 2242 2 * NFSX_UNSIGNED); 2243 fileno = fxdr_unsigned(u_quad_t, *tl++); 2244 len = fxdr_unsigned(int, *tl); 2245 } 2246 if (len <= 0 || len > NFS_MAXNAMLEN) { 2247 error = EBADRPC; 2248 m_freem(info.nmi_mrep); 2249 goto nfsmout; 2250 } 2251 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD; 2252 left = NFS_READDIRBLKSIZ - blksiz; 2253 if (tlen > left) { 2254 dp->d_reclen += left; 2255 uiop->uio_iov->iov_base += left; 2256 uiop->uio_iov->iov_len -= left; 2257 uiop->uio_resid -= left; 2258 blksiz = 0; 2259 } 2260 if (tlen > uiop->uio_resid) 2261 bigenough = 0; 2262 if (bigenough) { 2263 ndp = (struct nfs_dirent *) 2264 uiop->uio_iov->iov_base; 2265 dp = &ndp->dirent; 2266 dp->d_fileno = fileno; 2267 dp->d_namlen = len; 2268 dp->d_reclen = tlen; 2269 dp->d_type = DT_UNKNOWN; 2270 blksiz += tlen; 2271 if (blksiz == NFS_READDIRBLKSIZ) 2272 blksiz = 0; 2273 uiop->uio_resid -= NFS_DIRHDSIZ; 2274 uiop->uio_iov->iov_base = 2275 (char *)uiop->uio_iov->iov_base + 2276 
NFS_DIRHDSIZ; 2277 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2278 nfsm_mtouio(uiop, len); 2279 cp = uiop->uio_iov->iov_base; 2280 tlen -= NFS_DIRHDSIZ + len; 2281 *cp = '\0'; /* null terminate */ 2282 uiop->uio_iov->iov_base += tlen; 2283 uiop->uio_iov->iov_len -= tlen; 2284 uiop->uio_resid -= tlen; 2285 } else 2286 nfsm_adv(nfsm_rndup(len)); 2287 if (info.nmi_v3) { 2288 nfsm_dissect(tl, u_int32_t *, 2289 3 * NFSX_UNSIGNED); 2290 } else { 2291 nfsm_dissect(tl, u_int32_t *, 2292 2 * NFSX_UNSIGNED); 2293 } 2294 if (bigenough) { 2295 if (info.nmi_v3) { 2296 ndp->cookie[0] = cookie.nfsuquad[0] = 2297 *tl++; 2298 } else 2299 ndp->cookie[0] = 0; 2300 2301 ndp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2302 } else if (info.nmi_v3) 2303 tl += 2; 2304 else 2305 tl++; 2306 more_dirs = fxdr_unsigned(int, *tl); 2307 } 2308 /* 2309 * If at end of rpc data, get the eof boolean 2310 */ 2311 if (!more_dirs) { 2312 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2313 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2314 } 2315 m_freem(info.nmi_mrep); 2316 } 2317 /* 2318 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2319 * by increasing d_reclen for the last record. 2320 */ 2321 if (blksiz > 0) { 2322 left = NFS_READDIRBLKSIZ - blksiz; 2323 dp->d_reclen += left; 2324 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2325 left; 2326 uiop->uio_iov->iov_len -= left; 2327 uiop->uio_resid -= left; 2328 } 2329 2330 /* 2331 * We are now either at the end of the directory or have filled the 2332 * block. 2333 */ 2334 if (bigenough) { 2335 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2336 if (end_of_directory) *end_of_directory = 1; 2337 } else { 2338 if (uiop->uio_resid > 0) 2339 printf("EEK! readdirrpc resid > 0\n"); 2340 } 2341 2342 nfsmout: 2343 return (error); 2344 } 2345 2346 /* 2347 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2348 */ 2349 int 2350 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2351 int *end_of_directory, struct proc *p) 2352 { 2353 int len, left; 2354 struct nfs_dirent *ndirp = NULL; 2355 struct dirent *dp = NULL; 2356 struct nfsm_info info; 2357 u_int32_t *tl; 2358 caddr_t cp; 2359 int32_t t1; 2360 struct vnode *newvp; 2361 caddr_t cp2, dpossav1, dpossav2; 2362 struct mbuf *mdsav1, *mdsav2; 2363 struct nameidata nami, *ndp = &nami; 2364 struct componentname *cnp = &ndp->ni_cnd; 2365 nfsuint64 cookie; 2366 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2367 struct nfsnode *dnp = VTONFS(vp), *np; 2368 nfsfh_t *fhp; 2369 u_quad_t fileno; 2370 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2371 int attrflag, fhsize; 2372 2373 #ifdef DIAGNOSTIC 2374 if (uiop->uio_iovcnt != 1 || 2375 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2376 panic("nfs readdirplusrpc bad uio"); 2377 #endif 2378 NDINIT(ndp, 0, 0, UIO_SYSSPACE, NULL, p); 2379 ndp->ni_dvp = vp; 2380 newvp = NULLVP; 2381 2382 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2383 2384 /* 2385 * Loop around doing readdir rpc's of size nm_readdirsize 2386 * truncated to a multiple of NFS_READDIRBLKSIZ. 2387 * The stopping criteria is EOF or buffer full. 
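* READDIRPLUS is a v3-only RPC; if the server answers NFSERR_NOTSUPP,
* nfs_readdir() clears NFSMNT_RDIRPLUS and falls back to the plain
* readdir RPC.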
2388 */ 2389 while (more_dirs && bigenough) { 2390 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2391 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2392 nfsm_fhtom(&info, vp, 1); 2393 tl = nfsm_build(&info.nmi_mb, 6 * NFSX_UNSIGNED); 2394 *tl++ = cookie.nfsuquad[0]; 2395 *tl++ = cookie.nfsuquad[1]; 2396 if (cookie.nfsuquad[0] == 0 && 2397 cookie.nfsuquad[1] == 0) { 2398 *tl++ = 0; 2399 *tl++ = 0; 2400 } else { 2401 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2402 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2403 } 2404 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2405 *tl = txdr_unsigned(nmp->nm_rsize); 2406 2407 info.nmi_procp = uiop->uio_procp; 2408 info.nmi_cred = cred; 2409 error = nfs_request(vp, NFSPROC_READDIRPLUS, &info); 2410 nfsm_postop_attr(vp, attrflag); 2411 if (error) { 2412 m_freem(info.nmi_mrep); 2413 goto nfsmout; 2414 } 2415 2416 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2417 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2418 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2419 more_dirs = fxdr_unsigned(int, *tl); 2420 2421 /* loop thru the dir entries, doctoring them to 4bsd form */ 2422 while (more_dirs && bigenough) { 2423 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2424 fileno = fxdr_hyper(tl); 2425 len = fxdr_unsigned(int, *(tl + 2)); 2426 if (len <= 0 || len > NFS_MAXNAMLEN) { 2427 error = EBADRPC; 2428 m_freem(info.nmi_mrep); 2429 goto nfsmout; 2430 } 2431 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD; 2432 left = NFS_READDIRBLKSIZ - blksiz; 2433 if (tlen > left) { 2434 dp->d_reclen += left; 2435 uiop->uio_iov->iov_base = 2436 (char *)uiop->uio_iov->iov_base + left; 2437 uiop->uio_iov->iov_len -= left; 2438 uiop->uio_resid -= left; 2439 blksiz = 0; 2440 } 2441 if (tlen > uiop->uio_resid) 2442 bigenough = 0; 2443 if (bigenough) { 2444 ndirp = (struct nfs_dirent *) 2445 uiop->uio_iov->iov_base; 2446 dp = &ndirp->dirent; 2447 dp->d_fileno = fileno; 2448 dp->d_namlen = len; 2449 dp->d_reclen = tlen; 2450 dp->d_type = DT_UNKNOWN; 2451 blksiz += tlen; 2452 if (blksiz == NFS_READDIRBLKSIZ) 2453 blksiz = 0; 2454 uiop->uio_resid -= NFS_DIRHDSIZ; 2455 uiop->uio_iov->iov_base = 2456 (char *)uiop->uio_iov->iov_base + 2457 NFS_DIRHDSIZ; 2458 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2459 cnp->cn_nameptr = uiop->uio_iov->iov_base; 2460 cnp->cn_namelen = len; 2461 nfsm_mtouio(uiop, len); 2462 cp = uiop->uio_iov->iov_base; 2463 tlen -= NFS_DIRHDSIZ + len; 2464 *cp = '\0'; 2465 uiop->uio_iov->iov_base += tlen; 2466 uiop->uio_iov->iov_len -= tlen; 2467 uiop->uio_resid -= tlen; 2468 } else 2469 nfsm_adv(nfsm_rndup(len)); 2470 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2471 if (bigenough) { 2472 ndirp->cookie[0] = cookie.nfsuquad[0] = *tl++; 2473 ndirp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2474 } else 2475 tl += 2; 2476 2477 /* 2478 * Since the attributes are before the file handle 2479 * (sigh), we must skip over the attributes and then 2480 * come back and get them. 
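* This is done by saving the current mbuf position, advancing past the
* fattr to pick up the file handle, and then seeking back to load the
* attributes into the vnode obtained for that handle.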
2481 */ 2482 attrflag = fxdr_unsigned(int, *tl); 2483 if (attrflag) { 2484 dpossav1 = info.nmi_dpos; 2485 mdsav1 = info.nmi_md; 2486 nfsm_adv(NFSX_V3FATTR); 2487 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2488 doit = fxdr_unsigned(int, *tl); 2489 if (doit) { 2490 nfsm_getfh(fhp, fhsize, 1); 2491 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2492 vref(vp); 2493 newvp = vp; 2494 np = dnp; 2495 } else { 2496 error = nfs_nget(vp->v_mount, 2497 fhp, fhsize, &np); 2498 if (error) 2499 doit = 0; 2500 else 2501 newvp = NFSTOV(np); 2502 } 2503 } 2504 if (doit && bigenough) { 2505 dpossav2 = info.nmi_dpos; 2506 info.nmi_dpos = dpossav1; 2507 mdsav2 = info.nmi_md; 2508 info.nmi_md = mdsav1; 2509 nfsm_loadattr(newvp, NULL); 2510 info.nmi_dpos = dpossav2; 2511 info.nmi_md = mdsav2; 2512 dp->d_type = IFTODT( 2513 VTTOIF(np->n_vattr.va_type)); 2514 if (cnp->cn_namelen <= 2515 NAMECACHE_MAXLEN) { 2516 ndp->ni_vp = newvp; 2517 cache_purge(ndp->ni_dvp); 2518 nfs_cache_enter(ndp->ni_dvp, 2519 ndp->ni_vp, cnp); 2520 } 2521 } 2522 } else { 2523 /* Just skip over the file handle */ 2524 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2525 i = fxdr_unsigned(int, *tl); 2526 if (i > 0) 2527 nfsm_adv(nfsm_rndup(i)); 2528 } 2529 if (newvp != NULLVP) { 2530 if (newvp == vp) 2531 vrele(newvp); 2532 else 2533 vput(newvp); 2534 newvp = NULLVP; 2535 } 2536 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2537 more_dirs = fxdr_unsigned(int, *tl); 2538 } 2539 /* 2540 * If at end of rpc data, get the eof boolean 2541 */ 2542 if (!more_dirs) { 2543 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2544 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2545 } 2546 m_freem(info.nmi_mrep); 2547 } 2548 /* 2549 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2550 * by increasing d_reclen for the last record. 2551 */ 2552 if (blksiz > 0) { 2553 left = NFS_READDIRBLKSIZ - blksiz; 2554 dp->d_reclen += left; 2555 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2556 left; 2557 uiop->uio_iov->iov_len -= left; 2558 uiop->uio_resid -= left; 2559 } 2560 2561 /* 2562 * We are now either at the end of the directory or have filled the 2563 * block. 2564 */ 2565 if (bigenough) { 2566 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2567 if (end_of_directory) *end_of_directory = 1; 2568 } else { 2569 if (uiop->uio_resid > 0) 2570 printf("EEK! readdirplusrpc resid > 0\n"); 2571 } 2572 2573 nfsmout: 2574 if (newvp != NULLVP) { 2575 if (newvp == vp) 2576 vrele(newvp); 2577 else 2578 vput(newvp); 2579 } 2580 return (error); 2581 } 2582 2583 /* 2584 * Silly rename. To make the NFS filesystem that is stateless look a little 2585 * more like the "ufs" a remove of an active vnode is translated to a rename 2586 * to a funny looking filename that is removed by nfs_inactive on the 2587 * nfsnode. There is the potential for another process on a different client 2588 * to create the same funny name between the nfs_lookitup() fails and the 2589 * nfs_rename() completes, but... 
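* in practice the generated name carries 64 bits of arc4random() output,
* so a collision with a name picked by another client is very unlikely.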
2590 */ 2591 int 2592 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 2593 { 2594 struct sillyrename *sp; 2595 struct nfsnode *np; 2596 int error; 2597 2598 cache_purge(dvp); 2599 np = VTONFS(vp); 2600 sp = malloc(sizeof(*sp), M_NFSREQ, M_WAITOK); 2601 sp->s_cred = crdup(cnp->cn_cred); 2602 sp->s_dvp = dvp; 2603 vref(dvp); 2604 2605 if (vp->v_type == VDIR) { 2606 #ifdef DIAGNOSTIC 2607 printf("nfs: sillyrename dir\n"); 2608 #endif 2609 error = EINVAL; 2610 goto bad; 2611 } 2612 2613 /* Try lookitups until we get one that isn't there */ 2614 while (1) { 2615 /* Fudge together a funny name */ 2616 u_int32_t rnd[2]; 2617 2618 arc4random_buf(&rnd, sizeof rnd); 2619 sp->s_namlen = snprintf(sp->s_name, sizeof sp->s_name, 2620 ".nfs%08X%08X", rnd[0], rnd[1]); 2621 if (sp->s_namlen > sizeof sp->s_name) 2622 sp->s_namlen = strlen(sp->s_name); 2623 2624 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2625 cnp->cn_proc, NULL)) 2626 break; 2627 } 2628 2629 error = nfs_renameit(dvp, cnp, sp); 2630 if (error) 2631 goto bad; 2632 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2633 cnp->cn_proc, &np); 2634 np->n_sillyrename = sp; 2635 return (0); 2636 bad: 2637 vrele(sp->s_dvp); 2638 crfree(sp->s_cred); 2639 free(sp, M_NFSREQ, sizeof(*sp)); 2640 return (error); 2641 } 2642 2643 /* 2644 * Look up a file name and optionally either update the file handle or 2645 * allocate an nfsnode, depending on the value of npp. 2646 * npp == NULL --> just do the lookup 2647 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2648 * handled too 2649 * *npp != NULL --> update the file handle in the vnode 2650 */ 2651 int 2652 nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred, 2653 struct proc *procp, struct nfsnode **npp) 2654 { 2655 struct nfsm_info info; 2656 u_int32_t *tl; 2657 int32_t t1; 2658 struct vnode *newvp = NULL; 2659 struct nfsnode *np, *dnp = VTONFS(dvp); 2660 caddr_t cp2; 2661 int error = 0, fhlen, attrflag = 0; 2662 nfsfh_t *nfhp; 2663 2664 info.nmi_v3 = NFS_ISV3(dvp); 2665 2666 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2667 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + NFSX_UNSIGNED + 2668 nfsm_rndup(len)); 2669 nfsm_fhtom(&info, dvp, info.nmi_v3); 2670 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2671 2672 info.nmi_procp = procp; 2673 info.nmi_cred = cred; 2674 error = nfs_request(dvp, NFSPROC_LOOKUP, &info); 2675 if (error && !info.nmi_v3) { 2676 m_freem(info.nmi_mrep); 2677 goto nfsmout; 2678 } 2679 2680 if (npp && !error) { 2681 nfsm_getfh(nfhp, fhlen, info.nmi_v3); 2682 if (*npp) { 2683 np = *npp; 2684 np->n_fhp = &np->n_fh; 2685 bcopy(nfhp, np->n_fhp, fhlen); 2686 np->n_fhsize = fhlen; 2687 newvp = NFSTOV(np); 2688 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2689 vref(dvp); 2690 newvp = dvp; 2691 np = dnp; 2692 } else { 2693 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2694 if (error) { 2695 m_freem(info.nmi_mrep); 2696 return (error); 2697 } 2698 newvp = NFSTOV(np); 2699 } 2700 if (info.nmi_v3) { 2701 nfsm_postop_attr(newvp, attrflag); 2702 if (!attrflag && *npp == NULL) { 2703 m_freem(info.nmi_mrep); 2704 if (newvp == dvp) 2705 vrele(newvp); 2706 else 2707 vput(newvp); 2708 return (ENOENT); 2709 } 2710 } else 2711 nfsm_loadattr(newvp, NULL); 2712 } 2713 m_freem(info.nmi_mrep); 2714 nfsmout: 2715 if (npp && *npp == NULL) { 2716 if (error) { 2717 if (newvp == dvp) 2718 vrele(newvp); 2719 else 2720 vput(newvp); 2721 } else 2722 *npp = np; 2723 } 2724 return (error); 2725 } 2726 2727 /* 2728 
* Nfs Version 3 commit rpc
2729 */
2730 int
2731 nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct proc *procp)
2732 {
2733 struct nfsm_info info;
2734 u_int32_t *tl;
2735 int32_t t1;
2736 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2737 caddr_t cp2;
2738 int error = 0, wccflag = NFSV3_WCCRATTR;
2739
2740 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0)
2741 return (0);
2742 nfsstats.rpccnt[NFSPROC_COMMIT]++;
2743 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1));
2744 nfsm_fhtom(&info, vp, 1);
2745
2746 tl = nfsm_build(&info.nmi_mb, 3 * NFSX_UNSIGNED);
2747 txdr_hyper(offset, tl);
2748 tl += 2;
2749 *tl = txdr_unsigned(cnt);
2750
2751 info.nmi_procp = procp;
2752 info.nmi_cred = VTONFS(vp)->n_wcred;
2753 error = nfs_request(vp, NFSPROC_COMMIT, &info);
2754 nfsm_wcc_data(vp, wccflag);
2755
2756 if (!error) {
2757 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF);
2758 if (bcmp(nmp->nm_verf, tl,
2759 NFSX_V3WRITEVERF)) {
2760 bcopy(tl, nmp->nm_verf,
2761 NFSX_V3WRITEVERF);
2762 error = NFSERR_STALEWRITEVERF;
2763 }
2764 }
2765 m_freem(info.nmi_mrep);
2766
2767 nfsmout:
2768 return (error);
2769 }
2770
2771 /*
2772 * Kludge City..
2773 * - make nfs_bmap() essentially a no-op that does no translation
2774 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
2775 * (Maybe I could use the process's page mapping, but I was concerned that
2776 * Kernel Write might not be enabled and also figured copyout() would do
2777 * a lot more work than bcopy() and also it currently happens in the
2778 * context of the swapper process (2).)
2779 */
2780 int
2781 nfs_bmap(void *v)
2782 {
2783 struct vop_bmap_args *ap = v;
2784 struct vnode *vp = ap->a_vp;
2785
2786 if (ap->a_vpp != NULL)
2787 *ap->a_vpp = vp;
2788 if (ap->a_bnp != NULL)
2789 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
2790 return (0);
2791 }
2792
2793 /*
2794 * Strategy routine.
2795 * For async requests when nfsiod(s) are running, queue the request by
2796 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
2797 * request.
2798 */
2799 int
2800 nfs_strategy(void *v)
2801 {
2802 struct vop_strategy_args *ap = v;
2803 struct buf *bp = ap->a_bp;
2804 struct proc *p;
2805 int error = 0;
2806
2807 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC))
2808 panic("nfs physio/async");
2809 if (bp->b_flags & B_ASYNC)
2810 p = NULL;
2811 else
2812 p = curproc; /* XXX */
2813 /*
2814 * If the op is asynchronous and an i/o daemon is waiting,
2815 * queue the request, wake it up and wait for completion;
2816 * otherwise just do it ourselves.
2817 */
2818 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp, 0))
2819 error = nfs_doio(bp, p);
2820 return (error);
2821 }
2822
2823 /*
2824 * fsync vnode op. Just call nfs_flush() with commit == 1.
2825 */
2826 int
2827 nfs_fsync(void *v)
2828 {
2829 struct vop_fsync_args *ap = v;
2830
2831 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
2832 }
2833
2834 /*
2835 * Flush all the blocks associated with a vnode.
2836 * Walk through the buffer pool and push any dirty pages
2837 * associated with the vnode.
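* For NFSv3 with commit set (the VOP_FSYNC() path), B_NEEDCOMMIT buffers
* are first committed on the server in batches of up to NFS_COMMITBVECSIZ;
* whatever is still dirty afterwards is pushed with VOP_BWRITE().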
2838 */
2839 int
2840 nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct proc *p,
2841 int commit)
2842 {
2843 struct nfsnode *np = VTONFS(vp);
2844 struct buf *bp;
2845 int i;
2846 struct buf *nbp;
2847 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
2848 uint64_t slptimeo = INFSLP;
2849 int s, error = 0, slpflag = 0, retv, bvecpos;
2850 int passone = 1;
2851 u_quad_t off = (u_quad_t)-1, endoff = 0, toff;
2852 #ifndef NFS_COMMITBVECSIZ
2853 #define NFS_COMMITBVECSIZ 20
2854 #endif
2855 struct buf *bvec[NFS_COMMITBVECSIZ];
2856
2857 if (nmp->nm_flag & NFSMNT_INT)
2858 slpflag = PCATCH;
2859 if (!commit)
2860 passone = 0;
2861 /*
2862 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
2863 * server, but has not been committed to stable storage on the server
2864 * yet. On the first pass, the byte range is worked out and the commit
2865 * rpc is done. On the second pass, nfs_writebp() is called to do the
2866 * job.
2867 */
2868 again:
2869 bvecpos = 0;
2870 if (NFS_ISV3(vp) && commit) {
2871 s = splbio();
2872 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
2873 if (bvecpos >= NFS_COMMITBVECSIZ)
2874 break;
2875 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
2876 != (B_DELWRI | B_NEEDCOMMIT))
2877 continue;
2878 bremfree(bp);
2879 bp->b_flags |= B_WRITEINPROG;
2880 buf_acquire(bp);
2881
2882 /*
2883 * A list of these buffers is kept so that the
2884 * second loop knows which buffers have actually
2885 * been committed. This is necessary, since there
2886 * may be a race between the commit rpc and new
2887 * uncommitted writes on the file.
2888 */
2889 bvec[bvecpos++] = bp;
2890 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
2891 bp->b_dirtyoff;
2892 if (toff < off)
2893 off = toff;
2894 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
2895 if (toff > endoff)
2896 endoff = toff;
2897 }
2898 splx(s);
2899 }
2900 if (bvecpos > 0) {
2901 /*
2902 * Commit data on the server, as required.
2903 */
2904 bcstats.pendingwrites++;
2905 bcstats.numwrites++;
2906 retv = nfs_commit(vp, off, (int)(endoff - off), p);
2907 if (retv == NFSERR_STALEWRITEVERF)
2908 nfs_clearcommit(vp->v_mount);
2909 /*
2910 * Now, either mark the blocks I/O done or mark the
2911 * blocks dirty, depending on whether the commit
2912 * succeeded.
2913 */
2914 for (i = 0; i < bvecpos; i++) {
2915 bp = bvec[i];
2916 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG);
2917 if (retv) {
2918 if (i == 0)
2919 bcstats.pendingwrites--;
2920 brelse(bp);
2921 } else {
2922 if (i > 0)
2923 bcstats.pendingwrites++;
2924 s = splbio();
2925 buf_undirty(bp);
2926 vp->v_numoutput++;
2927 bp->b_flags |= B_ASYNC;
2928 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR);
2929 bp->b_dirtyoff = bp->b_dirtyend = 0;
2930 biodone(bp);
2931 splx(s);
2932 }
2933 }
2934 }
2935
2936 /*
2937 * Start/do any write(s) that are required.
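* (On the first pass, buffers still marked B_NEEDCOMMIT are skipped here;
* they are dealt with by the commit pass or by nfs_writebp() the second
* time around.)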
2938 */ 2939 loop: 2940 s = splbio(); 2941 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) { 2942 if (bp->b_flags & B_BUSY) { 2943 if (waitfor != MNT_WAIT || passone) 2944 continue; 2945 bp->b_flags |= B_WANTED; 2946 error = tsleep_nsec(bp, slpflag | (PRIBIO + 1), 2947 "nfsfsync", slptimeo); 2948 splx(s); 2949 if (error) { 2950 if (nfs_sigintr(nmp, NULL, p)) 2951 return (EINTR); 2952 if (slpflag == PCATCH) { 2953 slpflag = 0; 2954 slptimeo = SEC_TO_NSEC(2); 2955 } 2956 } 2957 goto loop; 2958 } 2959 if ((bp->b_flags & B_DELWRI) == 0) 2960 panic("nfs_fsync: not dirty"); 2961 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) 2962 continue; 2963 bremfree(bp); 2964 if (passone || !commit) { 2965 bp->b_flags |= B_ASYNC; 2966 } else { 2967 bp->b_flags |= (B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT); 2968 } 2969 buf_acquire(bp); 2970 splx(s); 2971 VOP_BWRITE(bp); 2972 goto loop; 2973 } 2974 splx(s); 2975 if (passone) { 2976 passone = 0; 2977 goto again; 2978 } 2979 if (waitfor == MNT_WAIT) { 2980 loop2: 2981 s = splbio(); 2982 error = vwaitforio(vp, slpflag, "nfs_fsync", slptimeo); 2983 splx(s); 2984 if (error) { 2985 if (nfs_sigintr(nmp, NULL, p)) 2986 return (EINTR); 2987 if (slpflag == PCATCH) { 2988 slpflag = 0; 2989 slptimeo = SEC_TO_NSEC(2); 2990 } 2991 goto loop2; 2992 } 2993 2994 if (!LIST_EMPTY(&vp->v_dirtyblkhd) && commit) { 2995 #if 0 2996 vprint("nfs_fsync: dirty", vp); 2997 #endif 2998 goto loop; 2999 } 3000 } 3001 if (np->n_flag & NWRITEERR) { 3002 error = np->n_error; 3003 np->n_flag &= ~NWRITEERR; 3004 } 3005 return (error); 3006 } 3007 3008 /* 3009 * Return POSIX pathconf information applicable to nfs. 3010 * Fake it. For v3 we could ask the server, but such code 3011 * hasn't been written yet. 3012 */ 3013 /* ARGSUSED */ 3014 int 3015 nfs_pathconf(void *v) 3016 { 3017 struct vop_pathconf_args *ap = v; 3018 struct nfsmount *nmp = VFSTONFS(ap->a_vp->v_mount); 3019 int error = 0; 3020 3021 switch (ap->a_name) { 3022 case _PC_LINK_MAX: 3023 *ap->a_retval = LINK_MAX; 3024 break; 3025 case _PC_NAME_MAX: 3026 *ap->a_retval = NAME_MAX; 3027 break; 3028 case _PC_CHOWN_RESTRICTED: 3029 *ap->a_retval = 1; 3030 break; 3031 case _PC_NO_TRUNC: 3032 *ap->a_retval = 1; 3033 break; 3034 case _PC_ALLOC_SIZE_MIN: 3035 *ap->a_retval = NFS_FABLKSIZE; 3036 break; 3037 case _PC_FILESIZEBITS: 3038 *ap->a_retval = 64; 3039 break; 3040 case _PC_REC_INCR_XFER_SIZE: 3041 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize); 3042 break; 3043 case _PC_REC_MAX_XFER_SIZE: 3044 *ap->a_retval = -1; /* means ``unlimited'' */ 3045 break; 3046 case _PC_REC_MIN_XFER_SIZE: 3047 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize); 3048 break; 3049 case _PC_REC_XFER_ALIGN: 3050 *ap->a_retval = PAGE_SIZE; 3051 break; 3052 case _PC_SYMLINK_MAX: 3053 *ap->a_retval = MAXPATHLEN; 3054 break; 3055 case _PC_2_SYMLINKS: 3056 *ap->a_retval = 1; 3057 break; 3058 case _PC_TIMESTAMP_RESOLUTION: 3059 *ap->a_retval = NFS_ISV3(ap->a_vp) ? 1 : 1000; 3060 break; 3061 default: 3062 error = EINVAL; 3063 break; 3064 } 3065 3066 return (error); 3067 } 3068 3069 /* 3070 * NFS advisory byte-level locks. 3071 */ 3072 int 3073 nfs_advlock(void *v) 3074 { 3075 struct vop_advlock_args *ap = v; 3076 struct nfsnode *np = VTONFS(ap->a_vp); 3077 3078 return (lf_advlock(&np->n_lockf, np->n_size, ap->a_id, ap->a_op, 3079 ap->a_fl, ap->a_flags)); 3080 } 3081 3082 /* 3083 * Print out the contents of an nfsnode. 
3084 */ 3085 int 3086 nfs_print(void *v) 3087 { 3088 struct vop_print_args *ap = v; 3089 struct vnode *vp = ap->a_vp; 3090 struct nfsnode *np = VTONFS(vp); 3091 3092 printf("tag VT_NFS, fileid %lld fsid 0x%lx", 3093 np->n_vattr.va_fileid, np->n_vattr.va_fsid); 3094 #ifdef FIFO 3095 if (vp->v_type == VFIFO) 3096 fifo_printinfo(vp); 3097 #endif 3098 printf("\n"); 3099 return (0); 3100 } 3101 3102 /* 3103 * Just call nfs_writebp() with the force argument set to 1. 3104 */ 3105 int 3106 nfs_bwrite(void *v) 3107 { 3108 struct vop_bwrite_args *ap = v; 3109 3110 return (nfs_writebp(ap->a_bp, 1)); 3111 } 3112 3113 /* 3114 * This is a clone of vop_generic_bwrite(), except that B_WRITEINPROG isn't set unless 3115 * the force flag is one and it also handles the B_NEEDCOMMIT flag. 3116 */ 3117 int 3118 nfs_writebp(struct buf *bp, int force) 3119 { 3120 int oldflags = bp->b_flags, retv = 1; 3121 struct proc *p = curproc; /* XXX */ 3122 off_t off; 3123 size_t cnt; 3124 int s; 3125 struct vnode *vp; 3126 struct nfsnode *np; 3127 3128 if(!(bp->b_flags & B_BUSY)) 3129 panic("bwrite: buffer is not busy???"); 3130 3131 vp = bp->b_vp; 3132 np = VTONFS(vp); 3133 3134 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR); 3135 3136 s = splbio(); 3137 buf_undirty(bp); 3138 3139 if ((oldflags & B_ASYNC) && !(oldflags & B_DELWRI) && p) 3140 ++p->p_ru.ru_oublock; 3141 3142 bp->b_vp->v_numoutput++; 3143 splx(s); 3144 3145 /* 3146 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not 3147 * an actual write will have to be scheduled via. VOP_STRATEGY(). 3148 * If B_WRITEINPROG is already set, then push it with a write anyhow. 3149 */ 3150 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) { 3151 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; 3152 cnt = bp->b_dirtyend - bp->b_dirtyoff; 3153 3154 rw_enter_write(&np->n_commitlock); 3155 if (!(bp->b_flags & B_NEEDCOMMIT)) { 3156 rw_exit_write(&np->n_commitlock); 3157 return (0); 3158 } 3159 3160 /* 3161 * If it's already been committed by somebody else, 3162 * bail. 3163 */ 3164 if (!nfs_in_committed_range(vp, bp)) { 3165 int pushedrange = 0; 3166 /* 3167 * Since we're going to do this, push as much 3168 * as we can. 3169 */ 3170 3171 if (nfs_in_tobecommitted_range(vp, bp)) { 3172 pushedrange = 1; 3173 off = np->n_pushlo; 3174 cnt = np->n_pushhi - np->n_pushlo; 3175 } 3176 3177 bp->b_flags |= B_WRITEINPROG; 3178 bcstats.pendingwrites++; 3179 bcstats.numwrites++; 3180 retv = nfs_commit(bp->b_vp, off, cnt, curproc); 3181 bp->b_flags &= ~B_WRITEINPROG; 3182 3183 if (retv == 0) { 3184 if (pushedrange) 3185 nfs_merge_commit_ranges(vp); 3186 else 3187 nfs_add_committed_range(vp, bp); 3188 } else 3189 bcstats.pendingwrites--; 3190 } else 3191 retv = 0; /* It has already been commited. */ 3192 3193 rw_exit_write(&np->n_commitlock); 3194 if (!retv) { 3195 bp->b_dirtyoff = bp->b_dirtyend = 0; 3196 bp->b_flags &= ~B_NEEDCOMMIT; 3197 s = splbio(); 3198 biodone(bp); 3199 splx(s); 3200 } else if (retv == NFSERR_STALEWRITEVERF) 3201 nfs_clearcommit(bp->b_vp->v_mount); 3202 } 3203 if (retv) { 3204 buf_flip_dma(bp); 3205 if (force) 3206 bp->b_flags |= B_WRITEINPROG; 3207 VOP_STRATEGY(bp); 3208 } 3209 3210 if( (oldflags & B_ASYNC) == 0) { 3211 int rtval; 3212 3213 bp->b_flags |= B_RAW; 3214 rtval = biowait(bp); 3215 if (!(oldflags & B_DELWRI) && p) { 3216 ++p->p_ru.ru_oublock; 3217 } 3218 brelse(bp); 3219 return (rtval); 3220 } 3221 3222 return (0); 3223 } 3224 3225 /* 3226 * nfs special file access vnode op. 
3227 * Essentially just get vattr and then imitate iaccess() since the device is 3228 * local to the client. 3229 */ 3230 int 3231 nfsspec_access(void *v) 3232 { 3233 struct vop_access_args *ap = v; 3234 struct vattr va; 3235 struct vnode *vp = ap->a_vp; 3236 int error; 3237 3238 /* 3239 * Disallow write attempts on filesystems mounted read-only; 3240 * unless the file is a socket, fifo, or a block or character 3241 * device resident on the filesystem. 3242 */ 3243 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3244 switch (vp->v_type) { 3245 case VREG: 3246 case VDIR: 3247 case VLNK: 3248 return (EROFS); 3249 default: 3250 break; 3251 } 3252 } 3253 3254 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_p); 3255 if (error) 3256 return (error); 3257 3258 return (vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid, 3259 ap->a_mode, ap->a_cred)); 3260 } 3261 3262 int 3263 nfs_poll(void *v) 3264 { 3265 struct vop_poll_args *ap = v; 3266 3267 /* 3268 * We should really check to see if I/O is possible. 3269 */ 3270 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 3271 } 3272 3273 /* 3274 * Read wrapper for special devices. 3275 */ 3276 int 3277 nfsspec_read(void *v) 3278 { 3279 struct vop_read_args *ap = v; 3280 struct nfsnode *np = VTONFS(ap->a_vp); 3281 3282 /* 3283 * Set access flag. 3284 */ 3285 np->n_flag |= NACC; 3286 getnanotime(&np->n_atim); 3287 return (spec_read(ap)); 3288 } 3289 3290 /* 3291 * Write wrapper for special devices. 3292 */ 3293 int 3294 nfsspec_write(void *v) 3295 { 3296 struct vop_write_args *ap = v; 3297 struct nfsnode *np = VTONFS(ap->a_vp); 3298 3299 /* 3300 * Set update flag. 3301 */ 3302 np->n_flag |= NUPD; 3303 getnanotime(&np->n_mtim); 3304 return (spec_write(ap)); 3305 } 3306 3307 /* 3308 * Close wrapper for special devices. 3309 * 3310 * Update the times on the nfsnode then do device close. 3311 */ 3312 int 3313 nfsspec_close(void *v) 3314 { 3315 struct vop_close_args *ap = v; 3316 struct vnode *vp = ap->a_vp; 3317 struct nfsnode *np = VTONFS(vp); 3318 struct vattr vattr; 3319 3320 if (np->n_flag & (NACC | NUPD)) { 3321 np->n_flag |= NCHG; 3322 if (vp->v_usecount == 1 && 3323 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3324 VATTR_NULL(&vattr); 3325 if (np->n_flag & NACC) 3326 vattr.va_atime = np->n_atim; 3327 if (np->n_flag & NUPD) 3328 vattr.va_mtime = np->n_mtim; 3329 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3330 } 3331 } 3332 return (spec_close(ap)); 3333 } 3334 3335 #ifdef FIFO 3336 /* 3337 * Read wrapper for fifos. 3338 */ 3339 int 3340 nfsfifo_read(void *v) 3341 { 3342 struct vop_read_args *ap = v; 3343 struct nfsnode *np = VTONFS(ap->a_vp); 3344 3345 /* 3346 * Set access flag. 3347 */ 3348 np->n_flag |= NACC; 3349 getnanotime(&np->n_atim); 3350 return (fifo_read(ap)); 3351 } 3352 3353 /* 3354 * Write wrapper for fifos. 3355 */ 3356 int 3357 nfsfifo_write(void *v) 3358 { 3359 struct vop_write_args *ap = v; 3360 struct nfsnode *np = VTONFS(ap->a_vp); 3361 3362 /* 3363 * Set update flag. 3364 */ 3365 np->n_flag |= NUPD; 3366 getnanotime(&np->n_mtim); 3367 return (fifo_write(ap)); 3368 } 3369 3370 /* 3371 * Close wrapper for fifos. 3372 * 3373 * Update the times on the nfsnode then do fifo close. 
3374 */
3375 int
3376 nfsfifo_close(void *v)
3377 {
3378 struct vop_close_args *ap = v;
3379 struct vnode *vp = ap->a_vp;
3380 struct nfsnode *np = VTONFS(vp);
3381 struct vattr vattr;
3382
3383 if (np->n_flag & (NACC | NUPD)) {
3384 if (np->n_flag & NACC) {
3385 getnanotime(&np->n_atim);
3386 }
3387 if (np->n_flag & NUPD) {
3388 getnanotime(&np->n_mtim);
3389 }
3390 np->n_flag |= NCHG;
3391 if (vp->v_usecount == 1 &&
3392 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
3393 VATTR_NULL(&vattr);
3394 if (np->n_flag & NACC)
3395 vattr.va_atime = np->n_atim;
3396 if (np->n_flag & NUPD)
3397 vattr.va_mtime = np->n_mtim;
3398 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p);
3399 }
3400 }
3401 return (fifo_close(ap));
3402 }
3403
3404 int
3405 nfsfifo_reclaim(void *v)
3406 {
3407 fifo_reclaim(v);
3408 return (nfs_reclaim(v));
3409 }
3410 #endif /* FIFO */
3411