1 /* $OpenBSD: nfs_vnops.c,v 1.187 2021/10/02 08:51:41 semarie Exp $ */ 2 /* $NetBSD: nfs_vnops.c,v 1.62.4.1 1996/07/08 20:26:52 jtc Exp $ */ 3 4 /* 5 * Copyright (c) 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * Rick Macklem at The University of Guelph. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95 36 */ 37 38 39 /* 40 * vnode op calls for Sun NFS version 2 and 3 41 */ 42 43 #include <sys/param.h> 44 #include <sys/kernel.h> 45 #include <sys/systm.h> 46 #include <sys/resourcevar.h> 47 #include <sys/poll.h> 48 #include <sys/proc.h> 49 #include <sys/mount.h> 50 #include <sys/buf.h> 51 #include <sys/malloc.h> 52 #include <sys/pool.h> 53 #include <sys/mbuf.h> 54 #include <sys/conf.h> 55 #include <sys/namei.h> 56 #include <sys/vnode.h> 57 #include <sys/lock.h> 58 #include <sys/dirent.h> 59 #include <sys/fcntl.h> 60 #include <sys/lockf.h> 61 #include <sys/queue.h> 62 #include <sys/specdev.h> 63 #include <sys/unistd.h> 64 65 #include <miscfs/fifofs/fifo.h> 66 67 #include <nfs/rpcv2.h> 68 #include <nfs/nfsproto.h> 69 #include <nfs/nfs.h> 70 #include <nfs/nfsnode.h> 71 #include <nfs/nfsmount.h> 72 #include <nfs/xdr_subs.h> 73 #include <nfs/nfsm_subs.h> 74 #include <nfs/nfs_var.h> 75 76 #include <uvm/uvm_extern.h> 77 78 #include <netinet/in.h> 79 80 int nfs_access(void *); 81 int nfs_advlock(void *); 82 int nfs_bmap(void *); 83 int nfs_bwrite(void *); 84 int nfs_close(void *); 85 int nfs_commit(struct vnode *, u_quad_t, int, struct proc *); 86 int nfs_create(void *); 87 int nfs_flush(struct vnode *, struct ucred *, int, struct proc *, int); 88 int nfs_fsync(void *); 89 int nfs_getattr(void *); 90 int nfs_getreq(struct nfsrv_descript *, struct nfsd *, int); 91 int nfs_islocked(void *); 92 int nfs_link(void *); 93 int nfs_lock(void *); 94 int nfs_lookitup(struct vnode *, char *, int, struct ucred *, struct proc *, 95 struct nfsnode **); 96 int nfs_lookup(void *); 97 int nfs_mkdir(void *); 98 int nfs_mknod(void *); 99 int nfs_mknodrpc(struct vnode *, struct vnode **, struct componentname *, 100 struct vattr *); 101 int nfs_null(struct vnode *, struct ucred *, struct proc *); 102 int nfs_open(void *); 103 int nfs_pathconf(void *); 104 int nfs_poll(void *); 105 int nfs_print(void *); 106 int nfs_read(void *); 107 int nfs_readdir(void *); 108 int nfs_readdirplusrpc(struct vnode *, struct uio *, struct ucred *, int *, 109 struct proc *); 110 int nfs_readdirrpc(struct vnode *, struct uio *, struct ucred *, int *); 111 int nfs_remove(void *); 112 int nfs_removerpc(struct vnode *, char *, int, struct ucred *, struct proc *); 113 int nfs_rename(void *); 114 int nfs_renameit(struct vnode *, struct componentname *, struct sillyrename *); 115 int nfs_renamerpc(struct vnode *, char *, int, struct vnode *, char *, int, 116 struct ucred *, struct proc *); 117 int nfs_rmdir(void *); 118 int nfs_setattr(void *); 119 int nfs_setattrrpc(struct vnode *, struct vattr *, struct ucred *, 120 struct proc *); 121 int nfs_sillyrename(struct vnode *, struct vnode *, 122 struct componentname *); 123 int nfs_strategy(void *); 124 int nfs_symlink(void *); 125 int nfs_unlock(void *); 126 127 void nfs_cache_enter(struct vnode *, struct vnode *, struct componentname *); 128 129 int nfsfifo_close(void *); 130 int nfsfifo_read(void *); 131 int nfsfifo_reclaim(void *); 132 int nfsfifo_write(void *); 133 134 int nfsspec_access(void *); 135 int nfsspec_close(void *); 136 int nfsspec_read(void *); 137 int nfsspec_write(void *); 138 139 /* Global vfs data structures for nfs. 
*/ 140 const struct vops nfs_vops = { 141 .vop_lookup = nfs_lookup, 142 .vop_create = nfs_create, 143 .vop_mknod = nfs_mknod, 144 .vop_open = nfs_open, 145 .vop_close = nfs_close, 146 .vop_access = nfs_access, 147 .vop_getattr = nfs_getattr, 148 .vop_setattr = nfs_setattr, 149 .vop_read = nfs_read, 150 .vop_write = nfs_write, 151 .vop_ioctl = nfs_ioctl, 152 .vop_poll = nfs_poll, 153 .vop_kqfilter = nfs_kqfilter, 154 .vop_revoke = vop_generic_revoke, 155 .vop_fsync = nfs_fsync, 156 .vop_remove = nfs_remove, 157 .vop_link = nfs_link, 158 .vop_rename = nfs_rename, 159 .vop_mkdir = nfs_mkdir, 160 .vop_rmdir = nfs_rmdir, 161 .vop_symlink = nfs_symlink, 162 .vop_readdir = nfs_readdir, 163 .vop_readlink = nfs_readlink, 164 .vop_abortop = vop_generic_abortop, 165 .vop_inactive = nfs_inactive, 166 .vop_reclaim = nfs_reclaim, 167 .vop_lock = nfs_lock, 168 .vop_unlock = nfs_unlock, 169 .vop_bmap = nfs_bmap, 170 .vop_strategy = nfs_strategy, 171 .vop_print = nfs_print, 172 .vop_islocked = nfs_islocked, 173 .vop_pathconf = nfs_pathconf, 174 .vop_advlock = nfs_advlock, 175 .vop_bwrite = nfs_bwrite 176 }; 177 178 /* Special device vnode ops. */ 179 const struct vops nfs_specvops = { 180 .vop_close = nfsspec_close, 181 .vop_access = nfsspec_access, 182 .vop_getattr = nfs_getattr, 183 .vop_setattr = nfs_setattr, 184 .vop_read = nfsspec_read, 185 .vop_write = nfsspec_write, 186 .vop_fsync = nfs_fsync, 187 .vop_inactive = nfs_inactive, 188 .vop_reclaim = nfs_reclaim, 189 .vop_lock = nfs_lock, 190 .vop_unlock = nfs_unlock, 191 .vop_print = nfs_print, 192 .vop_islocked = nfs_islocked, 193 194 /* XXX: Keep in sync with spec_vops. */ 195 .vop_lookup = vop_generic_lookup, 196 .vop_create = vop_generic_badop, 197 .vop_mknod = vop_generic_badop, 198 .vop_open = spec_open, 199 .vop_ioctl = spec_ioctl, 200 .vop_poll = spec_poll, 201 .vop_kqfilter = spec_kqfilter, 202 .vop_revoke = vop_generic_revoke, 203 .vop_remove = vop_generic_badop, 204 .vop_link = vop_generic_badop, 205 .vop_rename = vop_generic_badop, 206 .vop_mkdir = vop_generic_badop, 207 .vop_rmdir = vop_generic_badop, 208 .vop_symlink = vop_generic_badop, 209 .vop_readdir = vop_generic_badop, 210 .vop_readlink = vop_generic_badop, 211 .vop_abortop = vop_generic_badop, 212 .vop_bmap = vop_generic_bmap, 213 .vop_strategy = spec_strategy, 214 .vop_pathconf = spec_pathconf, 215 .vop_advlock = spec_advlock, 216 .vop_bwrite = vop_generic_bwrite, 217 }; 218 219 #ifdef FIFO 220 const struct vops nfs_fifovops = { 221 .vop_close = nfsfifo_close, 222 .vop_access = nfsspec_access, 223 .vop_getattr = nfs_getattr, 224 .vop_setattr = nfs_setattr, 225 .vop_read = nfsfifo_read, 226 .vop_write = nfsfifo_write, 227 .vop_fsync = nfs_fsync, 228 .vop_inactive = nfs_inactive, 229 .vop_reclaim = nfsfifo_reclaim, 230 .vop_lock = nfs_lock, 231 .vop_unlock = nfs_unlock, 232 .vop_print = nfs_print, 233 .vop_islocked = nfs_islocked, 234 .vop_bwrite = vop_generic_bwrite, 235 236 /* XXX: Keep in sync with fifo_vops. 
*/ 237 .vop_lookup = vop_generic_lookup, 238 .vop_create = vop_generic_badop, 239 .vop_mknod = vop_generic_badop, 240 .vop_open = fifo_open, 241 .vop_ioctl = fifo_ioctl, 242 .vop_poll = fifo_poll, 243 .vop_kqfilter = fifo_kqfilter, 244 .vop_revoke = vop_generic_revoke, 245 .vop_remove = vop_generic_badop, 246 .vop_link = vop_generic_badop, 247 .vop_rename = vop_generic_badop, 248 .vop_mkdir = vop_generic_badop, 249 .vop_rmdir = vop_generic_badop, 250 .vop_symlink = vop_generic_badop, 251 .vop_readdir = vop_generic_badop, 252 .vop_readlink = vop_generic_badop, 253 .vop_abortop = vop_generic_badop, 254 .vop_bmap = vop_generic_bmap, 255 .vop_strategy = vop_generic_badop, 256 .vop_pathconf = fifo_pathconf, 257 .vop_advlock = fifo_advlock, 258 }; 259 #endif /* FIFO */ 260 261 /* 262 * Global variables 263 */ 264 extern u_int32_t nfs_true, nfs_false; 265 extern u_int32_t nfs_xdrneg1; 266 extern struct nfsstats nfsstats; 267 extern nfstype nfsv3_type[9]; 268 int nfs_numasync = 0; 269 270 void 271 nfs_cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 272 { 273 struct nfsnode *np; 274 275 if (vp != NULL) { 276 np = VTONFS(vp); 277 np->n_ctime = np->n_vattr.va_ctime.tv_sec; 278 } else { 279 np = VTONFS(dvp); 280 if (!np->n_ctime) 281 np->n_ctime = np->n_vattr.va_mtime.tv_sec; 282 } 283 284 cache_enter(dvp, vp, cnp); 285 } 286 287 /* 288 * nfs null call from vfs. 289 */ 290 int 291 nfs_null(struct vnode *vp, struct ucred *cred, struct proc *procp) 292 { 293 struct nfsm_info info; 294 int error = 0; 295 296 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(0); 297 error = nfs_request(vp, NFSPROC_NULL, &info); 298 m_freem(info.nmi_mrep); 299 return (error); 300 } 301 302 /* 303 * nfs access vnode op. 304 * For nfs version 2, just return ok. File accesses may fail later. 305 * For nfs version 3, use the access rpc to check accessibility. If file modes 306 * are changed on the server, accesses might still fail later. 307 */ 308 int 309 nfs_access(void *v) 310 { 311 struct vop_access_args *ap = v; 312 struct vnode *vp = ap->a_vp; 313 u_int32_t *tl; 314 int32_t t1; 315 caddr_t cp2; 316 int error = 0, attrflag; 317 u_int32_t mode, rmode; 318 int v3 = NFS_ISV3(vp); 319 int cachevalid; 320 struct nfsm_info info; 321 322 struct nfsnode *np = VTONFS(vp); 323 324 /* 325 * Disallow write attempts on filesystems mounted read-only; 326 * unless the file is a socket, fifo, or a block or character 327 * device resident on the filesystem. 328 */ 329 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 330 switch (vp->v_type) { 331 case VREG: 332 case VDIR: 333 case VLNK: 334 return (EROFS); 335 default: 336 break; 337 } 338 } 339 340 /* 341 * Check access cache first. If a request has been made for this uid 342 * shortly before, use the cached result. 343 */ 344 cachevalid = (np->n_accstamp != -1 && 345 (gettime() - np->n_accstamp) < nfs_attrtimeo(np) && 346 np->n_accuid == ap->a_cred->cr_uid); 347 348 if (cachevalid) { 349 if (!np->n_accerror) { 350 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 351 return (np->n_accerror); 352 } else if ((np->n_accmode & ap->a_mode) == np->n_accmode) 353 return (np->n_accerror); 354 } 355 356 /* 357 * For nfs v3, do an access rpc, otherwise you are stuck emulating 358 * ufs_access() locally using the vattr. 
This may not be correct, 359 * since the server may apply other access criteria such as 360 * client uid-->server uid mapping that we do not know about, but 361 * this is better than just returning anything that is lying about 362 * in the cache. 363 */ 364 if (v3) { 365 nfsstats.rpccnt[NFSPROC_ACCESS]++; 366 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_UNSIGNED); 367 nfsm_fhtom(&info, vp, v3); 368 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 369 if (ap->a_mode & VREAD) 370 mode = NFSV3ACCESS_READ; 371 else 372 mode = 0; 373 if (vp->v_type == VDIR) { 374 if (ap->a_mode & VWRITE) 375 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | 376 NFSV3ACCESS_DELETE); 377 if (ap->a_mode & VEXEC) 378 mode |= NFSV3ACCESS_LOOKUP; 379 } else { 380 if (ap->a_mode & VWRITE) 381 mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); 382 if (ap->a_mode & VEXEC) 383 mode |= NFSV3ACCESS_EXECUTE; 384 } 385 *tl = txdr_unsigned(mode); 386 387 info.nmi_procp = ap->a_p; 388 info.nmi_cred = ap->a_cred; 389 error = nfs_request(vp, NFSPROC_ACCESS, &info); 390 391 nfsm_postop_attr(vp, attrflag); 392 if (error) { 393 m_freem(info.nmi_mrep); 394 goto nfsmout; 395 } 396 397 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 398 rmode = fxdr_unsigned(u_int32_t, *tl); 399 /* 400 * The NFS V3 spec does not clarify whether or not 401 * the returned access bits can be a superset of 402 * the ones requested, so... 403 */ 404 if ((rmode & mode) != mode) 405 error = EACCES; 406 407 m_freem(info.nmi_mrep); 408 } else 409 return (nfsspec_access(ap)); 410 411 412 /* 413 * If we got the same result as for a previous, different request, OR 414 * it in. Don't update the timestamp in that case. 415 */ 416 if (!error || error == EACCES) { 417 if (cachevalid && np->n_accstamp != -1 && 418 error == np->n_accerror) { 419 if (!error) 420 np->n_accmode |= ap->a_mode; 421 else { 422 if ((np->n_accmode & ap->a_mode) == ap->a_mode) 423 np->n_accmode = ap->a_mode; 424 } 425 } else { 426 np->n_accstamp = gettime(); 427 np->n_accuid = ap->a_cred->cr_uid; 428 np->n_accmode = ap->a_mode; 429 np->n_accerror = error; 430 } 431 } 432 nfsmout: 433 return (error); 434 } 435 436 /* 437 * nfs open vnode op 438 * Check to see if the type is ok 439 * and that deletion is not in progress. 440 * For paged in text files, you will need to flush the page cache 441 * if consistency is lost. 442 */ 443 int 444 nfs_open(void *v) 445 { 446 struct vop_open_args *ap = v; 447 struct vnode *vp = ap->a_vp; 448 struct nfsnode *np = VTONFS(vp); 449 struct vattr vattr; 450 int error; 451 452 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 453 #ifdef DIAGNOSTIC 454 printf("open eacces vtyp=%d\n",vp->v_type); 455 #endif 456 return (EACCES); 457 } 458 459 /* 460 * Initialize read and write creds here, for swapfiles 461 * and other paths that don't set the creds themselves. 
462 */ 463 464 if (ap->a_mode & FREAD) { 465 if (np->n_rcred) { 466 crfree(np->n_rcred); 467 } 468 np->n_rcred = ap->a_cred; 469 crhold(np->n_rcred); 470 } 471 if (ap->a_mode & FWRITE) { 472 if (np->n_wcred) { 473 crfree(np->n_wcred); 474 } 475 np->n_wcred = ap->a_cred; 476 crhold(np->n_wcred); 477 } 478 479 if (np->n_flag & NMODIFIED) { 480 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 481 if (error == EINTR) 482 return (error); 483 uvm_vnp_uncache(vp); 484 NFS_INVALIDATE_ATTRCACHE(np); 485 if (vp->v_type == VDIR) 486 np->n_direofoffset = 0; 487 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 488 if (error) 489 return (error); 490 np->n_mtime = vattr.va_mtime; 491 } else { 492 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); 493 if (error) 494 return (error); 495 if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) { 496 if (vp->v_type == VDIR) 497 np->n_direofoffset = 0; 498 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 499 if (error == EINTR) 500 return (error); 501 uvm_vnp_uncache(vp); 502 np->n_mtime = vattr.va_mtime; 503 } 504 } 505 /* For open/close consistency. */ 506 NFS_INVALIDATE_ATTRCACHE(np); 507 return (0); 508 } 509 510 /* 511 * nfs close vnode op 512 * What an NFS client should do upon close after writing is a debatable issue. 513 * Most NFS clients push delayed writes to the server upon close, basically for 514 * two reasons: 515 * 1 - So that any write errors may be reported back to the client process 516 * doing the close system call. By far the two most likely errors are 517 * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. 518 * 2 - To put a worst case upper bound on cache inconsistency between 519 * multiple clients for the file. 520 * There is also a consistency problem for Version 2 of the protocol w.r.t. 521 * not being able to tell if other clients are writing a file concurrently, 522 * since there is no way of knowing if the changed modify time in the reply 523 * is only due to the write for this client. 524 * (NFS Version 3 provides weak cache consistency data in the reply that 525 * should be sufficient to detect and handle this case.) 526 * 527 * The current code does the following: 528 * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers 529 * for NFS Version 3 - flush dirty buffers to the server but don't invalidate 530 * or commit them (this satisfies 1 and 2 except for the 531 * case where the server crashes after this close but 532 * before the commit RPC, which is felt to be "good 533 * enough". Changing the last argument to nfs_flush() to 534 * a 1 would force a commit operation, if it is felt a 535 * commit is necessary now. 536 */ 537 int 538 nfs_close(void *v) 539 { 540 struct vop_close_args *ap = v; 541 struct vnode *vp = ap->a_vp; 542 struct nfsnode *np = VTONFS(vp); 543 int error = 0; 544 545 if (vp->v_type == VREG) { 546 if (np->n_flag & NMODIFIED) { 547 if (NFS_ISV3(vp)) { 548 error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0); 549 np->n_flag &= ~NMODIFIED; 550 } else 551 error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p); 552 NFS_INVALIDATE_ATTRCACHE(np); 553 } 554 if (np->n_flag & NWRITEERR) { 555 np->n_flag &= ~NWRITEERR; 556 error = np->n_error; 557 } 558 } 559 return (error); 560 } 561 562 /* 563 * nfs getattr call from vfs. 
564 */ 565 int 566 nfs_getattr(void *v) 567 { 568 struct vop_getattr_args *ap = v; 569 struct vnode *vp = ap->a_vp; 570 struct nfsnode *np = VTONFS(vp); 571 struct nfsm_info info; 572 int32_t t1; 573 int error = 0; 574 575 info.nmi_v3 = NFS_ISV3(vp); 576 577 /* 578 * Update local times for special files. 579 */ 580 if (np->n_flag & (NACC | NUPD)) 581 np->n_flag |= NCHG; 582 /* 583 * First look in the cache. 584 */ 585 if (nfs_getattrcache(vp, ap->a_vap) == 0) 586 return (0); 587 588 nfsstats.rpccnt[NFSPROC_GETATTR]++; 589 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)); 590 nfsm_fhtom(&info, vp, info.nmi_v3); 591 info.nmi_procp = ap->a_p; 592 info.nmi_cred = ap->a_cred; 593 error = nfs_request(vp, NFSPROC_GETATTR, &info); 594 if (!error) 595 nfsm_loadattr(vp, ap->a_vap); 596 m_freem(info.nmi_mrep); 597 nfsmout: 598 return (error); 599 } 600 601 /* 602 * nfs setattr call. 603 */ 604 int 605 nfs_setattr(void *v) 606 { 607 struct vop_setattr_args *ap = v; 608 struct vnode *vp = ap->a_vp; 609 struct nfsnode *np = VTONFS(vp); 610 struct vattr *vap = ap->a_vap; 611 int hint = NOTE_ATTRIB; 612 int error = 0; 613 u_quad_t tsize = 0; 614 615 /* 616 * Setting of flags is not supported. 617 */ 618 if (vap->va_flags != VNOVAL) 619 return (EOPNOTSUPP); 620 621 /* 622 * Disallow write attempts if the filesystem is mounted read-only. 623 */ 624 if ((vap->va_uid != (uid_t)VNOVAL || 625 vap->va_gid != (gid_t)VNOVAL || 626 vap->va_atime.tv_nsec != VNOVAL || 627 vap->va_mtime.tv_nsec != VNOVAL || 628 vap->va_mode != (mode_t)VNOVAL) && 629 (vp->v_mount->mnt_flag & MNT_RDONLY)) 630 return (EROFS); 631 if (vap->va_size != VNOVAL) { 632 switch (vp->v_type) { 633 case VDIR: 634 return (EISDIR); 635 case VCHR: 636 case VBLK: 637 case VSOCK: 638 case VFIFO: 639 if (vap->va_mtime.tv_nsec == VNOVAL && 640 vap->va_atime.tv_nsec == VNOVAL && 641 vap->va_mode == (mode_t)VNOVAL && 642 vap->va_uid == (uid_t)VNOVAL && 643 vap->va_gid == (gid_t)VNOVAL) 644 return (0); 645 vap->va_size = VNOVAL; 646 break; 647 default: 648 /* 649 * Disallow write attempts if the filesystem is 650 * mounted read-only. 651 */ 652 if (vp->v_mount->mnt_flag & MNT_RDONLY) 653 return (EROFS); 654 if (vap->va_size == 0) 655 error = nfs_vinvalbuf(vp, 0, 656 ap->a_cred, ap->a_p); 657 else 658 error = nfs_vinvalbuf(vp, V_SAVE, 659 ap->a_cred, ap->a_p); 660 if (error) 661 return (error); 662 tsize = np->n_size; 663 np->n_size = np->n_vattr.va_size = vap->va_size; 664 uvm_vnp_setsize(vp, np->n_size); 665 }; 666 } else if ((vap->va_mtime.tv_nsec != VNOVAL || 667 vap->va_atime.tv_nsec != VNOVAL) && 668 vp->v_type == VREG && 669 (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, 670 ap->a_p)) == EINTR) 671 return (error); 672 error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); 673 if (error && vap->va_size != VNOVAL) { 674 np->n_size = np->n_vattr.va_size = tsize; 675 uvm_vnp_setsize(vp, np->n_size); 676 } 677 678 if (vap->va_size != VNOVAL && vap->va_size < tsize) 679 hint |= NOTE_TRUNCATE; 680 681 VN_KNOTE(vp, hint); /* XXX setattrrpc? */ 682 683 return (error); 684 } 685 686 /* 687 * Do an nfs setattr rpc. 
688 */ 689 int 690 nfs_setattrrpc(struct vnode *vp, struct vattr *vap, struct ucred *cred, 691 struct proc *procp) 692 { 693 struct nfsv2_sattr *sp; 694 struct nfsm_info info; 695 int32_t t1; 696 caddr_t cp2; 697 u_int32_t *tl; 698 int error = 0, wccflag = NFSV3_WCCRATTR; 699 int v3 = NFS_ISV3(vp); 700 701 info.nmi_v3 = NFS_ISV3(vp); 702 703 nfsstats.rpccnt[NFSPROC_SETATTR]++; 704 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(v3) + NFSX_SATTR(v3)); 705 nfsm_fhtom(&info, vp, v3); 706 707 if (info.nmi_v3) { 708 nfsm_v3attrbuild(&info.nmi_mb, vap, 1); 709 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 710 *tl = nfs_false; 711 } else { 712 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 713 if (vap->va_mode == (mode_t)VNOVAL) 714 sp->sa_mode = nfs_xdrneg1; 715 else 716 sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); 717 if (vap->va_uid == (uid_t)VNOVAL) 718 sp->sa_uid = nfs_xdrneg1; 719 else 720 sp->sa_uid = txdr_unsigned(vap->va_uid); 721 if (vap->va_gid == (gid_t)VNOVAL) 722 sp->sa_gid = nfs_xdrneg1; 723 else 724 sp->sa_gid = txdr_unsigned(vap->va_gid); 725 sp->sa_size = txdr_unsigned(vap->va_size); 726 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 727 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 728 } 729 730 info.nmi_procp = procp; 731 info.nmi_cred = cred; 732 error = nfs_request(vp, NFSPROC_SETATTR, &info); 733 734 if (info.nmi_v3) 735 nfsm_wcc_data(vp, wccflag); 736 else if (error == 0) 737 nfsm_loadattr(vp, NULL); 738 739 m_freem(info.nmi_mrep); 740 nfsmout: 741 return (error); 742 } 743 744 /* 745 * nfs lookup call, one step at a time... 746 * First look in cache 747 * If not found, unlock the directory nfsnode and do the rpc 748 */ 749 int 750 nfs_lookup(void *v) 751 { 752 struct vop_lookup_args *ap = v; 753 struct componentname *cnp = ap->a_cnp; 754 struct vnode *dvp = ap->a_dvp; 755 struct vnode **vpp = ap->a_vpp; 756 struct nfsm_info info; 757 int flags; 758 struct vnode *newvp; 759 u_int32_t *tl; 760 int32_t t1; 761 struct nfsmount *nmp; 762 caddr_t cp2; 763 long len; 764 nfsfh_t *fhp; 765 struct nfsnode *np; 766 int lockparent, wantparent, error = 0, attrflag, fhsize; 767 768 info.nmi_v3 = NFS_ISV3(dvp); 769 770 cnp->cn_flags &= ~PDIRUNLOCK; 771 flags = cnp->cn_flags; 772 773 *vpp = NULLVP; 774 newvp = NULLVP; 775 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 776 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 777 return (EROFS); 778 if (dvp->v_type != VDIR) 779 return (ENOTDIR); 780 lockparent = flags & LOCKPARENT; 781 wantparent = flags & (LOCKPARENT|WANTPARENT); 782 nmp = VFSTONFS(dvp->v_mount); 783 np = VTONFS(dvp); 784 785 /* 786 * Before tediously performing a linear scan of the directory, 787 * check the name cache to see if the directory/name pair 788 * we are looking for is known already. 789 * If the directory/name pair is found in the name cache, 790 * we have to ensure the directory has not changed from 791 * the time the cache entry has been created. If it has, 792 * the cache entry has to be ignored. 
793 */ 794 if ((error = cache_lookup(dvp, vpp, cnp)) >= 0) { 795 struct vattr vattr; 796 int err2; 797 798 if (error && error != ENOENT) { 799 *vpp = NULLVP; 800 return (error); 801 } 802 803 if (cnp->cn_flags & PDIRUNLOCK) { 804 err2 = vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); 805 if (err2 != 0) { 806 *vpp = NULLVP; 807 return (err2); 808 } 809 cnp->cn_flags &= ~PDIRUNLOCK; 810 } 811 812 err2 = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_proc); 813 if (err2 != 0) { 814 if (error == 0) { 815 if (*vpp != dvp) 816 vput(*vpp); 817 else 818 vrele(*vpp); 819 } 820 *vpp = NULLVP; 821 return (err2); 822 } 823 824 if (error == ENOENT) { 825 if (!VOP_GETATTR(dvp, &vattr, cnp->cn_cred, 826 cnp->cn_proc) && vattr.va_mtime.tv_sec == 827 VTONFS(dvp)->n_ctime) 828 return (ENOENT); 829 cache_purge(dvp); 830 np->n_ctime = 0; 831 goto dorpc; 832 } 833 834 newvp = *vpp; 835 if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, cnp->cn_proc) 836 && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) 837 { 838 nfsstats.lookupcache_hits++; 839 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 840 cnp->cn_flags |= SAVENAME; 841 if ((!lockparent || !(flags & ISLASTCN)) && 842 newvp != dvp) { 843 VOP_UNLOCK(dvp); 844 cnp->cn_flags |= PDIRUNLOCK; 845 } 846 return (0); 847 } 848 cache_purge(newvp); 849 if (newvp != dvp) 850 vput(newvp); 851 else 852 vrele(newvp); 853 *vpp = NULLVP; 854 } 855 dorpc: 856 error = 0; 857 newvp = NULLVP; 858 nfsstats.lookupcache_misses++; 859 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 860 len = cnp->cn_namelen; 861 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 862 NFSX_UNSIGNED + nfsm_rndup(len)); 863 nfsm_fhtom(&info, dvp, info.nmi_v3); 864 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 865 866 info.nmi_procp = cnp->cn_proc; 867 info.nmi_cred = cnp->cn_cred; 868 error = nfs_request(dvp, NFSPROC_LOOKUP, &info); 869 870 if (error) { 871 if (info.nmi_v3) 872 nfsm_postop_attr(dvp, attrflag); 873 m_freem(info.nmi_mrep); 874 goto nfsmout; 875 } 876 877 nfsm_getfh(fhp, fhsize, info.nmi_v3); 878 879 /* 880 * Handle RENAME case... 881 */ 882 if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) { 883 if (NFS_CMPFH(np, fhp, fhsize)) { 884 m_freem(info.nmi_mrep); 885 return (EISDIR); 886 } 887 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 888 if (error) { 889 m_freem(info.nmi_mrep); 890 return (error); 891 } 892 newvp = NFSTOV(np); 893 if (info.nmi_v3) { 894 nfsm_postop_attr(newvp, attrflag); 895 nfsm_postop_attr(dvp, attrflag); 896 } else 897 nfsm_loadattr(newvp, NULL); 898 *vpp = newvp; 899 m_freem(info.nmi_mrep); 900 cnp->cn_flags |= SAVENAME; 901 if (!lockparent) { 902 VOP_UNLOCK(dvp); 903 cnp->cn_flags |= PDIRUNLOCK; 904 } 905 return (0); 906 } 907 908 /* 909 * The postop attr handling is duplicated for each if case, 910 * because it should be done while dvp is locked (unlocking 911 * dvp is different for each case). 
912 */ 913 914 if (NFS_CMPFH(np, fhp, fhsize)) { 915 vref(dvp); 916 newvp = dvp; 917 if (info.nmi_v3) { 918 nfsm_postop_attr(newvp, attrflag); 919 nfsm_postop_attr(dvp, attrflag); 920 } else 921 nfsm_loadattr(newvp, NULL); 922 } else if (flags & ISDOTDOT) { 923 VOP_UNLOCK(dvp); 924 cnp->cn_flags |= PDIRUNLOCK; 925 926 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 927 if (error) { 928 if (vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY) == 0) 929 cnp->cn_flags &= ~PDIRUNLOCK; 930 m_freem(info.nmi_mrep); 931 return (error); 932 } 933 newvp = NFSTOV(np); 934 935 if (info.nmi_v3) { 936 nfsm_postop_attr(newvp, attrflag); 937 nfsm_postop_attr(dvp, attrflag); 938 } else 939 nfsm_loadattr(newvp, NULL); 940 941 if (lockparent && (flags & ISLASTCN)) { 942 if ((error = vn_lock(dvp, LK_EXCLUSIVE))) { 943 m_freem(info.nmi_mrep); 944 vput(newvp); 945 return error; 946 } 947 cnp->cn_flags &= ~PDIRUNLOCK; 948 } 949 950 } else { 951 error = nfs_nget(dvp->v_mount, fhp, fhsize, &np); 952 if (error) { 953 m_freem(info.nmi_mrep); 954 return error; 955 } 956 newvp = NFSTOV(np); 957 if (info.nmi_v3) { 958 nfsm_postop_attr(newvp, attrflag); 959 nfsm_postop_attr(dvp, attrflag); 960 } else 961 nfsm_loadattr(newvp, NULL); 962 if (!lockparent || !(flags & ISLASTCN)) { 963 VOP_UNLOCK(dvp); 964 cnp->cn_flags |= PDIRUNLOCK; 965 } 966 } 967 968 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 969 cnp->cn_flags |= SAVENAME; 970 if ((cnp->cn_flags & MAKEENTRY) && 971 (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) { 972 nfs_cache_enter(dvp, newvp, cnp); 973 } 974 975 *vpp = newvp; 976 m_freem(info.nmi_mrep); 977 978 nfsmout: 979 if (error) { 980 /* 981 * We get here only because of errors returned by 982 * the RPC. Otherwise we'll have returned above 983 * (the nfsm_* macros will jump to nfsmout 984 * on error). 985 */ 986 if (error == ENOENT && (cnp->cn_flags & MAKEENTRY) && 987 cnp->cn_nameiop != CREATE) { 988 nfs_cache_enter(dvp, NULL, cnp); 989 } 990 if (newvp != NULLVP) { 991 if (newvp != dvp) 992 vput(newvp); 993 else 994 vrele(newvp); 995 } 996 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) && 997 (flags & ISLASTCN) && error == ENOENT) { 998 if (dvp->v_mount->mnt_flag & MNT_RDONLY) 999 error = EROFS; 1000 else 1001 error = EJUSTRETURN; 1002 } 1003 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)) 1004 cnp->cn_flags |= SAVENAME; 1005 *vpp = NULL; 1006 } 1007 return (error); 1008 } 1009 1010 /* 1011 * nfs read call. 1012 * Just call nfs_bioread() to do the work. 1013 */ 1014 int 1015 nfs_read(void *v) 1016 { 1017 struct vop_read_args *ap = v; 1018 struct vnode *vp = ap->a_vp; 1019 1020 if (vp->v_type != VREG) 1021 return (EPERM); 1022 return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred)); 1023 } 1024 1025 /* 1026 * nfs readlink call 1027 */ 1028 int 1029 nfs_readlink(void *v) 1030 { 1031 struct vop_readlink_args *ap = v; 1032 struct vnode *vp = ap->a_vp; 1033 1034 if (vp->v_type != VLNK) 1035 return (EPERM); 1036 return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred)); 1037 } 1038 1039 /* 1040 * Lock an inode. 1041 */ 1042 int 1043 nfs_lock(void *v) 1044 { 1045 struct vop_lock_args *ap = v; 1046 struct vnode *vp = ap->a_vp; 1047 1048 return rrw_enter(&VTONFS(vp)->n_lock, ap->a_flags & LK_RWFLAGS); 1049 } 1050 1051 /* 1052 * Unlock an inode. 1053 */ 1054 int 1055 nfs_unlock(void *v) 1056 { 1057 struct vop_unlock_args *ap = v; 1058 struct vnode *vp = ap->a_vp; 1059 1060 rrw_exit(&VTONFS(vp)->n_lock); 1061 return 0; 1062 } 1063 1064 /* 1065 * Check for a locked inode. 
1066 */ 1067 int 1068 nfs_islocked(void *v) 1069 { 1070 struct vop_islocked_args *ap = v; 1071 1072 return rrw_status(&VTONFS(ap->a_vp)->n_lock); 1073 } 1074 1075 /* 1076 * Do a readlink rpc. 1077 * Called by nfs_doio() from below the buffer cache. 1078 */ 1079 int 1080 nfs_readlinkrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred) 1081 { 1082 struct nfsm_info info; 1083 u_int32_t *tl; 1084 int32_t t1; 1085 caddr_t cp2; 1086 int error = 0, len, attrflag; 1087 1088 info.nmi_v3 = NFS_ISV3(vp); 1089 1090 nfsstats.rpccnt[NFSPROC_READLINK]++; 1091 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3)); 1092 nfsm_fhtom(&info, vp, info.nmi_v3); 1093 1094 info.nmi_procp = curproc; 1095 info.nmi_cred = cred; 1096 error = nfs_request(vp, NFSPROC_READLINK, &info); 1097 1098 if (info.nmi_v3) 1099 nfsm_postop_attr(vp, attrflag); 1100 if (!error) { 1101 nfsm_strsiz(len, NFS_MAXPATHLEN); 1102 nfsm_mtouio(uiop, len); 1103 } 1104 1105 m_freem(info.nmi_mrep); 1106 1107 nfsmout: 1108 return (error); 1109 } 1110 1111 /* 1112 * nfs read rpc call 1113 * Ditto above 1114 */ 1115 int 1116 nfs_readrpc(struct vnode *vp, struct uio *uiop) 1117 { 1118 struct nfsm_info info; 1119 u_int32_t *tl; 1120 int32_t t1; 1121 caddr_t cp2; 1122 struct nfsmount *nmp; 1123 int error = 0, len, retlen, tsiz, eof, attrflag; 1124 1125 info.nmi_v3 = NFS_ISV3(vp); 1126 1127 eof = 0; 1128 1129 nmp = VFSTONFS(vp->v_mount); 1130 tsiz = uiop->uio_resid; 1131 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3) 1132 return (EFBIG); 1133 while (tsiz > 0) { 1134 nfsstats.rpccnt[NFSPROC_READ]++; 1135 len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz; 1136 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1137 NFSX_UNSIGNED * 3); 1138 nfsm_fhtom(&info, vp, info.nmi_v3); 1139 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED * 3); 1140 if (info.nmi_v3) { 1141 txdr_hyper(uiop->uio_offset, tl); 1142 *(tl + 2) = txdr_unsigned(len); 1143 } else { 1144 *tl++ = txdr_unsigned(uiop->uio_offset); 1145 *tl++ = txdr_unsigned(len); 1146 *tl = 0; 1147 } 1148 1149 info.nmi_procp = curproc; 1150 info.nmi_cred = VTONFS(vp)->n_rcred; 1151 error = nfs_request(vp, NFSPROC_READ, &info); 1152 if (info.nmi_v3) 1153 nfsm_postop_attr(vp, attrflag); 1154 if (error) { 1155 m_freem(info.nmi_mrep); 1156 goto nfsmout; 1157 } 1158 1159 if (info.nmi_v3) { 1160 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED); 1161 eof = fxdr_unsigned(int, *(tl + 1)); 1162 } else { 1163 nfsm_loadattr(vp, NULL); 1164 } 1165 1166 nfsm_strsiz(retlen, nmp->nm_rsize); 1167 nfsm_mtouio(uiop, retlen); 1168 m_freem(info.nmi_mrep); 1169 tsiz -= retlen; 1170 if (info.nmi_v3) { 1171 if (eof || retlen == 0) 1172 tsiz = 0; 1173 } else if (retlen < len) 1174 tsiz = 0; 1175 } 1176 1177 nfsmout: 1178 return (error); 1179 } 1180 1181 /* 1182 * nfs write call 1183 */ 1184 int 1185 nfs_writerpc(struct vnode *vp, struct uio *uiop, int *iomode, int *must_commit) 1186 { 1187 struct nfsm_info info; 1188 u_int32_t *tl; 1189 int32_t t1, backup; 1190 caddr_t cp2; 1191 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 1192 int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit; 1193 int committed = NFSV3WRITE_FILESYNC; 1194 1195 info.nmi_v3 = NFS_ISV3(vp); 1196 1197 #ifdef DIAGNOSTIC 1198 if (uiop->uio_iovcnt != 1) 1199 panic("nfs: writerpc iovcnt > 1"); 1200 #endif 1201 *must_commit = 0; 1202 tsiz = uiop->uio_resid; 1203 if (uiop->uio_offset + tsiz > 0xffffffff && !info.nmi_v3) 1204 return (EFBIG); 1205 while (tsiz > 0) { 1206 nfsstats.rpccnt[NFSPROC_WRITE]++; 1207 len = (tsiz > 
nmp->nm_wsize) ? nmp->nm_wsize : tsiz; 1208 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 1209 + 5 * NFSX_UNSIGNED + nfsm_rndup(len)); 1210 nfsm_fhtom(&info, vp, info.nmi_v3); 1211 if (info.nmi_v3) { 1212 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED); 1213 txdr_hyper(uiop->uio_offset, tl); 1214 tl += 2; 1215 *tl++ = txdr_unsigned(len); 1216 *tl++ = txdr_unsigned(*iomode); 1217 *tl = txdr_unsigned(len); 1218 } else { 1219 u_int32_t x; 1220 1221 tl = nfsm_build(&info.nmi_mb, 4 * NFSX_UNSIGNED); 1222 /* Set both "begin" and "current" to non-garbage. */ 1223 x = txdr_unsigned((u_int32_t)uiop->uio_offset); 1224 *tl++ = x; /* "begin offset" */ 1225 *tl++ = x; /* "current offset" */ 1226 x = txdr_unsigned(len); 1227 *tl++ = x; /* total to this offset */ 1228 *tl = x; /* size of this write */ 1229 1230 } 1231 nfsm_uiotombuf(&info.nmi_mb, uiop, len); 1232 1233 info.nmi_procp = curproc; 1234 info.nmi_cred = VTONFS(vp)->n_wcred; 1235 error = nfs_request(vp, NFSPROC_WRITE, &info); 1236 if (info.nmi_v3) { 1237 wccflag = NFSV3_WCCCHK; 1238 nfsm_wcc_data(vp, wccflag); 1239 } 1240 1241 if (error) { 1242 m_freem(info.nmi_mrep); 1243 goto nfsmout; 1244 } 1245 1246 if (info.nmi_v3) { 1247 wccflag = NFSV3_WCCCHK; 1248 nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED 1249 + NFSX_V3WRITEVERF); 1250 rlen = fxdr_unsigned(int, *tl++); 1251 if (rlen <= 0) { 1252 error = NFSERR_IO; 1253 break; 1254 } else if (rlen < len) { 1255 backup = len - rlen; 1256 uiop->uio_iov->iov_base = 1257 (char *)uiop->uio_iov->iov_base - 1258 backup; 1259 uiop->uio_iov->iov_len += backup; 1260 uiop->uio_offset -= backup; 1261 uiop->uio_resid += backup; 1262 len = rlen; 1263 } 1264 commit = fxdr_unsigned(int, *tl++); 1265 1266 /* 1267 * Return the lowest commitment level 1268 * obtained by any of the RPCs. 1269 */ 1270 if (committed == NFSV3WRITE_FILESYNC) 1271 committed = commit; 1272 else if (committed == NFSV3WRITE_DATASYNC && 1273 commit == NFSV3WRITE_UNSTABLE) 1274 committed = commit; 1275 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) { 1276 bcopy(tl, nmp->nm_verf, 1277 NFSX_V3WRITEVERF); 1278 nmp->nm_flag |= NFSMNT_HASWRITEVERF; 1279 } else if (bcmp(tl, 1280 nmp->nm_verf, NFSX_V3WRITEVERF)) { 1281 *must_commit = 1; 1282 bcopy(tl, nmp->nm_verf, 1283 NFSX_V3WRITEVERF); 1284 } 1285 } else { 1286 nfsm_loadattr(vp, NULL); 1287 } 1288 if (wccflag) 1289 VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime; 1290 m_freem(info.nmi_mrep); 1291 tsiz -= len; 1292 } 1293 nfsmout: 1294 *iomode = committed; 1295 if (error) 1296 uiop->uio_resid = tsiz; 1297 return (error); 1298 } 1299 1300 /* 1301 * nfs mknod rpc 1302 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the 1303 * mode set to specify the file type and the size field for rdev. 
1304 */ 1305 int 1306 nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1307 struct vattr *vap) 1308 { 1309 struct nfsv2_sattr *sp; 1310 struct nfsm_info info; 1311 u_int32_t *tl; 1312 int32_t t1; 1313 struct vnode *newvp = NULL; 1314 struct nfsnode *np = NULL; 1315 char *cp2; 1316 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0; 1317 u_int32_t rdev; 1318 1319 info.nmi_v3 = NFS_ISV3(dvp); 1320 1321 if (vap->va_type == VCHR || vap->va_type == VBLK) 1322 rdev = txdr_unsigned(vap->va_rdev); 1323 else if (vap->va_type == VFIFO || vap->va_type == VSOCK) 1324 rdev = nfs_xdrneg1; 1325 else { 1326 VOP_ABORTOP(dvp, cnp); 1327 return (EOPNOTSUPP); 1328 } 1329 nfsstats.rpccnt[NFSPROC_MKNOD]++; 1330 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1331 4 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + 1332 NFSX_SATTR(info.nmi_v3)); 1333 nfsm_fhtom(&info, dvp, info.nmi_v3); 1334 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1335 1336 if (info.nmi_v3) { 1337 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 1338 *tl++ = vtonfsv3_type(vap->va_type); 1339 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1340 if (vap->va_type == VCHR || vap->va_type == VBLK) { 1341 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED); 1342 *tl++ = txdr_unsigned(major(vap->va_rdev)); 1343 *tl = txdr_unsigned(minor(vap->va_rdev)); 1344 } 1345 } else { 1346 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1347 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1348 sp->sa_uid = nfs_xdrneg1; 1349 sp->sa_gid = nfs_xdrneg1; 1350 sp->sa_size = rdev; 1351 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1352 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1353 } 1354 1355 KASSERT(cnp->cn_proc == curproc); 1356 info.nmi_procp = cnp->cn_proc; 1357 info.nmi_cred = cnp->cn_cred; 1358 error = nfs_request(dvp, NFSPROC_MKNOD, &info); 1359 if (!error) { 1360 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1361 if (!gotvp) { 1362 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1363 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1364 if (!error) 1365 newvp = NFSTOV(np); 1366 } 1367 } 1368 if (info.nmi_v3) 1369 nfsm_wcc_data(dvp, wccflag); 1370 m_freem(info.nmi_mrep); 1371 1372 nfsmout: 1373 if (error) { 1374 if (newvp) 1375 vput(newvp); 1376 } else { 1377 if (cnp->cn_flags & MAKEENTRY) 1378 nfs_cache_enter(dvp, newvp, cnp); 1379 *vpp = newvp; 1380 } 1381 pool_put(&namei_pool, cnp->cn_pnbuf); 1382 VTONFS(dvp)->n_flag |= NMODIFIED; 1383 if (!wccflag) 1384 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1385 return (error); 1386 } 1387 1388 /* 1389 * nfs mknod vop 1390 * just call nfs_mknodrpc() to do the work. 1391 */ 1392 int 1393 nfs_mknod(void *v) 1394 { 1395 struct vop_mknod_args *ap = v; 1396 struct vnode *newvp; 1397 int error; 1398 1399 error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap); 1400 if (!error) 1401 vput(newvp); 1402 1403 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1404 1405 return (error); 1406 } 1407 1408 int 1409 nfs_create(void *v) 1410 { 1411 struct vop_create_args *ap = v; 1412 struct vnode *dvp = ap->a_dvp; 1413 struct vattr *vap = ap->a_vap; 1414 struct componentname *cnp = ap->a_cnp; 1415 struct nfsv2_sattr *sp; 1416 struct nfsm_info info; 1417 struct timespec ts; 1418 u_int32_t *tl; 1419 int32_t t1; 1420 struct nfsnode *np = NULL; 1421 struct vnode *newvp = NULL; 1422 caddr_t cp2; 1423 int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0; 1424 1425 info.nmi_v3 = NFS_ISV3(dvp); 1426 1427 /* 1428 * Oops, not for me.. 
1429 */ 1430 if (vap->va_type == VSOCK) 1431 return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap)); 1432 1433 if (vap->va_vaflags & VA_EXCLUSIVE) 1434 fmode |= O_EXCL; 1435 1436 again: 1437 nfsstats.rpccnt[NFSPROC_CREATE]++; 1438 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1439 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + 1440 NFSX_SATTR(info.nmi_v3)); 1441 nfsm_fhtom(&info, dvp, info.nmi_v3); 1442 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1443 if (info.nmi_v3) { 1444 tl = nfsm_build(&info.nmi_mb, NFSX_UNSIGNED); 1445 if (fmode & O_EXCL) { 1446 *tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE); 1447 tl = nfsm_build(&info.nmi_mb, NFSX_V3CREATEVERF); 1448 arc4random_buf(tl, sizeof(*tl) * 2); 1449 } else { 1450 *tl = txdr_unsigned(NFSV3CREATE_UNCHECKED); 1451 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1452 } 1453 } else { 1454 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1455 sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); 1456 sp->sa_uid = nfs_xdrneg1; 1457 sp->sa_gid = nfs_xdrneg1; 1458 sp->sa_size = 0; 1459 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1460 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1461 } 1462 1463 KASSERT(cnp->cn_proc == curproc); 1464 info.nmi_procp = cnp->cn_proc; 1465 info.nmi_cred = cnp->cn_cred; 1466 error = nfs_request(dvp, NFSPROC_CREATE, &info); 1467 if (!error) { 1468 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1469 if (!gotvp) { 1470 error = nfs_lookitup(dvp, cnp->cn_nameptr, 1471 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np); 1472 if (!error) 1473 newvp = NFSTOV(np); 1474 } 1475 } 1476 if (info.nmi_v3) 1477 nfsm_wcc_data(dvp, wccflag); 1478 m_freem(info.nmi_mrep); 1479 1480 nfsmout: 1481 if (error) { 1482 if (newvp) { 1483 vput(newvp); 1484 newvp = NULL; 1485 } 1486 if (info.nmi_v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) { 1487 fmode &= ~O_EXCL; 1488 goto again; 1489 } 1490 } else if (info.nmi_v3 && (fmode & O_EXCL)) { 1491 getnanotime(&ts); 1492 if (vap->va_atime.tv_nsec == VNOVAL) 1493 vap->va_atime = ts; 1494 if (vap->va_mtime.tv_nsec == VNOVAL) 1495 vap->va_mtime = ts; 1496 error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc); 1497 } 1498 if (!error) { 1499 if (cnp->cn_flags & MAKEENTRY) 1500 nfs_cache_enter(dvp, newvp, cnp); 1501 *ap->a_vpp = newvp; 1502 } 1503 pool_put(&namei_pool, cnp->cn_pnbuf); 1504 VTONFS(dvp)->n_flag |= NMODIFIED; 1505 if (!wccflag) 1506 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1507 VN_KNOTE(ap->a_dvp, NOTE_WRITE); 1508 return (error); 1509 } 1510 1511 /* 1512 * nfs file remove call 1513 * To try and make nfs semantics closer to ufs semantics, a file that has 1514 * other processes using the vnode is renamed instead of removed and then 1515 * removed later on the last close. 
1516 * - If v_usecount > 1 1517 * If a rename is not already in the works 1518 * call nfs_sillyrename() to set it up 1519 * else 1520 * do the remove rpc 1521 */ 1522 int 1523 nfs_remove(void *v) 1524 { 1525 struct vop_remove_args *ap = v; 1526 struct vnode *vp = ap->a_vp; 1527 struct vnode *dvp = ap->a_dvp; 1528 struct componentname *cnp = ap->a_cnp; 1529 struct nfsnode *np = VTONFS(vp); 1530 int error = 0; 1531 struct vattr vattr; 1532 1533 #ifdef DIAGNOSTIC 1534 if ((cnp->cn_flags & HASBUF) == 0) 1535 panic("nfs_remove: no name"); 1536 if (vp->v_usecount < 1) 1537 panic("nfs_remove: bad v_usecount"); 1538 #endif 1539 if (vp->v_type == VDIR) 1540 error = EPERM; 1541 else if (vp->v_usecount == 1 || (np->n_sillyrename && 1542 VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 && 1543 vattr.va_nlink > 1)) { 1544 /* 1545 * Purge the name cache so that the chance of a lookup for 1546 * the name succeeding while the remove is in progress is 1547 * minimized. Without node locking it can still happen, such 1548 * that an I/O op returns ESTALE, but since you get this if 1549 * another host removes the file.. 1550 */ 1551 cache_purge(vp); 1552 /* 1553 * throw away biocache buffers, mainly to avoid 1554 * unnecessary delayed writes later. 1555 */ 1556 error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc); 1557 /* Do the rpc */ 1558 if (error != EINTR) 1559 error = nfs_removerpc(dvp, cnp->cn_nameptr, 1560 cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc); 1561 /* 1562 * Kludge City: If the first reply to the remove rpc is lost.. 1563 * the reply to the retransmitted request will be ENOENT 1564 * since the file was in fact removed 1565 * Therefore, we cheat and return success. 1566 */ 1567 if (error == ENOENT) 1568 error = 0; 1569 } else if (!np->n_sillyrename) 1570 error = nfs_sillyrename(dvp, vp, cnp); 1571 pool_put(&namei_pool, cnp->cn_pnbuf); 1572 NFS_INVALIDATE_ATTRCACHE(np); 1573 VN_KNOTE(vp, NOTE_DELETE); 1574 VN_KNOTE(dvp, NOTE_WRITE); 1575 if (vp == dvp) 1576 vrele(vp); 1577 else 1578 vput(vp); 1579 vput(dvp); 1580 return (error); 1581 } 1582 1583 /* 1584 * nfs file remove rpc called from nfs_inactive 1585 */ 1586 int 1587 nfs_removeit(struct sillyrename *sp) 1588 { 1589 KASSERT(VOP_ISLOCKED(sp->s_dvp)); 1590 /* 1591 * Make sure that the directory vnode is still valid. 1592 * 1593 * NFS can potentially try to nuke a silly *after* the directory 1594 * has already been pushed out on a forced unmount. Since the silly 1595 * is going to go away anyway, this is fine. 1596 */ 1597 if (sp->s_dvp->v_type == VBAD) 1598 return (0); 1599 return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred, 1600 NULL)); 1601 } 1602 1603 /* 1604 * Nfs remove rpc, called from nfs_remove() and nfs_removeit(). 
1605 */ 1606 int 1607 nfs_removerpc(struct vnode *dvp, char *name, int namelen, struct ucred *cred, 1608 struct proc *proc) 1609 { 1610 struct nfsm_info info; 1611 u_int32_t *tl; 1612 int32_t t1; 1613 caddr_t cp2; 1614 int error = 0, wccflag = NFSV3_WCCRATTR; 1615 1616 info.nmi_v3 = NFS_ISV3(dvp); 1617 1618 nfsstats.rpccnt[NFSPROC_REMOVE]++; 1619 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1620 NFSX_UNSIGNED + nfsm_rndup(namelen)); 1621 nfsm_fhtom(&info, dvp, info.nmi_v3); 1622 nfsm_strtom(name, namelen, NFS_MAXNAMLEN); 1623 1624 info.nmi_procp = proc; 1625 info.nmi_cred = cred; 1626 error = nfs_request(dvp, NFSPROC_REMOVE, &info); 1627 if (info.nmi_v3) 1628 nfsm_wcc_data(dvp, wccflag); 1629 m_freem(info.nmi_mrep); 1630 1631 nfsmout: 1632 VTONFS(dvp)->n_flag |= NMODIFIED; 1633 if (!wccflag) 1634 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1635 return (error); 1636 } 1637 1638 /* 1639 * nfs file rename call 1640 */ 1641 int 1642 nfs_rename(void *v) 1643 { 1644 struct vop_rename_args *ap = v; 1645 struct vnode *fvp = ap->a_fvp; 1646 struct vnode *tvp = ap->a_tvp; 1647 struct vnode *fdvp = ap->a_fdvp; 1648 struct vnode *tdvp = ap->a_tdvp; 1649 struct componentname *tcnp = ap->a_tcnp; 1650 struct componentname *fcnp = ap->a_fcnp; 1651 int error; 1652 1653 #ifdef DIAGNOSTIC 1654 if ((tcnp->cn_flags & HASBUF) == 0 || 1655 (fcnp->cn_flags & HASBUF) == 0) 1656 panic("nfs_rename: no name"); 1657 #endif 1658 /* Check for cross-device rename */ 1659 if ((fvp->v_mount != tdvp->v_mount) || 1660 (tvp && (fvp->v_mount != tvp->v_mount))) { 1661 error = EXDEV; 1662 goto out; 1663 } 1664 1665 /* 1666 * If the tvp exists and is in use, sillyrename it before doing the 1667 * rename of the new file over it. 1668 */ 1669 if (tvp && tvp->v_usecount > 1 && !VTONFS(tvp)->n_sillyrename && 1670 tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) { 1671 VN_KNOTE(tvp, NOTE_DELETE); 1672 vput(tvp); 1673 tvp = NULL; 1674 } 1675 1676 error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen, 1677 tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred, 1678 tcnp->cn_proc); 1679 1680 VN_KNOTE(fdvp, NOTE_WRITE); 1681 VN_KNOTE(tdvp, NOTE_WRITE); 1682 1683 if (fvp->v_type == VDIR) { 1684 if (tvp != NULL && tvp->v_type == VDIR) 1685 cache_purge(tdvp); 1686 cache_purge(fdvp); 1687 } 1688 out: 1689 if (tdvp == tvp) 1690 vrele(tdvp); 1691 else 1692 vput(tdvp); 1693 if (tvp) 1694 vput(tvp); 1695 vrele(fdvp); 1696 vrele(fvp); 1697 /* 1698 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 1699 */ 1700 if (error == ENOENT) 1701 error = 0; 1702 return (error); 1703 } 1704 1705 /* 1706 * nfs file rename rpc called from nfs_remove() above 1707 */ 1708 int 1709 nfs_renameit(struct vnode *sdvp, struct componentname *scnp, 1710 struct sillyrename *sp) 1711 { 1712 return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen, 1713 sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, curproc)); 1714 } 1715 1716 /* 1717 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit(). 
1718 */ 1719 int 1720 nfs_renamerpc(struct vnode *fdvp, char *fnameptr, int fnamelen, 1721 struct vnode *tdvp, char *tnameptr, int tnamelen, struct ucred *cred, 1722 struct proc *proc) 1723 { 1724 struct nfsm_info info; 1725 u_int32_t *tl; 1726 int32_t t1; 1727 caddr_t cp2; 1728 int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR; 1729 1730 info.nmi_v3 = NFS_ISV3(fdvp); 1731 1732 nfsstats.rpccnt[NFSPROC_RENAME]++; 1733 info.nmi_mb = info.nmi_mreq = nfsm_reqhead((NFSX_FH(info.nmi_v3) + 1734 NFSX_UNSIGNED) * 2 + nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen)); 1735 nfsm_fhtom(&info, fdvp, info.nmi_v3); 1736 nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN); 1737 nfsm_fhtom(&info, tdvp, info.nmi_v3); 1738 nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN); 1739 1740 info.nmi_procp = proc; 1741 info.nmi_cred = cred; 1742 error = nfs_request(fdvp, NFSPROC_RENAME, &info); 1743 if (info.nmi_v3) { 1744 nfsm_wcc_data(fdvp, fwccflag); 1745 nfsm_wcc_data(tdvp, twccflag); 1746 } 1747 m_freem(info.nmi_mrep); 1748 1749 nfsmout: 1750 VTONFS(fdvp)->n_flag |= NMODIFIED; 1751 VTONFS(tdvp)->n_flag |= NMODIFIED; 1752 if (!fwccflag) 1753 NFS_INVALIDATE_ATTRCACHE(VTONFS(fdvp)); 1754 if (!twccflag) 1755 NFS_INVALIDATE_ATTRCACHE(VTONFS(tdvp)); 1756 return (error); 1757 } 1758 1759 /* 1760 * nfs hard link create call 1761 */ 1762 int 1763 nfs_link(void *v) 1764 { 1765 struct vop_link_args *ap = v; 1766 struct vnode *vp = ap->a_vp; 1767 struct vnode *dvp = ap->a_dvp; 1768 struct componentname *cnp = ap->a_cnp; 1769 struct nfsm_info info; 1770 u_int32_t *tl; 1771 int32_t t1; 1772 caddr_t cp2; 1773 int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0; 1774 1775 info.nmi_v3 = NFS_ISV3(vp); 1776 1777 if (dvp->v_mount != vp->v_mount) { 1778 pool_put(&namei_pool, cnp->cn_pnbuf); 1779 vput(dvp); 1780 return (EXDEV); 1781 } 1782 error = vn_lock(vp, LK_EXCLUSIVE); 1783 if (error != 0) { 1784 VOP_ABORTOP(dvp, cnp); 1785 vput(dvp); 1786 return (error); 1787 } 1788 1789 /* 1790 * Push all writes to the server, so that the attribute cache 1791 * doesn't get "out of sync" with the server. 1792 * XXX There should be a better way! 
1793 */ 1794 VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc); 1795 1796 nfsstats.rpccnt[NFSPROC_LINK]++; 1797 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(2 * NFSX_FH(info.nmi_v3) + 1798 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1799 nfsm_fhtom(&info, vp, info.nmi_v3); 1800 nfsm_fhtom(&info, dvp, info.nmi_v3); 1801 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1802 1803 info.nmi_procp = cnp->cn_proc; 1804 info.nmi_cred = cnp->cn_cred; 1805 error = nfs_request(vp, NFSPROC_LINK, &info); 1806 if (info.nmi_v3) { 1807 nfsm_postop_attr(vp, attrflag); 1808 nfsm_wcc_data(dvp, wccflag); 1809 } 1810 m_freem(info.nmi_mrep); 1811 nfsmout: 1812 pool_put(&namei_pool, cnp->cn_pnbuf); 1813 VTONFS(dvp)->n_flag |= NMODIFIED; 1814 if (!attrflag) 1815 NFS_INVALIDATE_ATTRCACHE(VTONFS(vp)); 1816 if (!wccflag) 1817 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1818 1819 VN_KNOTE(vp, NOTE_LINK); 1820 VN_KNOTE(dvp, NOTE_WRITE); 1821 VOP_UNLOCK(vp); 1822 vput(dvp); 1823 return (error); 1824 } 1825 1826 /* 1827 * nfs symbolic link create call 1828 */ 1829 int 1830 nfs_symlink(void *v) 1831 { 1832 struct vop_symlink_args *ap = v; 1833 struct vnode *dvp = ap->a_dvp; 1834 struct vattr *vap = ap->a_vap; 1835 struct componentname *cnp = ap->a_cnp; 1836 struct nfsv2_sattr *sp; 1837 struct nfsm_info info; 1838 u_int32_t *tl; 1839 int32_t t1; 1840 caddr_t cp2; 1841 int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp; 1842 struct vnode *newvp = NULL; 1843 1844 info.nmi_v3 = NFS_ISV3(dvp); 1845 1846 nfsstats.rpccnt[NFSPROC_SYMLINK]++; 1847 slen = strlen(ap->a_target); 1848 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1849 2 * NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + 1850 NFSX_SATTR(info.nmi_v3)); 1851 nfsm_fhtom(&info, dvp, info.nmi_v3); 1852 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1853 if (info.nmi_v3) 1854 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1855 nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN); 1856 if (!info.nmi_v3) { 1857 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1858 sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode); 1859 sp->sa_uid = nfs_xdrneg1; 1860 sp->sa_gid = nfs_xdrneg1; 1861 sp->sa_size = nfs_xdrneg1; 1862 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1863 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1864 } 1865 1866 info.nmi_procp = cnp->cn_proc; 1867 info.nmi_cred = cnp->cn_cred; 1868 error = nfs_request(dvp, NFSPROC_SYMLINK, &info); 1869 if (info.nmi_v3) { 1870 if (!error) 1871 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1872 nfsm_wcc_data(dvp, wccflag); 1873 } 1874 m_freem(info.nmi_mrep); 1875 1876 nfsmout: 1877 if (newvp) 1878 vput(newvp); 1879 pool_put(&namei_pool, cnp->cn_pnbuf); 1880 VTONFS(dvp)->n_flag |= NMODIFIED; 1881 if (!wccflag) 1882 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1883 VN_KNOTE(dvp, NOTE_WRITE); 1884 vput(dvp); 1885 return (error); 1886 } 1887 1888 /* 1889 * nfs make dir call 1890 */ 1891 int 1892 nfs_mkdir(void *v) 1893 { 1894 struct vop_mkdir_args *ap = v; 1895 struct vnode *dvp = ap->a_dvp; 1896 struct vattr *vap = ap->a_vap; 1897 struct componentname *cnp = ap->a_cnp; 1898 struct nfsv2_sattr *sp; 1899 struct nfsm_info info; 1900 u_int32_t *tl; 1901 int32_t t1; 1902 int len; 1903 struct nfsnode *np = NULL; 1904 struct vnode *newvp = NULL; 1905 caddr_t cp2; 1906 int error = 0, wccflag = NFSV3_WCCRATTR; 1907 int gotvp = 0; 1908 1909 info.nmi_v3 = NFS_ISV3(dvp); 1910 1911 len = cnp->cn_namelen; 1912 nfsstats.rpccnt[NFSPROC_MKDIR]++; 1913 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 
+ 1914 NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(info.nmi_v3)); 1915 nfsm_fhtom(&info, dvp, info.nmi_v3); 1916 nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN); 1917 1918 if (info.nmi_v3) { 1919 nfsm_v3attrbuild(&info.nmi_mb, vap, 0); 1920 } else { 1921 sp = nfsm_build(&info.nmi_mb, NFSX_V2SATTR); 1922 sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode); 1923 sp->sa_uid = nfs_xdrneg1; 1924 sp->sa_gid = nfs_xdrneg1; 1925 sp->sa_size = nfs_xdrneg1; 1926 txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); 1927 txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); 1928 } 1929 1930 info.nmi_procp = cnp->cn_proc; 1931 info.nmi_cred = cnp->cn_cred; 1932 error = nfs_request(dvp, NFSPROC_MKDIR, &info); 1933 if (!error) 1934 nfsm_mtofh(dvp, newvp, info.nmi_v3, gotvp); 1935 if (info.nmi_v3) 1936 nfsm_wcc_data(dvp, wccflag); 1937 m_freem(info.nmi_mrep); 1938 1939 nfsmout: 1940 VTONFS(dvp)->n_flag |= NMODIFIED; 1941 if (!wccflag) 1942 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 1943 1944 if (error == 0 && newvp == NULL) { 1945 error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred, 1946 cnp->cn_proc, &np); 1947 if (!error) { 1948 newvp = NFSTOV(np); 1949 if (newvp->v_type != VDIR) 1950 error = EEXIST; 1951 } 1952 } 1953 if (error) { 1954 if (newvp) 1955 vput(newvp); 1956 } else { 1957 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK); 1958 if (cnp->cn_flags & MAKEENTRY) 1959 nfs_cache_enter(dvp, newvp, cnp); 1960 *ap->a_vpp = newvp; 1961 } 1962 pool_put(&namei_pool, cnp->cn_pnbuf); 1963 vput(dvp); 1964 return (error); 1965 } 1966 1967 /* 1968 * nfs remove directory call 1969 */ 1970 int 1971 nfs_rmdir(void *v) 1972 { 1973 struct vop_rmdir_args *ap = v; 1974 struct vnode *vp = ap->a_vp; 1975 struct vnode *dvp = ap->a_dvp; 1976 struct componentname *cnp = ap->a_cnp; 1977 struct nfsm_info info; 1978 u_int32_t *tl; 1979 int32_t t1; 1980 caddr_t cp2; 1981 int error = 0, wccflag = NFSV3_WCCRATTR; 1982 1983 info.nmi_v3 = NFS_ISV3(dvp); 1984 1985 nfsstats.rpccnt[NFSPROC_RMDIR]++; 1986 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + 1987 NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); 1988 nfsm_fhtom(&info, dvp, info.nmi_v3); 1989 nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN); 1990 1991 info.nmi_procp = cnp->cn_proc; 1992 info.nmi_cred = cnp->cn_cred; 1993 error = nfs_request(dvp, NFSPROC_RMDIR, &info); 1994 if (info.nmi_v3) 1995 nfsm_wcc_data(dvp, wccflag); 1996 m_freem(info.nmi_mrep); 1997 1998 nfsmout: 1999 pool_put(&namei_pool, cnp->cn_pnbuf); 2000 VTONFS(dvp)->n_flag |= NMODIFIED; 2001 if (!wccflag) 2002 NFS_INVALIDATE_ATTRCACHE(VTONFS(dvp)); 2003 2004 VN_KNOTE(dvp, NOTE_WRITE|NOTE_LINK); 2005 VN_KNOTE(vp, NOTE_DELETE); 2006 2007 cache_purge(vp); 2008 vput(vp); 2009 vput(dvp); 2010 /* 2011 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 2012 */ 2013 if (error == ENOENT) 2014 error = 0; 2015 return (error); 2016 } 2017 2018 2019 /* 2020 * The readdir logic below has a big design bug. It stores the NFS cookie in 2021 * the returned uio->uio_offset but does not store the verifier (it cannot). 2022 * Instead, the code stores the verifier in the nfsnode and applies that 2023 * verifies to all cookies, no matter what verifier was originally with 2024 * the cookie. 2025 * 2026 * From a practical standpoint, this is not a problem since almost all 2027 * NFS servers do not change the validity of cookies across deletes 2028 * and inserts. 
2029 */ 2030 2031 struct nfs_dirent { 2032 u_int32_t cookie[2]; 2033 struct dirent dirent; 2034 }; 2035 2036 #define NFS_DIRHDSIZ (sizeof (struct nfs_dirent) - (MAXNAMLEN + 1)) 2037 #define NFS_DIRENT_OVERHEAD offsetof(struct nfs_dirent, dirent) 2038 2039 /* 2040 * nfs readdir call 2041 */ 2042 int 2043 nfs_readdir(void *v) 2044 { 2045 struct vop_readdir_args *ap = v; 2046 struct vnode *vp = ap->a_vp; 2047 struct nfsnode *np = VTONFS(vp); 2048 struct uio *uio = ap->a_uio; 2049 int tresid, error = 0; 2050 struct vattr vattr; 2051 int cnt; 2052 u_int64_t newoff = uio->uio_offset; 2053 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2054 struct uio readdir_uio; 2055 struct iovec readdir_iovec; 2056 struct proc * p = uio->uio_procp; 2057 int done = 0, eof = 0; 2058 struct ucred *cred = ap->a_cred; 2059 void *data; 2060 2061 if (vp->v_type != VDIR) 2062 return (EPERM); 2063 /* 2064 * First, check for a hit on the EOF offset cache 2065 */ 2066 if (np->n_direofoffset != 0 && 2067 uio->uio_offset == np->n_direofoffset) { 2068 if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 && 2069 timespeccmp(&np->n_mtime, &vattr.va_mtime, ==)) { 2070 nfsstats.direofcache_hits++; 2071 *ap->a_eofflag = 1; 2072 return (0); 2073 } 2074 } 2075 2076 if (uio->uio_resid < NFS_FABLKSIZE) 2077 return (EINVAL); 2078 2079 tresid = uio->uio_resid; 2080 2081 if (uio->uio_rw != UIO_READ) 2082 return (EINVAL); 2083 2084 if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) 2085 (void)nfs_fsinfo(nmp, vp, cred, p); 2086 2087 cnt = 5; 2088 2089 /* M_ZERO to avoid leaking kernel data in dirent padding */ 2090 data = malloc(NFS_DIRBLKSIZ, M_TEMP, M_WAITOK|M_ZERO); 2091 do { 2092 struct nfs_dirent *ndp = data; 2093 2094 readdir_iovec.iov_len = NFS_DIRBLKSIZ; 2095 readdir_iovec.iov_base = data; 2096 readdir_uio.uio_offset = newoff; 2097 readdir_uio.uio_iov = &readdir_iovec; 2098 readdir_uio.uio_iovcnt = 1; 2099 readdir_uio.uio_segflg = UIO_SYSSPACE; 2100 readdir_uio.uio_rw = UIO_READ; 2101 readdir_uio.uio_resid = NFS_DIRBLKSIZ; 2102 readdir_uio.uio_procp = curproc; 2103 2104 if (nmp->nm_flag & NFSMNT_RDIRPLUS) { 2105 error = nfs_readdirplusrpc(vp, &readdir_uio, cred, 2106 &eof, p); 2107 if (error == NFSERR_NOTSUPP) 2108 nmp->nm_flag &= ~NFSMNT_RDIRPLUS; 2109 } 2110 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) 2111 error = nfs_readdirrpc(vp, &readdir_uio, cred, &eof); 2112 2113 if (error == NFSERR_BAD_COOKIE) 2114 error = EINVAL; 2115 2116 while (error == 0 && 2117 ndp < (struct nfs_dirent *)readdir_iovec.iov_base) { 2118 struct dirent *dp = &ndp->dirent; 2119 int reclen = dp->d_reclen; 2120 2121 dp->d_reclen -= NFS_DIRENT_OVERHEAD; 2122 dp->d_off = fxdr_hyper(&ndp->cookie[0]); 2123 2124 if (uio->uio_resid < dp->d_reclen) { 2125 eof = 0; 2126 done = 1; 2127 break; 2128 } 2129 2130 if ((error = uiomove(dp, dp->d_reclen, uio))) 2131 break; 2132 2133 newoff = fxdr_hyper(&ndp->cookie[0]); 2134 2135 ndp = (struct nfs_dirent *)((u_int8_t *)ndp + reclen); 2136 } 2137 } while (!error && !done && !eof && cnt--); 2138 2139 free(data, M_TEMP, NFS_DIRBLKSIZ); 2140 data = NULL; 2141 2142 uio->uio_offset = newoff; 2143 2144 if (!error && (eof || uio->uio_resid == tresid)) { 2145 nfsstats.direofcache_misses++; 2146 *ap->a_eofflag = 1; 2147 return (0); 2148 } 2149 2150 *ap->a_eofflag = 0; 2151 return (error); 2152 } 2153 2154 2155 /* 2156 * The function below stuffs the cookies in after the name 2157 */ 2158 2159 /* 2160 * Readdir rpc call.
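 * Fills the caller-supplied buffer with struct nfs_dirent records: each
 * record carries the 64-bit NFS cookie in front of the struct dirent that
 * nfs_readdir() later trims by NFS_DIRENT_OVERHEAD and copies out.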
2161 */ 2162 int 2163 nfs_readdirrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2164 int *end_of_directory) 2165 { 2166 int len, left; 2167 struct nfs_dirent *ndp = NULL; 2168 struct dirent *dp = NULL; 2169 struct nfsm_info info; 2170 u_int32_t *tl; 2171 caddr_t cp; 2172 int32_t t1; 2173 caddr_t cp2; 2174 nfsuint64 cookie; 2175 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2176 struct nfsnode *dnp = VTONFS(vp); 2177 u_quad_t fileno; 2178 int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1; 2179 int attrflag; 2180 2181 info.nmi_v3 = NFS_ISV3(vp); 2182 2183 #ifdef DIAGNOSTIC 2184 if (uiop->uio_iovcnt != 1 || 2185 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2186 panic("nfs readdirrpc bad uio"); 2187 #endif 2188 2189 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2190 2191 /* 2192 * Loop around doing readdir rpc's of size nm_readdirsize 2193 * truncated to a multiple of NFS_READDIRBLKSIZ. 2194 * The stopping criteria is EOF or buffer full. 2195 */ 2196 while (more_dirs && bigenough) { 2197 nfsstats.rpccnt[NFSPROC_READDIR]++; 2198 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) 2199 + NFSX_READDIR(info.nmi_v3)); 2200 nfsm_fhtom(&info, vp, info.nmi_v3); 2201 if (info.nmi_v3) { 2202 tl = nfsm_build(&info.nmi_mb, 5 * NFSX_UNSIGNED); 2203 *tl++ = cookie.nfsuquad[0]; 2204 *tl++ = cookie.nfsuquad[1]; 2205 if (cookie.nfsuquad[0] == 0 && 2206 cookie.nfsuquad[1] == 0) { 2207 *tl++ = 0; 2208 *tl++ = 0; 2209 } else { 2210 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2211 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2212 } 2213 } else { 2214 tl = nfsm_build(&info.nmi_mb, 2 * NFSX_UNSIGNED); 2215 *tl++ = cookie.nfsuquad[1]; 2216 } 2217 *tl = txdr_unsigned(nmp->nm_readdirsize); 2218 2219 info.nmi_procp = uiop->uio_procp; 2220 info.nmi_cred = cred; 2221 error = nfs_request(vp, NFSPROC_READDIR, &info); 2222 if (info.nmi_v3) 2223 nfsm_postop_attr(vp, attrflag); 2224 2225 if (error) { 2226 m_freem(info.nmi_mrep); 2227 goto nfsmout; 2228 } 2229 2230 if (info.nmi_v3) { 2231 nfsm_dissect(tl, u_int32_t *, 2232 2 * NFSX_UNSIGNED); 2233 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2234 dnp->n_cookieverf.nfsuquad[1] = *tl; 2235 } 2236 2237 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2238 more_dirs = fxdr_unsigned(int, *tl); 2239 2240 /* loop thru the dir entries, doctoring them to dirent form */ 2241 while (more_dirs && bigenough) { 2242 if (info.nmi_v3) { 2243 nfsm_dissect(tl, u_int32_t *, 2244 3 * NFSX_UNSIGNED); 2245 fileno = fxdr_hyper(tl); 2246 len = fxdr_unsigned(int, *(tl + 2)); 2247 } else { 2248 nfsm_dissect(tl, u_int32_t *, 2249 2 * NFSX_UNSIGNED); 2250 fileno = fxdr_unsigned(u_quad_t, *tl++); 2251 len = fxdr_unsigned(int, *tl); 2252 } 2253 if (len <= 0 || len > NFS_MAXNAMLEN) { 2254 error = EBADRPC; 2255 m_freem(info.nmi_mrep); 2256 goto nfsmout; 2257 } 2258 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD; 2259 left = NFS_READDIRBLKSIZ - blksiz; 2260 if (tlen > left) { 2261 dp->d_reclen += left; 2262 uiop->uio_iov->iov_base += left; 2263 uiop->uio_iov->iov_len -= left; 2264 uiop->uio_resid -= left; 2265 blksiz = 0; 2266 } 2267 if (tlen > uiop->uio_resid) 2268 bigenough = 0; 2269 if (bigenough) { 2270 ndp = (struct nfs_dirent *) 2271 uiop->uio_iov->iov_base; 2272 dp = &ndp->dirent; 2273 dp->d_fileno = fileno; 2274 dp->d_namlen = len; 2275 dp->d_reclen = tlen; 2276 dp->d_type = DT_UNKNOWN; 2277 blksiz += tlen; 2278 if (blksiz == NFS_READDIRBLKSIZ) 2279 blksiz = 0; 2280 uiop->uio_resid -= NFS_DIRHDSIZ; 2281 uiop->uio_iov->iov_base = 2282 (char *)uiop->uio_iov->iov_base + 2283 
NFS_DIRHDSIZ; 2284 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2285 nfsm_mtouio(uiop, len); 2286 cp = uiop->uio_iov->iov_base; 2287 tlen -= NFS_DIRHDSIZ + len; 2288 *cp = '\0'; /* null terminate */ 2289 uiop->uio_iov->iov_base += tlen; 2290 uiop->uio_iov->iov_len -= tlen; 2291 uiop->uio_resid -= tlen; 2292 } else 2293 nfsm_adv(nfsm_rndup(len)); 2294 if (info.nmi_v3) { 2295 nfsm_dissect(tl, u_int32_t *, 2296 3 * NFSX_UNSIGNED); 2297 } else { 2298 nfsm_dissect(tl, u_int32_t *, 2299 2 * NFSX_UNSIGNED); 2300 } 2301 if (bigenough) { 2302 if (info.nmi_v3) { 2303 ndp->cookie[0] = cookie.nfsuquad[0] = 2304 *tl++; 2305 } else 2306 ndp->cookie[0] = 0; 2307 2308 ndp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2309 } else if (info.nmi_v3) 2310 tl += 2; 2311 else 2312 tl++; 2313 more_dirs = fxdr_unsigned(int, *tl); 2314 } 2315 /* 2316 * If at end of rpc data, get the eof boolean 2317 */ 2318 if (!more_dirs) { 2319 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2320 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2321 } 2322 m_freem(info.nmi_mrep); 2323 } 2324 /* 2325 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2326 * by increasing d_reclen for the last record. 2327 */ 2328 if (blksiz > 0) { 2329 left = NFS_READDIRBLKSIZ - blksiz; 2330 dp->d_reclen += left; 2331 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2332 left; 2333 uiop->uio_iov->iov_len -= left; 2334 uiop->uio_resid -= left; 2335 } 2336 2337 /* 2338 * We are now either at the end of the directory or have filled the 2339 * block. 2340 */ 2341 if (bigenough) { 2342 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2343 if (end_of_directory) *end_of_directory = 1; 2344 } else { 2345 if (uiop->uio_resid > 0) 2346 printf("EEK! readdirrpc resid > 0\n"); 2347 } 2348 2349 nfsmout: 2350 return (error); 2351 } 2352 2353 /* 2354 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc(). 2355 */ 2356 int 2357 nfs_readdirplusrpc(struct vnode *vp, struct uio *uiop, struct ucred *cred, 2358 int *end_of_directory, struct proc *p) 2359 { 2360 int len, left; 2361 struct nfs_dirent *ndirp = NULL; 2362 struct dirent *dp = NULL; 2363 struct nfsm_info info; 2364 u_int32_t *tl; 2365 caddr_t cp; 2366 int32_t t1; 2367 struct vnode *newvp; 2368 caddr_t cp2, dpossav1, dpossav2; 2369 struct mbuf *mdsav1, *mdsav2; 2370 struct nameidata nami, *ndp = &nami; 2371 struct componentname *cnp = &ndp->ni_cnd; 2372 nfsuint64 cookie; 2373 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2374 struct nfsnode *dnp = VTONFS(vp), *np; 2375 nfsfh_t *fhp; 2376 u_quad_t fileno; 2377 int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i; 2378 int attrflag, fhsize; 2379 2380 #ifdef DIAGNOSTIC 2381 if (uiop->uio_iovcnt != 1 || 2382 (uiop->uio_resid & (NFS_DIRBLKSIZ - 1))) 2383 panic("nfs readdirplusrpc bad uio"); 2384 #endif 2385 NDINIT(ndp, 0, 0, UIO_SYSSPACE, NULL, p); 2386 ndp->ni_dvp = vp; 2387 newvp = NULLVP; 2388 2389 txdr_hyper(uiop->uio_offset, &cookie.nfsuquad[0]); 2390 2391 /* 2392 * Loop around doing readdir rpc's of size nm_readdirsize 2393 * truncated to a multiple of NFS_READDIRBLKSIZ. 2394 * The stopping criteria is EOF or buffer full. 
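 * Unlike nfs_readdirrpc(), each entry may also carry post-op attributes and
 * a file handle; when both are present they can be used to set d_type and
 * to prime the name cache via nfs_cache_enter().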
2395 */ 2396 while (more_dirs && bigenough) { 2397 nfsstats.rpccnt[NFSPROC_READDIRPLUS]++; 2398 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1) + 6 * NFSX_UNSIGNED); 2399 nfsm_fhtom(&info, vp, 1); 2400 tl = nfsm_build(&info.nmi_mb, 6 * NFSX_UNSIGNED); 2401 *tl++ = cookie.nfsuquad[0]; 2402 *tl++ = cookie.nfsuquad[1]; 2403 if (cookie.nfsuquad[0] == 0 && 2404 cookie.nfsuquad[1] == 0) { 2405 *tl++ = 0; 2406 *tl++ = 0; 2407 } else { 2408 *tl++ = dnp->n_cookieverf.nfsuquad[0]; 2409 *tl++ = dnp->n_cookieverf.nfsuquad[1]; 2410 } 2411 *tl++ = txdr_unsigned(nmp->nm_readdirsize); 2412 *tl = txdr_unsigned(nmp->nm_rsize); 2413 2414 info.nmi_procp = uiop->uio_procp; 2415 info.nmi_cred = cred; 2416 error = nfs_request(vp, NFSPROC_READDIRPLUS, &info); 2417 nfsm_postop_attr(vp, attrflag); 2418 if (error) { 2419 m_freem(info.nmi_mrep); 2420 goto nfsmout; 2421 } 2422 2423 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2424 dnp->n_cookieverf.nfsuquad[0] = *tl++; 2425 dnp->n_cookieverf.nfsuquad[1] = *tl++; 2426 more_dirs = fxdr_unsigned(int, *tl); 2427 2428 /* loop thru the dir entries, doctoring them to 4bsd form */ 2429 while (more_dirs && bigenough) { 2430 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2431 fileno = fxdr_hyper(tl); 2432 len = fxdr_unsigned(int, *(tl + 2)); 2433 if (len <= 0 || len > NFS_MAXNAMLEN) { 2434 error = EBADRPC; 2435 m_freem(info.nmi_mrep); 2436 goto nfsmout; 2437 } 2438 tlen = DIRENT_RECSIZE(len) + NFS_DIRENT_OVERHEAD; 2439 left = NFS_READDIRBLKSIZ - blksiz; 2440 if (tlen > left) { 2441 dp->d_reclen += left; 2442 uiop->uio_iov->iov_base = 2443 (char *)uiop->uio_iov->iov_base + left; 2444 uiop->uio_iov->iov_len -= left; 2445 uiop->uio_resid -= left; 2446 blksiz = 0; 2447 } 2448 if (tlen > uiop->uio_resid) 2449 bigenough = 0; 2450 if (bigenough) { 2451 ndirp = (struct nfs_dirent *) 2452 uiop->uio_iov->iov_base; 2453 dp = &ndirp->dirent; 2454 dp->d_fileno = fileno; 2455 dp->d_namlen = len; 2456 dp->d_reclen = tlen; 2457 dp->d_type = DT_UNKNOWN; 2458 blksiz += tlen; 2459 if (blksiz == NFS_READDIRBLKSIZ) 2460 blksiz = 0; 2461 uiop->uio_resid -= NFS_DIRHDSIZ; 2462 uiop->uio_iov->iov_base = 2463 (char *)uiop->uio_iov->iov_base + 2464 NFS_DIRHDSIZ; 2465 uiop->uio_iov->iov_len -= NFS_DIRHDSIZ; 2466 cnp->cn_nameptr = uiop->uio_iov->iov_base; 2467 cnp->cn_namelen = len; 2468 nfsm_mtouio(uiop, len); 2469 cp = uiop->uio_iov->iov_base; 2470 tlen -= NFS_DIRHDSIZ + len; 2471 *cp = '\0'; 2472 uiop->uio_iov->iov_base += tlen; 2473 uiop->uio_iov->iov_len -= tlen; 2474 uiop->uio_resid -= tlen; 2475 } else 2476 nfsm_adv(nfsm_rndup(len)); 2477 nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED); 2478 if (bigenough) { 2479 ndirp->cookie[0] = cookie.nfsuquad[0] = *tl++; 2480 ndirp->cookie[1] = cookie.nfsuquad[1] = *tl++; 2481 } else 2482 tl += 2; 2483 2484 /* 2485 * Since the attributes are before the file handle 2486 * (sigh), we must skip over the attributes and then 2487 * come back and get them. 
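 * The current reply position (nmi_md/nmi_dpos) is saved, the attributes are
 * skipped to reach the file handle, and the saved position is then restored
 * so nfsm_loadattr() can parse them.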
2488 */ 2489 attrflag = fxdr_unsigned(int, *tl); 2490 if (attrflag) { 2491 dpossav1 = info.nmi_dpos; 2492 mdsav1 = info.nmi_md; 2493 nfsm_adv(NFSX_V3FATTR); 2494 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2495 doit = fxdr_unsigned(int, *tl); 2496 if (doit) { 2497 nfsm_getfh(fhp, fhsize, 1); 2498 if (NFS_CMPFH(dnp, fhp, fhsize)) { 2499 vref(vp); 2500 newvp = vp; 2501 np = dnp; 2502 } else { 2503 error = nfs_nget(vp->v_mount, 2504 fhp, fhsize, &np); 2505 if (error) 2506 doit = 0; 2507 else 2508 newvp = NFSTOV(np); 2509 } 2510 } 2511 if (doit && bigenough) { 2512 dpossav2 = info.nmi_dpos; 2513 info.nmi_dpos = dpossav1; 2514 mdsav2 = info.nmi_md; 2515 info.nmi_md = mdsav1; 2516 nfsm_loadattr(newvp, NULL); 2517 info.nmi_dpos = dpossav2; 2518 info.nmi_md = mdsav2; 2519 dp->d_type = IFTODT( 2520 VTTOIF(np->n_vattr.va_type)); 2521 if (cnp->cn_namelen <= 2522 NAMECACHE_MAXLEN) { 2523 ndp->ni_vp = newvp; 2524 cache_purge(ndp->ni_dvp); 2525 nfs_cache_enter(ndp->ni_dvp, 2526 ndp->ni_vp, cnp); 2527 } 2528 } 2529 } else { 2530 /* Just skip over the file handle */ 2531 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2532 i = fxdr_unsigned(int, *tl); 2533 if (i > 0) 2534 nfsm_adv(nfsm_rndup(i)); 2535 } 2536 if (newvp != NULLVP) { 2537 if (newvp == vp) 2538 vrele(newvp); 2539 else 2540 vput(newvp); 2541 newvp = NULLVP; 2542 } 2543 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2544 more_dirs = fxdr_unsigned(int, *tl); 2545 } 2546 /* 2547 * If at end of rpc data, get the eof boolean 2548 */ 2549 if (!more_dirs) { 2550 nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); 2551 more_dirs = (fxdr_unsigned(int, *tl) == 0); 2552 } 2553 m_freem(info.nmi_mrep); 2554 } 2555 /* 2556 * Fill last record, iff any, out to a multiple of NFS_READDIRBLKSIZ 2557 * by increasing d_reclen for the last record. 2558 */ 2559 if (blksiz > 0) { 2560 left = NFS_READDIRBLKSIZ - blksiz; 2561 dp->d_reclen += left; 2562 uiop->uio_iov->iov_base = (char *)uiop->uio_iov->iov_base + 2563 left; 2564 uiop->uio_iov->iov_len -= left; 2565 uiop->uio_resid -= left; 2566 } 2567 2568 /* 2569 * We are now either at the end of the directory or have filled the 2570 * block. 2571 */ 2572 if (bigenough) { 2573 dnp->n_direofoffset = fxdr_hyper(&cookie.nfsuquad[0]); 2574 if (end_of_directory) *end_of_directory = 1; 2575 } else { 2576 if (uiop->uio_resid > 0) 2577 printf("EEK! readdirplusrpc resid > 0\n"); 2578 } 2579 2580 nfsmout: 2581 if (newvp != NULLVP) { 2582 if (newvp == vp) 2583 vrele(newvp); 2584 else 2585 vput(newvp); 2586 } 2587 return (error); 2588 } 2589 2590 /* 2591 * Silly rename. To make the stateless NFS filesystem look a little 2592 * more like "ufs", a remove of an active vnode is translated to a rename 2593 * to a funny looking filename that is removed by nfs_inactive on the 2594 * nfsnode. There is the potential for another process on a different client 2595 * to create the same funny name between the time nfs_lookitup() fails and 2596 * nfs_rename() completes, but...
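 *
 * The replacement name is built from random bits as ".nfs" followed by
 * sixteen hex digits; nfs_inactive() issues the actual remove for it once
 * the last reference to the vnode is dropped.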
2597 */ 2598 int 2599 nfs_sillyrename(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 2600 { 2601 struct sillyrename *sp; 2602 struct nfsnode *np; 2603 int error; 2604 2605 cache_purge(dvp); 2606 np = VTONFS(vp); 2607 sp = malloc(sizeof(*sp), M_NFSREQ, M_WAITOK); 2608 sp->s_cred = crdup(cnp->cn_cred); 2609 sp->s_dvp = dvp; 2610 vref(dvp); 2611 2612 if (vp->v_type == VDIR) { 2613 #ifdef DIAGNOSTIC 2614 printf("nfs: sillyrename dir\n"); 2615 #endif 2616 error = EINVAL; 2617 goto bad; 2618 } 2619 2620 /* Try lookitups until we get one that isn't there */ 2621 while (1) { 2622 /* Fudge together a funny name */ 2623 u_int32_t rnd[2]; 2624 2625 arc4random_buf(&rnd, sizeof rnd); 2626 sp->s_namlen = snprintf(sp->s_name, sizeof sp->s_name, 2627 ".nfs%08X%08X", rnd[0], rnd[1]); 2628 if (sp->s_namlen > sizeof sp->s_name) 2629 sp->s_namlen = strlen(sp->s_name); 2630 2631 if (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2632 cnp->cn_proc, NULL)) 2633 break; 2634 } 2635 2636 error = nfs_renameit(dvp, cnp, sp); 2637 if (error) 2638 goto bad; 2639 error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred, 2640 cnp->cn_proc, &np); 2641 np->n_sillyrename = sp; 2642 return (0); 2643 bad: 2644 vrele(sp->s_dvp); 2645 crfree(sp->s_cred); 2646 free(sp, M_NFSREQ, sizeof(*sp)); 2647 return (error); 2648 } 2649 2650 /* 2651 * Look up a file name and optionally either update the file handle or 2652 * allocate an nfsnode, depending on the value of npp. 2653 * npp == NULL --> just do the lookup 2654 * *npp == NULL --> allocate a new nfsnode and make sure attributes are 2655 * handled too 2656 * *npp != NULL --> update the file handle in the vnode 2657 */ 2658 int 2659 nfs_lookitup(struct vnode *dvp, char *name, int len, struct ucred *cred, 2660 struct proc *procp, struct nfsnode **npp) 2661 { 2662 struct nfsm_info info; 2663 u_int32_t *tl; 2664 int32_t t1; 2665 struct vnode *newvp = NULL; 2666 struct nfsnode *np, *dnp = VTONFS(dvp); 2667 caddr_t cp2; 2668 int error = 0, fhlen, attrflag = 0; 2669 nfsfh_t *nfhp; 2670 2671 info.nmi_v3 = NFS_ISV3(dvp); 2672 2673 nfsstats.rpccnt[NFSPROC_LOOKUP]++; 2674 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(info.nmi_v3) + NFSX_UNSIGNED + 2675 nfsm_rndup(len)); 2676 nfsm_fhtom(&info, dvp, info.nmi_v3); 2677 nfsm_strtom(name, len, NFS_MAXNAMLEN); 2678 2679 info.nmi_procp = procp; 2680 info.nmi_cred = cred; 2681 error = nfs_request(dvp, NFSPROC_LOOKUP, &info); 2682 if (error && !info.nmi_v3) { 2683 m_freem(info.nmi_mrep); 2684 goto nfsmout; 2685 } 2686 2687 if (npp && !error) { 2688 nfsm_getfh(nfhp, fhlen, info.nmi_v3); 2689 if (*npp) { 2690 np = *npp; 2691 np->n_fhp = &np->n_fh; 2692 bcopy(nfhp, np->n_fhp, fhlen); 2693 np->n_fhsize = fhlen; 2694 newvp = NFSTOV(np); 2695 } else if (NFS_CMPFH(dnp, nfhp, fhlen)) { 2696 vref(dvp); 2697 newvp = dvp; 2698 np = dnp; 2699 } else { 2700 error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np); 2701 if (error) { 2702 m_freem(info.nmi_mrep); 2703 return (error); 2704 } 2705 newvp = NFSTOV(np); 2706 } 2707 if (info.nmi_v3) { 2708 nfsm_postop_attr(newvp, attrflag); 2709 if (!attrflag && *npp == NULL) { 2710 m_freem(info.nmi_mrep); 2711 if (newvp == dvp) 2712 vrele(newvp); 2713 else 2714 vput(newvp); 2715 return (ENOENT); 2716 } 2717 } else 2718 nfsm_loadattr(newvp, NULL); 2719 } 2720 m_freem(info.nmi_mrep); 2721 nfsmout: 2722 if (npp && *npp == NULL) { 2723 if (error) { 2724 if (newvp == dvp) 2725 vrele(newvp); 2726 else 2727 vput(newvp); 2728 } else 2729 *npp = np; 2730 } 2731 return (error); 2732 } 2733 2734 /* 2735 
* NFS Version 3 commit rpc 2736 */ 2737 int 2738 nfs_commit(struct vnode *vp, u_quad_t offset, int cnt, struct proc *procp) 2739 { 2740 struct nfsm_info info; 2741 u_int32_t *tl; 2742 int32_t t1; 2743 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2744 caddr_t cp2; 2745 int error = 0, wccflag = NFSV3_WCCRATTR; 2746 2747 if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) 2748 return (0); 2749 nfsstats.rpccnt[NFSPROC_COMMIT]++; 2750 info.nmi_mb = info.nmi_mreq = nfsm_reqhead(NFSX_FH(1)); 2751 nfsm_fhtom(&info, vp, 1); 2752 2753 tl = nfsm_build(&info.nmi_mb, 3 * NFSX_UNSIGNED); 2754 txdr_hyper(offset, tl); 2755 tl += 2; 2756 *tl = txdr_unsigned(cnt); 2757 2758 info.nmi_procp = procp; 2759 info.nmi_cred = VTONFS(vp)->n_wcred; 2760 error = nfs_request(vp, NFSPROC_COMMIT, &info); 2761 nfsm_wcc_data(vp, wccflag); 2762 2763 if (!error) { 2764 nfsm_dissect(tl, u_int32_t *, NFSX_V3WRITEVERF); 2765 if (bcmp(nmp->nm_verf, tl, 2766 NFSX_V3WRITEVERF)) { 2767 bcopy(tl, nmp->nm_verf, 2768 NFSX_V3WRITEVERF); 2769 error = NFSERR_STALEWRITEVERF; 2770 } 2771 } 2772 m_freem(info.nmi_mrep); 2773 2774 nfsmout: 2775 return (error); 2776 } 2777 2778 /* 2779 * Kludge City.. 2780 * - make nfs_bmap() essentially a no-op that does no translation 2781 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc 2782 * (Maybe I could use the process's page mapping, but I was concerned that 2783 * Kernel Write might not be enabled and also figured copyout() would do 2784 * a lot more work than bcopy(), and it currently happens in the 2785 * context of the swapper process (2).) 2786 */ 2787 int 2788 nfs_bmap(void *v) 2789 { 2790 struct vop_bmap_args *ap = v; 2791 struct vnode *vp = ap->a_vp; 2792 2793 if (ap->a_vpp != NULL) 2794 *ap->a_vpp = vp; 2795 if (ap->a_bnp != NULL) 2796 *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize); 2797 return (0); 2798 } 2799 2800 /* 2801 * Strategy routine. 2802 * For async requests when nfsiod(s) are running, queue the request by 2803 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the 2804 * request. 2805 */ 2806 int 2807 nfs_strategy(void *v) 2808 { 2809 struct vop_strategy_args *ap = v; 2810 struct buf *bp = ap->a_bp; 2811 struct proc *p; 2812 int error = 0; 2813 2814 if ((bp->b_flags & (B_PHYS|B_ASYNC)) == (B_PHYS|B_ASYNC)) 2815 panic("nfs physio/async"); 2816 if (bp->b_flags & B_ASYNC) 2817 p = NULL; 2818 else 2819 p = curproc; /* XXX */ 2820 /* 2821 * If the op is asynchronous and an i/o daemon is waiting, 2822 * queue the request, wake it up and wait for completion; 2823 * otherwise just do it ourselves. 2824 */ 2825 if ((bp->b_flags & B_ASYNC) == 0 || nfs_asyncio(bp, 0)) 2826 error = nfs_doio(bp, p); 2827 return (error); 2828 } 2829 2830 /* 2831 * fsync vnode op. Just call nfs_flush() with commit == 1. 2832 */ 2833 int 2834 nfs_fsync(void *v) 2835 { 2836 struct vop_fsync_args *ap = v; 2837 2838 return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1)); 2839 } 2840 2841 /* 2842 * Flush all the blocks associated with a vnode. 2843 * Walk through the buffer pool and push any dirty pages 2844 * associated with the vnode.
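 * For NFSv3 mounts with commit set, buffers marked B_DELWRI|B_NEEDCOMMIT are
 * first pushed to stable storage with a COMMIT RPC covering their combined
 * byte range; whatever is still dirty afterwards is written out with
 * VOP_BWRITE().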
2845 */ 2846 int 2847 nfs_flush(struct vnode *vp, struct ucred *cred, int waitfor, struct proc *p, 2848 int commit) 2849 { 2850 struct nfsnode *np = VTONFS(vp); 2851 struct buf *bp; 2852 int i; 2853 struct buf *nbp; 2854 struct nfsmount *nmp = VFSTONFS(vp->v_mount); 2855 uint64_t slptimeo = INFSLP; 2856 int s, error = 0, slpflag = 0, retv, bvecpos; 2857 int passone = 1; 2858 u_quad_t off = (u_quad_t)-1, endoff = 0, toff; 2859 #ifndef NFS_COMMITBVECSIZ 2860 #define NFS_COMMITBVECSIZ 20 2861 #endif 2862 struct buf *bvec[NFS_COMMITBVECSIZ]; 2863 2864 if (nmp->nm_flag & NFSMNT_INT) 2865 slpflag = PCATCH; 2866 if (!commit) 2867 passone = 0; 2868 /* 2869 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the 2870 * server, but has not been committed to stable storage on the server 2871 * yet. On the first pass, the byte range is worked out and the commit 2872 * rpc is done. On the second pass, nfs_writebp() is called to do the 2873 * job. 2874 */ 2875 again: 2876 bvecpos = 0; 2877 if (NFS_ISV3(vp) && commit) { 2878 s = splbio(); 2879 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) { 2880 if (bvecpos >= NFS_COMMITBVECSIZ) 2881 break; 2882 if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT)) 2883 != (B_DELWRI | B_NEEDCOMMIT)) 2884 continue; 2885 bremfree(bp); 2886 bp->b_flags |= B_WRITEINPROG; 2887 buf_acquire(bp); 2888 2889 /* 2890 * A list of these buffers is kept so that the 2891 * second loop knows which buffers have actually 2892 * been committed. This is necessary, since there 2893 * may be a race between the commit rpc and new 2894 * uncommitted writes on the file. 2895 */ 2896 bvec[bvecpos++] = bp; 2897 toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + 2898 bp->b_dirtyoff; 2899 if (toff < off) 2900 off = toff; 2901 toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff); 2902 if (toff > endoff) 2903 endoff = toff; 2904 } 2905 splx(s); 2906 } 2907 if (bvecpos > 0) { 2908 /* 2909 * Commit data on the server, as required. 2910 */ 2911 bcstats.pendingwrites++; 2912 bcstats.numwrites++; 2913 retv = nfs_commit(vp, off, (int)(endoff - off), p); 2914 if (retv == NFSERR_STALEWRITEVERF) 2915 nfs_clearcommit(vp->v_mount); 2916 /* 2917 * Now, either mark the blocks I/O done or mark the 2918 * blocks dirty, depending on whether the commit 2919 * succeeded. 2920 */ 2921 for (i = 0; i < bvecpos; i++) { 2922 bp = bvec[i]; 2923 bp->b_flags &= ~(B_NEEDCOMMIT | B_WRITEINPROG); 2924 if (retv) { 2925 if (i == 0) 2926 bcstats.pendingwrites--; 2927 brelse(bp); 2928 } else { 2929 if (i > 0) 2930 bcstats.pendingwrites++; 2931 s = splbio(); 2932 buf_undirty(bp); 2933 vp->v_numoutput++; 2934 bp->b_flags |= B_ASYNC; 2935 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR); 2936 bp->b_dirtyoff = bp->b_dirtyend = 0; 2937 biodone(bp); 2938 splx(s); 2939 } 2940 } 2941 } 2942 2943 /* 2944 * Start/do any write(s) that are required.
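 * On the first pass, buffers still marked B_NEEDCOMMIT are skipped and left
 * to the commit path above; the second pass pushes whatever remains,
 * sleeping on busy buffers only when MNT_WAIT was requested.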
2945 */ 2946 loop: 2947 s = splbio(); 2948 LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) { 2949 if (bp->b_flags & B_BUSY) { 2950 if (waitfor != MNT_WAIT || passone) 2951 continue; 2952 bp->b_flags |= B_WANTED; 2953 error = tsleep_nsec(bp, slpflag | (PRIBIO + 1), 2954 "nfsfsync", slptimeo); 2955 splx(s); 2956 if (error) { 2957 if (nfs_sigintr(nmp, NULL, p)) 2958 return (EINTR); 2959 if (slpflag == PCATCH) { 2960 slpflag = 0; 2961 slptimeo = SEC_TO_NSEC(2); 2962 } 2963 } 2964 goto loop; 2965 } 2966 if ((bp->b_flags & B_DELWRI) == 0) 2967 panic("nfs_fsync: not dirty"); 2968 if ((passone || !commit) && (bp->b_flags & B_NEEDCOMMIT)) 2969 continue; 2970 bremfree(bp); 2971 if (passone || !commit) { 2972 bp->b_flags |= B_ASYNC; 2973 } else { 2974 bp->b_flags |= (B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT); 2975 } 2976 buf_acquire(bp); 2977 splx(s); 2978 VOP_BWRITE(bp); 2979 goto loop; 2980 } 2981 splx(s); 2982 if (passone) { 2983 passone = 0; 2984 goto again; 2985 } 2986 if (waitfor == MNT_WAIT) { 2987 loop2: 2988 s = splbio(); 2989 error = vwaitforio(vp, slpflag, "nfs_fsync", slptimeo); 2990 splx(s); 2991 if (error) { 2992 if (nfs_sigintr(nmp, NULL, p)) 2993 return (EINTR); 2994 if (slpflag == PCATCH) { 2995 slpflag = 0; 2996 slptimeo = SEC_TO_NSEC(2); 2997 } 2998 goto loop2; 2999 } 3000 3001 if (!LIST_EMPTY(&vp->v_dirtyblkhd) && commit) { 3002 #if 0 3003 vprint("nfs_fsync: dirty", vp); 3004 #endif 3005 goto loop; 3006 } 3007 } 3008 if (np->n_flag & NWRITEERR) { 3009 error = np->n_error; 3010 np->n_flag &= ~NWRITEERR; 3011 } 3012 return (error); 3013 } 3014 3015 /* 3016 * Return POSIX pathconf information applicable to nfs. 3017 * Fake it. For v3 we could ask the server, but such code 3018 * hasn't been written yet. 3019 */ 3020 /* ARGSUSED */ 3021 int 3022 nfs_pathconf(void *v) 3023 { 3024 struct vop_pathconf_args *ap = v; 3025 struct nfsmount *nmp = VFSTONFS(ap->a_vp->v_mount); 3026 int error = 0; 3027 3028 switch (ap->a_name) { 3029 case _PC_LINK_MAX: 3030 *ap->a_retval = LINK_MAX; 3031 break; 3032 case _PC_NAME_MAX: 3033 *ap->a_retval = NAME_MAX; 3034 break; 3035 case _PC_CHOWN_RESTRICTED: 3036 *ap->a_retval = 1; 3037 break; 3038 case _PC_NO_TRUNC: 3039 *ap->a_retval = 1; 3040 break; 3041 case _PC_ALLOC_SIZE_MIN: 3042 *ap->a_retval = NFS_FABLKSIZE; 3043 break; 3044 case _PC_FILESIZEBITS: 3045 *ap->a_retval = 64; 3046 break; 3047 case _PC_REC_INCR_XFER_SIZE: 3048 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize); 3049 break; 3050 case _PC_REC_MAX_XFER_SIZE: 3051 *ap->a_retval = -1; /* means ``unlimited'' */ 3052 break; 3053 case _PC_REC_MIN_XFER_SIZE: 3054 *ap->a_retval = min(nmp->nm_rsize, nmp->nm_wsize); 3055 break; 3056 case _PC_REC_XFER_ALIGN: 3057 *ap->a_retval = PAGE_SIZE; 3058 break; 3059 case _PC_SYMLINK_MAX: 3060 *ap->a_retval = MAXPATHLEN; 3061 break; 3062 case _PC_2_SYMLINKS: 3063 *ap->a_retval = 1; 3064 break; 3065 case _PC_TIMESTAMP_RESOLUTION: 3066 *ap->a_retval = NFS_ISV3(ap->a_vp) ? 1 : 1000; 3067 break; 3068 default: 3069 error = EINVAL; 3070 break; 3071 } 3072 3073 return (error); 3074 } 3075 3076 /* 3077 * NFS advisory byte-level locks. 3078 */ 3079 int 3080 nfs_advlock(void *v) 3081 { 3082 struct vop_advlock_args *ap = v; 3083 struct nfsnode *np = VTONFS(ap->a_vp); 3084 3085 return (lf_advlock(&np->n_lockf, np->n_size, ap->a_id, ap->a_op, 3086 ap->a_fl, ap->a_flags)); 3087 } 3088 3089 /* 3090 * Print out the contents of an nfsnode. 
3091 */ 3092 int 3093 nfs_print(void *v) 3094 { 3095 struct vop_print_args *ap = v; 3096 struct vnode *vp = ap->a_vp; 3097 struct nfsnode *np = VTONFS(vp); 3098 3099 printf("tag VT_NFS, fileid %lld fsid 0x%lx", 3100 np->n_vattr.va_fileid, np->n_vattr.va_fsid); 3101 #ifdef FIFO 3102 if (vp->v_type == VFIFO) 3103 fifo_printinfo(vp); 3104 #endif 3105 printf("\n"); 3106 return (0); 3107 } 3108 3109 /* 3110 * Just call nfs_writebp() with the force argument set to 1. 3111 */ 3112 int 3113 nfs_bwrite(void *v) 3114 { 3115 struct vop_bwrite_args *ap = v; 3116 3117 return (nfs_writebp(ap->a_bp, 1)); 3118 } 3119 3120 /* 3121 * This is a clone of vop_generic_bwrite(), except that B_WRITEINPROG isn't set unless 3122 * the force flag is one and it also handles the B_NEEDCOMMIT flag. 3123 */ 3124 int 3125 nfs_writebp(struct buf *bp, int force) 3126 { 3127 int oldflags = bp->b_flags, retv = 1; 3128 struct proc *p = curproc; /* XXX */ 3129 off_t off; 3130 size_t cnt; 3131 int s; 3132 struct vnode *vp; 3133 struct nfsnode *np; 3134 3135 if(!(bp->b_flags & B_BUSY)) 3136 panic("bwrite: buffer is not busy???"); 3137 3138 vp = bp->b_vp; 3139 np = VTONFS(vp); 3140 3141 bp->b_flags &= ~(B_READ|B_DONE|B_ERROR); 3142 3143 s = splbio(); 3144 buf_undirty(bp); 3145 3146 if ((oldflags & B_ASYNC) && !(oldflags & B_DELWRI) && p) 3147 ++p->p_ru.ru_oublock; 3148 3149 bp->b_vp->v_numoutput++; 3150 splx(s); 3151 3152 /* 3153 * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not 3154 * an actual write will have to be scheduled via. VOP_STRATEGY(). 3155 * If B_WRITEINPROG is already set, then push it with a write anyhow. 3156 */ 3157 if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) { 3158 off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; 3159 cnt = bp->b_dirtyend - bp->b_dirtyoff; 3160 3161 rw_enter_write(&np->n_commitlock); 3162 if (!(bp->b_flags & B_NEEDCOMMIT)) { 3163 rw_exit_write(&np->n_commitlock); 3164 return (0); 3165 } 3166 3167 /* 3168 * If it's already been committed by somebody else, 3169 * bail. 3170 */ 3171 if (!nfs_in_committed_range(vp, bp)) { 3172 int pushedrange = 0; 3173 /* 3174 * Since we're going to do this, push as much 3175 * as we can. 3176 */ 3177 3178 if (nfs_in_tobecommitted_range(vp, bp)) { 3179 pushedrange = 1; 3180 off = np->n_pushlo; 3181 cnt = np->n_pushhi - np->n_pushlo; 3182 } 3183 3184 bp->b_flags |= B_WRITEINPROG; 3185 bcstats.pendingwrites++; 3186 bcstats.numwrites++; 3187 retv = nfs_commit(bp->b_vp, off, cnt, curproc); 3188 bp->b_flags &= ~B_WRITEINPROG; 3189 3190 if (retv == 0) { 3191 if (pushedrange) 3192 nfs_merge_commit_ranges(vp); 3193 else 3194 nfs_add_committed_range(vp, bp); 3195 } else 3196 bcstats.pendingwrites--; 3197 } else 3198 retv = 0; /* It has already been committed. */ 3199 3200 rw_exit_write(&np->n_commitlock); 3201 if (!retv) { 3202 bp->b_dirtyoff = bp->b_dirtyend = 0; 3203 bp->b_flags &= ~B_NEEDCOMMIT; 3204 s = splbio(); 3205 biodone(bp); 3206 splx(s); 3207 } else if (retv == NFSERR_STALEWRITEVERF) 3208 nfs_clearcommit(bp->b_vp->v_mount); 3209 } 3210 if (retv) { 3211 buf_flip_dma(bp); 3212 if (force) 3213 bp->b_flags |= B_WRITEINPROG; 3214 VOP_STRATEGY(bp); 3215 } 3216 3217 if( (oldflags & B_ASYNC) == 0) { 3218 int rtval; 3219 3220 bp->b_flags |= B_RAW; 3221 rtval = biowait(bp); 3222 if (!(oldflags & B_DELWRI) && p) { 3223 ++p->p_ru.ru_oublock; 3224 } 3225 brelse(bp); 3226 return (rtval); 3227 } 3228 3229 return (0); 3230 } 3231 3232 /* 3233 * nfs special file access vnode op. 
3234 * Essentially just get vattr and then imitate iaccess() since the device is 3235 * local to the client. 3236 */ 3237 int 3238 nfsspec_access(void *v) 3239 { 3240 struct vop_access_args *ap = v; 3241 struct vattr va; 3242 struct vnode *vp = ap->a_vp; 3243 int error; 3244 3245 /* 3246 * Disallow write attempts on filesystems mounted read-only; 3247 * unless the file is a socket, fifo, or a block or character 3248 * device resident on the filesystem. 3249 */ 3250 if ((ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { 3251 switch (vp->v_type) { 3252 case VREG: 3253 case VDIR: 3254 case VLNK: 3255 return (EROFS); 3256 default: 3257 break; 3258 } 3259 } 3260 3261 error = VOP_GETATTR(vp, &va, ap->a_cred, ap->a_p); 3262 if (error) 3263 return (error); 3264 3265 return (vaccess(vp->v_type, va.va_mode, va.va_uid, va.va_gid, 3266 ap->a_mode, ap->a_cred)); 3267 } 3268 3269 int 3270 nfs_poll(void *v) 3271 { 3272 struct vop_poll_args *ap = v; 3273 3274 /* 3275 * We should really check to see if I/O is possible. 3276 */ 3277 return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 3278 } 3279 3280 /* 3281 * Read wrapper for special devices. 3282 */ 3283 int 3284 nfsspec_read(void *v) 3285 { 3286 struct vop_read_args *ap = v; 3287 struct nfsnode *np = VTONFS(ap->a_vp); 3288 3289 /* 3290 * Set access flag. 3291 */ 3292 np->n_flag |= NACC; 3293 getnanotime(&np->n_atim); 3294 return (spec_read(ap)); 3295 } 3296 3297 /* 3298 * Write wrapper for special devices. 3299 */ 3300 int 3301 nfsspec_write(void *v) 3302 { 3303 struct vop_write_args *ap = v; 3304 struct nfsnode *np = VTONFS(ap->a_vp); 3305 3306 /* 3307 * Set update flag. 3308 */ 3309 np->n_flag |= NUPD; 3310 getnanotime(&np->n_mtim); 3311 return (spec_write(ap)); 3312 } 3313 3314 /* 3315 * Close wrapper for special devices. 3316 * 3317 * Update the times on the nfsnode then do device close. 3318 */ 3319 int 3320 nfsspec_close(void *v) 3321 { 3322 struct vop_close_args *ap = v; 3323 struct vnode *vp = ap->a_vp; 3324 struct nfsnode *np = VTONFS(vp); 3325 struct vattr vattr; 3326 3327 if (np->n_flag & (NACC | NUPD)) { 3328 np->n_flag |= NCHG; 3329 if (vp->v_usecount == 1 && 3330 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3331 VATTR_NULL(&vattr); 3332 if (np->n_flag & NACC) 3333 vattr.va_atime = np->n_atim; 3334 if (np->n_flag & NUPD) 3335 vattr.va_mtime = np->n_mtim; 3336 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3337 } 3338 } 3339 return (spec_close(ap)); 3340 } 3341 3342 #ifdef FIFO 3343 /* 3344 * Read wrapper for fifos. 3345 */ 3346 int 3347 nfsfifo_read(void *v) 3348 { 3349 struct vop_read_args *ap = v; 3350 struct nfsnode *np = VTONFS(ap->a_vp); 3351 3352 /* 3353 * Set access flag. 3354 */ 3355 np->n_flag |= NACC; 3356 getnanotime(&np->n_atim); 3357 return (fifo_read(ap)); 3358 } 3359 3360 /* 3361 * Write wrapper for fifos. 3362 */ 3363 int 3364 nfsfifo_write(void *v) 3365 { 3366 struct vop_write_args *ap = v; 3367 struct nfsnode *np = VTONFS(ap->a_vp); 3368 3369 /* 3370 * Set update flag. 3371 */ 3372 np->n_flag |= NUPD; 3373 getnanotime(&np->n_mtim); 3374 return (fifo_write(ap)); 3375 } 3376 3377 /* 3378 * Close wrapper for fifos. 3379 * 3380 * Update the times on the nfsnode then do fifo close. 
3381 */ 3382 int 3383 nfsfifo_close(void *v) 3384 { 3385 struct vop_close_args *ap = v; 3386 struct vnode *vp = ap->a_vp; 3387 struct nfsnode *np = VTONFS(vp); 3388 struct vattr vattr; 3389 3390 if (np->n_flag & (NACC | NUPD)) { 3391 if (np->n_flag & NACC) { 3392 getnanotime(&np->n_atim); 3393 } 3394 if (np->n_flag & NUPD) { 3395 getnanotime(&np->n_mtim); 3396 } 3397 np->n_flag |= NCHG; 3398 if (vp->v_usecount == 1 && 3399 (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 3400 VATTR_NULL(&vattr); 3401 if (np->n_flag & NACC) 3402 vattr.va_atime = np->n_atim; 3403 if (np->n_flag & NUPD) 3404 vattr.va_mtime = np->n_mtim; 3405 (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); 3406 } 3407 } 3408 return (fifo_close(ap)); 3409 } 3410 3411 int 3412 nfsfifo_reclaim(void *v) 3413 { 3414 fifo_reclaim(v); 3415 return (nfs_reclaim(v)); 3416 } 3417 #endif /* ! FIFO */ 3418