/*	$OpenBSD: nfs_bio.c,v 1.76 2014/07/08 17:19:26 deraadt Exp $	*/
/*	$NetBSD: nfs_bio.c,v 1.25.4.2 1996/07/08 20:47:04 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/time.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;
struct nfs_bufqhead nfs_bufq;
uint32_t nfs_bufqmax, nfs_bufqlen;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, diff;
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, bn, rabn;
	caddr_t baddr;
	int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin;
	off_t offdiff;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 */
	if (np->n_flag & NMODIFIED) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime;
		}
	}

	/*
	 * update the cache read creds for this vnode
	 */
	if (np->n_rcred)
		crfree(np->n_rcred);
	np->n_rcred = cred;
	crhold(cred);

	do {
		if ((vp->v_flag & VROOT) && vp->v_type == VLNK) {
			return (nfs_readlinkrpc(vp, uio, cred));
		}
		baddr = (caddr_t)0;
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;
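			/*
			 * Map the uio offset to a logical block (lbn), an
			 * offset within that block (on), and a buffer cache
			 * block number (bn) in DEV_BSIZE units; e.g. with a
			 * biosize of, say, 8192 and DEV_BSIZE of 512,
			 * logical block 3 becomes bn = 3 * 16 = 48.
			 */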
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);
			bn = lbn * (biosize / DEV_BSIZE);
			not_readin = 1;

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead &&
				    (lbn + 1 + nra) * biosize < np->n_size;
				    nra++) {
					rabn = (lbn + 1 + nra) *
					    (biosize / DEV_BSIZE);
					if (!incore(vp, rabn)) {
						rabp = nfs_getcacheblk(vp,
						    rabn, biosize, p);
						if (!rabp)
							return (EINTR);
						if ((rabp->b_flags &
						    (B_DELWRI | B_DONE)) == 0) {
							rabp->b_flags |=
							    (B_READ | B_ASYNC);
							if (nfs_asyncio(rabp, 1)) {
								rabp->b_flags |= B_INVAL;
								brelse(rabp);
							}
						} else
							brelse(rabp);
					}
				}
			}

again:
			bp = nfs_getcacheblk(vp, bn, biosize, p);
			if (!bp)
				return (EINTR);
			got_buf = 1;
			if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
				bp->b_flags |= B_READ;
				not_readin = 0;
				error = nfs_doio(bp, p);
				if (error) {
					brelse(bp);
					return (error);
				}
			}
			n = min((unsigned)(biosize - on), uio->uio_resid);
			offdiff = np->n_size - uio->uio_offset;
			if (offdiff < (off_t)n)
				n = (int)offdiff;
			if (not_readin && n > 0) {
				if (on < bp->b_validoff ||
				    (on + n) > bp->b_validend) {
					bp->b_flags |= B_INVAFTERWRITE;
					if (bp->b_dirtyend > 0) {
						if ((bp->b_flags & B_DELWRI) == 0)
							panic("nfsbioread");
						if (VOP_BWRITE(bp) == EINTR)
							return (EINTR);
					} else
						brelse(bp);
					goto again;
				}
			}
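			/*
			 * Clamp the transfer to the valid region of the
			 * buffer: a read starting at or beyond b_validend
			 * can take nothing from this block.
			 */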
			diff = (on >= bp->b_validend) ? 0 :
			    (bp->b_validend - on);
			if (diff < n)
				n = diff;
			break;
		case VLNK:
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, 0, NFS_MAXPATHLEN, p);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				error = nfs_doio(bp, p);
				if (error) {
					brelse(bp);
					return (error);
				}
			}
			n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			got_buf = 1;
			on = 0;
			break;
		default:
			panic("nfsbioread: type %x unexpected", vp->v_type);
			break;
		}

		if (n > 0) {
			if (!baddr)
				baddr = bp->b_data;
			error = uiomove(baddr + on, (int)n, uio);
		}

		if (vp->v_type == VLNK)
			n = 0;

		if (got_buf)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(void *v)
{
	struct vop_write_args *ap = v;
	int biosize;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, bn;
	int n, on, error = 0, extended = 0, wrotedta = 0, truncated = 0;
	ssize_t overrun;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);

	/* do the filesize rlimit check */
	if ((error = vn_fsizechk(vp, uio, ioflag, &overrun)))
		return (error);

	/*
	 * update the cache write creds for this node.
	 */
	if (np->n_wcred)
		crfree(np->n_wcred);
	np->n_wcred = cred;
	crhold(cred);

	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {

		/*
		 * XXX make sure we aren't cached in the VM page cache
		 */
		uvm_vnp_uncache(vp);

		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp) {
			error = EINTR;
			goto out;
		}
		np->n_flag |= NMODIFIED;
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			uvm_vnp_setsize(vp, (u_long)np->n_size);
			extended = 1;
		} else if (uio->uio_offset + n < np->n_size)
			truncated = 1;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR) {
				error = EINTR;
				goto out;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			goto out;
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
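		/*
		 * Extend the buffer's valid range to cover the bytes just
		 * written.  If the old valid region does not touch the new
		 * dirty region, only the freshly written bytes are known
		 * to be good, so the valid range restarts from those.
		 */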
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		wrotedta = 1;

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */

		if (NFS_ISV3(vp)) {
			rw_enter_write(&np->n_commitlock);
			if (bp->b_flags & B_NEEDCOMMIT) {
				bp->b_flags &= ~B_NEEDCOMMIT;
				nfs_del_tobecommitted_range(vp, bp);
			}
			nfs_del_committed_range(vp, bp);
			rw_exit_write(&np->n_commitlock);
		} else
			bp->b_flags &= ~B_NEEDCOMMIT;

		if (ioflag & IO_SYNC) {
			bp->b_proc = p;
			error = VOP_BWRITE(bp);
			if (error)
				goto out;
		} else if ((n + on) == biosize) {
			bp->b_proc = NULL;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	/*out: XXX belongs here??? */
	if (wrotedta)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0) |
		    (truncated ? NOTE_TRUNCATE : 0));

out:
	/* correct the result for writes clamped by vn_fsizechk() */
	uio->uio_resid += overrun;

	return (error);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct proc *p)
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

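	/*
	 * On interruptible mounts, sleep in getblk() with PCATCH so a
	 * signal can wake us; on each wakeup, give up with NULL if the
	 * signal should interrupt this operation, otherwise retry with
	 * a two second timeout.
	 */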
	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, p))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *np = VTONFS(vp);
	int error, sintr, stimeo;

	error = sintr = stimeo = 0;

	if (ISSET(nmp->nm_flag, NFSMNT_INT)) {
		sintr = PCATCH;
		stimeo = 2 * hz;
	}

	/* First wait for any other process doing a flush to complete. */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO|sintr, "nfsvinval", stimeo);
		if (error && sintr && nfs_sigintr(nmp, NULL, p))
			return (EINTR);
	}

	/* Now, flush as required. */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, sintr, 0);
	while (error) {
		if (sintr && nfs_sigintr(nmp, NULL, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, stimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
int
nfs_asyncio(struct buf *bp, int readahead)
{
	if (nfs_numasync == 0)
		goto out;

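	/*
	 * The queue is bounded: readahead is optional work and is simply
	 * dropped when the nfsiods are backed up, while other requests
	 * wait for the queue to drain.
	 */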
	while (nfs_bufqlen > nfs_bufqmax)
		if (readahead)
			goto out;
		else
			tsleep(&nfs_bufqlen, PRIBIO, "nfs_bufq", 0);

	if ((bp->b_flags & B_READ) == 0) {
		bp->b_flags |= B_WRITEINPROG;
	}

	TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
	nfs_bufqlen++;

	wakeup_one(&nfs_bufq);
	return (0);

out:
	nfsstats.forcedsync++;
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct proc *p)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int s, error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		/* mapping was done by vmapbuf() */
		io.iov_base = bp->b_data;
		uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
		if (bp->b_flags & B_READ) {
			uiop->uio_rw = UIO_READ;
			nfsstats.read_physios++;
			error = nfs_readrpc(vp, uiop);
		} else {
			iomode = NFSV3WRITE_DATASYNC;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_physios++;
			error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else if (bp->b_flags & B_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
			nfsstats.read_bios++;
			bcstats.pendingreads++;
			bcstats.numreads++;
			error = nfs_readrpc(vp, uiop);
			if (!error) {
				bp->b_validoff = 0;
				if (uiop->uio_resid) {
					/*
					 * If len > 0, there is a hole in the
					 * file and no writes after the hole
					 * have been pushed to the server yet.
					 * Just zero fill the rest of the
					 * valid area.
					 */
					diff = bp->b_bcount - uiop->uio_resid;
					len = np->n_size -
					    ((((off_t)bp->b_blkno) << DEV_BSHIFT)
					    + diff);
					if (len > 0) {
						len = min(len, uiop->uio_resid);
						bzero((char *)bp->b_data + diff,
						    len);
						bp->b_validend = diff + len;
					} else
						bp->b_validend = diff;
				} else
					bp->b_validend = bp->b_bcount;
			}
			if (p && (vp->v_flag & VTEXT) &&
			    (timespeccmp(&np->n_mtime,
			    &np->n_vattr.va_mtime, !=))) {
				uprintf("Process killed due to text file modification\n");
				psignal(p, SIGKILL);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			nfsstats.readlink_bios++;
			bcstats.pendingreads++;
			bcstats.numreads++;
			error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
			break;
		default:
			panic("nfs_doio: type %x unexpected", vp->v_type);
			break;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;
		bcstats.pendingwrites++;
		bcstats.numwrites++;
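		/*
		 * Only a plain async write (not already marked
		 * B_NEEDCOMMIT and not B_NOCACHE) may go out unstable, to
		 * be committed later; everything else is pushed to stable
		 * storage with FILESYNC.
		 */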
		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) ==
		    B_ASYNC)
			iomode = NFSV3WRITE_UNSTABLE;
		else
			iomode = NFSV3WRITE_FILESYNC;
		bp->b_flags |= B_WRITEINPROG;
		error = nfs_writerpc(vp, uiop, &iomode, &must_commit);

		rw_enter_write(&np->n_commitlock);
		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
			bp->b_flags |= B_NEEDCOMMIT;
			nfs_add_tobecommitted_range(vp, bp);
		} else {
			bp->b_flags &= ~B_NEEDCOMMIT;
			nfs_del_committed_range(vp, bp);
		}
		rw_exit_write(&np->n_commitlock);

		bp->b_flags &= ~B_WRITEINPROG;

		/*
		 * For an interrupted write, the buffer is still valid and the
		 * write hasn't been pushed to the server yet, so we can't set
		 * B_ERROR and report the interruption by setting B_EINTR. For
		 * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
		 * is essentially a noop.
		 * For the case of a V3 write rpc not being committed to stable
		 * storage, the block is still dirty and requires either a commit
		 * rpc or another write rpc with iomode == NFSV3WRITE_FILESYNC
		 * before the block is reused. This is indicated by setting the
		 * B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			s = splbio();
			buf_dirty(bp);
			splx(s);

			if (!(bp->b_flags & B_ASYNC) && error)
				bp->b_flags |= B_EINTR;
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = np->n_error = error;
				np->n_flag |= NWRITEERR;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	s = splbio();
	biodone(bp);
	splx(s);
	return (error);
}