/*	$OpenBSD: nfs_bio.c,v 1.81 2016/02/13 15:45:05 stefan Exp $	*/
/*	$NetBSD: nfs_bio.c,v 1.25.4.2 1996/07/08 20:47:04 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/time.h>

#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;
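/*
 * Buffers handed to nfs_asyncio() are queued on nfs_bufq for the
 * nfsiod threads; nfs_bufqlen tracks the current queue depth
 * against nfs_bufqmax.
 */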
struct nfs_bufqhead nfs_bufq;
uint32_t nfs_bufqmax, nfs_bufqlen;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, diff;
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, bn, rabn;
	caddr_t baddr;
	int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin;
	off_t offdiff;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 */
	if (np->n_flag & NMODIFIED) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime;
		}
	}

	/*
	 * update the cache read creds for this vnode
	 */
	if (np->n_rcred)
		crfree(np->n_rcred);
	np->n_rcred = cred;
	crhold(cred);

	do {
	    if ((vp->v_flag & VROOT) && vp->v_type == VLNK) {
		    return (nfs_readlinkrpc(vp, uio, cred));
	    }
	    baddr = NULL;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
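		/*
		 * Split the offset into the logical block number (lbn),
		 * the offset within that block (on), and the buffer cache
		 * block address (bn) in DEV_BSIZE units.  The mask assumes
		 * biosize is a power of two.  For example, with an 8192
		 * byte biosize, offset 12345 gives lbn 1, on 4153, bn 16.
		 */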
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		bn = lbn * (biosize / DEV_BSIZE);
		not_readin = 1;

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead &&
			(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp, 1)) {
				    rabp->b_flags |= B_INVAL;
				    brelse(rabp);
				}
			    } else
				brelse(rabp);
			}
		    }
		}

again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp)
			return (EINTR);
		got_buf = 1;
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			not_readin = 0;
			error = nfs_doio(bp, p);
			if (error) {
			    brelse(bp);
			    return (error);
			}
		}
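		/* Clamp the copy to this block and to the end of file. */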
		n = ulmin(biosize - on, uio->uio_resid);
		offdiff = np->n_size - uio->uio_offset;
		if (offdiff < (off_t)n)
			n = (int)offdiff;
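		/*
		 * The block came from the cache (this pass issued no read)
		 * and the requested range falls outside its valid region:
		 * push any dirty data, then loop back to re-read the
		 * whole block.
		 */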
		if (not_readin && n > 0) {
			if (on < bp->b_validoff || (on + n) > bp->b_validend) {
				bp->b_flags |= B_INVAFTERWRITE;
				if (bp->b_dirtyend > 0) {
				    if ((bp->b_flags & B_DELWRI) == 0)
					panic("nfsbioread");
				    if (VOP_BWRITE(bp) == EINTR)
					return (EINTR);
				} else
				    brelse(bp);
				goto again;
			}
		}
		diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
		if (diff < n)
			n = diff;
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, 0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp, p);
			if (error) {
				brelse(bp);
				return (error);
			}
		}
		n = ulmin(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    default:
		panic("nfsbioread: type %x unexpected", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove(baddr + on, n, uio);
	    }

	    if (vp->v_type == VLNK)
		n = 0;

	    if (got_buf)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(void *v)
{
	struct vop_write_args *ap = v;
	int biosize;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, bn;
	int n, on, error = 0, extended = 0, wrotedta = 0, truncated = 0;
	ssize_t overrun;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);

	/* do the filesize rlimit check */
	if ((error = vn_fsizechk(vp, uio, ioflag, &overrun)))
		return (error);

	/*
	 * update the cache write creds for this node.
	 */
	if (np->n_wcred)
		crfree(np->n_wcred);
	np->n_wcred = cred;
	crhold(cred);

	/*
	 * Use nm_rsize, not nm_wsize, so that all buffer cache blocks
	 * are the same size within a filesystem.  nfs_writerpc() will
	 * still use nm_wsize when sizing the RPCs.
	 */
	biosize = nmp->nm_rsize;
	do {

		/*
		 * XXX make sure we aren't cached in the VM page cache
		 */
		uvm_vnp_uncache(vp);

		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = ulmin(biosize - on, uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp) {
			error = EINTR;
			goto out;
		}
		np->n_flag |= NMODIFIED;
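		/*
		 * Track size changes: extended/truncated feed the
		 * NOTE_EXTEND/NOTE_TRUNCATE kevents posted after the
		 * write loop.
		 */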
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			uvm_vnp_setsize(vp, (u_long)np->n_size);
			extended = 1;
		} else if (uio->uio_offset + n < np->n_size)
			truncated = 1;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR) {
				error = EINTR;
				goto out;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			goto out;
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
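		/*
		 * Keep the valid region consistent with the new dirty
		 * region: if they are disjoint, the dirty bytes define
		 * the valid range, otherwise grow the valid range to
		 * cover them.
		 */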
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		wrotedta = 1;

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */

		if (NFS_ISV3(vp)) {
			rw_enter_write(&np->n_commitlock);
			if (bp->b_flags & B_NEEDCOMMIT) {
				bp->b_flags &= ~B_NEEDCOMMIT;
				nfs_del_tobecommitted_range(vp, bp);
			}
			nfs_del_committed_range(vp, bp);
			rw_exit_write(&np->n_commitlock);
		} else
			bp->b_flags &= ~B_NEEDCOMMIT;
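
		/*
		 * Three ways to push the block: a synchronous write for
		 * IO_SYNC, an asynchronous write once the block has been
		 * completely filled, and a delayed write for a partial
		 * block.
		 */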
		if (ioflag & IO_SYNC) {
			bp->b_proc = p;
			error = VOP_BWRITE(bp);
			if (error)
				goto out;
		} else if ((n + on) == biosize) {
			bp->b_proc = NULL;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

/*out: XXX belongs here??? */
	if (wrotedta)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0) |
		    (truncated ? NOTE_TRUNCATE : 0));

out:
	/* correct the result for writes clamped by vn_fsizechk() */
	uio->uio_resid += overrun;

	return (error);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct proc *p)
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
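		/*
		 * With PCATCH, getblk() returns NULL when its sleep is
		 * interrupted; bail out if a signal is pending, otherwise
		 * retry, polling every two seconds.
		 */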
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, p))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	struct nfsmount		*nmp = VFSTONFS(vp->v_mount);
	struct nfsnode		*np = VTONFS(vp);
	int			 error, sintr, stimeo;

	error = sintr = stimeo = 0;

	if (ISSET(nmp->nm_flag, NFSMNT_INT)) {
		sintr = PCATCH;
		stimeo = 2 * hz;
	}

	/* First wait for any other process doing a flush to complete. */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO|sintr, "nfsvinval", stimeo);
		if (error && sintr && nfs_sigintr(nmp, NULL, p))
			return (EINTR);
	}

	/* Now, flush as required. */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, sintr, 0);
	while (error) {
		if (sintr && nfs_sigintr(nmp, NULL, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, stimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
int
nfs_asyncio(struct buf *bp, int readahead)
{
	if (nfs_numasync == 0)
		goto out;

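	/*
	 * The queue is backed up: readahead is optional, so drop it and
	 * let the caller fall back; other requests wait for an nfsiod
	 * to drain the queue.
	 */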
	while (nfs_bufqlen > nfs_bufqmax)
		if (readahead)
			goto out;
		else
			tsleep(&nfs_bufqlen, PRIBIO, "nfs_bufq", 0);

	if ((bp->b_flags & B_READ) == 0) {
		bp->b_flags |= B_WRITEINPROG;
	}

	TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
	nfs_bufqlen++;

	wakeup_one(&nfs_bufq);
	return (0);

out:
	nfsstats.forcedsync++;
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct proc *p)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int s, error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

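	/*
	 * Describe the buffer's data window with a single-segment
	 * kernel uio for the RPC routines below.
	 */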
	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    /* mapping was done by vmapbuf() */
	    io.iov_base = bp->b_data;
	    uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	    if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	    } else {
		iomode = NFSV3WRITE_DATASYNC;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else if (bp->b_flags & B_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
		nfsstats.read_bios++;
		bcstats.pendingreads++;
		bcstats.numreads++;
		error = nfs_readrpc(vp, uiop);
		if (!error) {
		    bp->b_validoff = 0;
		    if (uiop->uio_resid) {
			/*
			 * If len > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet.
			 * Just zero fill the rest of the valid area.
			 */
			diff = bp->b_bcount - uiop->uio_resid;
			len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
				+ diff);
			if (len > 0) {
			    len = ulmin(len, uiop->uio_resid);
			    memset((char *)bp->b_data + diff, 0, len);
			    bp->b_validend = diff + len;
			} else
			    bp->b_validend = diff;
		    } else
			bp->b_validend = bp->b_bcount;
		}
		if (p && (vp->v_flag & VTEXT) &&
		    (timespeccmp(&np->n_mtime, &np->n_vattr.va_mtime, !=))) {
			uprintf("Process killed due to text file modification\n");
			psignal(p, SIGKILL);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		bcstats.pendingreads++;
		bcstats.numreads++;
		error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
		break;
	    default:
		panic("nfs_doio: type %x unexpected", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else {
	    io.iov_len = uiop->uio_resid = bp->b_dirtyend
		- bp->b_dirtyoff;
	    uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
		+ bp->b_dirtyoff;
	    io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
	    uiop->uio_rw = UIO_WRITE;
	    nfsstats.write_bios++;
	    bcstats.pendingwrites++;
	    bcstats.numwrites++;
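	    /*
	     * A plain async write may go out unstable: the server can
	     * reply before the data reaches stable storage, and the
	     * block is committed later.  Everything else is written
	     * FILESYNC.
	     */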
	    if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC)
		iomode = NFSV3WRITE_UNSTABLE;
	    else
		iomode = NFSV3WRITE_FILESYNC;
	    bp->b_flags |= B_WRITEINPROG;
	    error = nfs_writerpc(vp, uiop, &iomode, &must_commit);

	    rw_enter_write(&np->n_commitlock);
	    if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		bp->b_flags |= B_NEEDCOMMIT;
		nfs_add_tobecommitted_range(vp, bp);
	    } else {
		bp->b_flags &= ~B_NEEDCOMMIT;
		nfs_del_committed_range(vp, bp);
	    }
	    rw_exit_write(&np->n_commitlock);

	    bp->b_flags &= ~B_WRITEINPROG;

	    /*
	     * For an interrupted write, the buffer is still valid and the
	     * write hasn't been pushed to the server yet, so we can't set
	     * B_ERROR and report the interruption by setting B_EINTR. For
	     * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
	     * is essentially a noop.
	     * For the case of a V3 write rpc not being committed to stable
	     * storage, the block is still dirty and requires either a commit
	     * rpc or another write rpc with iomode == NFSV3WRITE_FILESYNC
	     * before the block is reused. This is indicated by setting the
	     * B_DELWRI and B_NEEDCOMMIT flags.
	     */
	    if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
		    s = splbio();
		    buf_dirty(bp);
		    splx(s);

		    if (!(bp->b_flags & B_ASYNC) && error)
			    bp->b_flags |= B_EINTR;
	    } else {
		if (error) {
		    bp->b_flags |= B_ERROR;
		    bp->b_error = np->n_error = error;
		    np->n_flag |= NWRITEERR;
		}
		bp->b_dirtyoff = bp->b_dirtyend = 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	s = splbio();
	biodone(bp);
	splx(s);
	return (error);
}
687