/*	$OpenBSD: nfs_bio.c,v 1.73 2012/07/11 12:39:20 guenther Exp $	*/
/*	$NetBSD: nfs_bio.c,v 1.25.4.2 1996/07/08 20:47:04 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/time.h>

#include <uvm/uvm_extern.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;
struct nfs_bufqhead nfs_bufq;
uint32_t nfs_bufqmax, nfs_bufqlen;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, diff;
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr64_t lbn, bn, rabn;
	caddr_t baddr;
	int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin;
	off_t offdiff;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
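	/*
	 * For an NFSv3 mount whose fsinfo has not been fetched yet, ask
	 * the server for it now so sensible transfer sizes are in place
	 * before biosize is taken from nm_rsize below.
	 */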
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 */
	if (np->n_flag & NMODIFIED) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime;
		}
	}

	/*
	 * update the cache read creds for this vnode
	 */
	if (np->n_rcred)
		crfree(np->n_rcred);
	np->n_rcred = cred;
	crhold(cred);

	do {
	    if ((vp->v_flag & VROOT) && vp->v_type == VLNK) {
		    return (nfs_readlinkrpc(vp, uio, cred));
	    }
	    baddr = (caddr_t)0;
	    switch (vp->v_type) {
	    case VREG:
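		/*
		 * Split the request offset into a logical block number
		 * (lbn), the offset within that block (on), and the
		 * corresponding buffer cache block number (bn) expressed
		 * in DEV_BSIZE units.
		 */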
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		bn = lbn * (biosize / DEV_BSIZE);
		not_readin = 1;

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead &&
			(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp, 1)) {
				    rabp->b_flags |= B_INVAL;
				    brelse(rabp);
				}
			    } else
				brelse(rabp);
			}
		    }
		}

again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp)
			return (EINTR);
		got_buf = 1;
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			not_readin = 0;
			error = nfs_doio(bp, p);
			if (error) {
			    brelse(bp);
			    return (error);
			}
		}
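		/*
		 * Clamp the transfer count to what remains in this block,
		 * then to end of file; below it is further limited to the
		 * portion of the buffer that is actually valid.
		 */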
		n = min((unsigned)(biosize - on), uio->uio_resid);
		offdiff = np->n_size - uio->uio_offset;
		if (offdiff < (off_t)n)
			n = (int)offdiff;
		if (not_readin && n > 0) {
			if (on < bp->b_validoff || (on + n) > bp->b_validend) {
				bp->b_flags |= B_INVAFTERWRITE;
				if (bp->b_dirtyend > 0) {
				    if ((bp->b_flags & B_DELWRI) == 0)
					panic("nfsbioread");
				    if (VOP_BWRITE(bp) == EINTR)
					return (EINTR);
				} else
				    brelse(bp);
				goto again;
			}
		}
		diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
		if (diff < n)
			n = diff;
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, 0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp, p);
			if (error) {
				brelse(bp);
				return (error);
			}
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    default:
		panic("nfsbioread: type %x unexpected", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove(baddr + on, (int)n, uio);
	    }

	    if (vp->v_type == VLNK)
		n = 0;

	    if (got_buf)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(void *v)
{
	struct vop_write_args *ap = v;
	int biosize;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr64_t lbn, bn;
	int n, on, error = 0, extended = 0, wrotedta = 0, truncated = 0;
	int overrun;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);

	/* do the filesize rlimit check */
	if ((error = vn_fsizechk(vp, uio, ioflag, &overrun)))
		return (error);

	/*
	 * update the cache write creds for this node.
	 */
	if (np->n_wcred)
		crfree(np->n_wcred);
	np->n_wcred = cred;
	crhold(cred);

	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {

		/*
		 * XXX make sure we aren't cached in the VM page cache
		 */
		uvm_vnp_uncache(vp);

		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp) {
			error = EINTR;
			goto out;
		}
		np->n_flag |= NMODIFIED;
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			uvm_vnp_setsize(vp, (u_long)np->n_size);
			extended = 1;
		} else if (uio->uio_offset + n < np->n_size)
			truncated = 1;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR) {
				error = EINTR;
				goto out;
			}
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			goto out;
		}
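		/*
		 * Merge the bytes just copied in into the buffer's dirty
		 * region, then grow the valid region so that it always
		 * covers the dirty region.
		 */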
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		wrotedta = 1;

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */

		if (NFS_ISV3(vp)) {
			rw_enter_write(&np->n_commitlock);
			if (bp->b_flags & B_NEEDCOMMIT) {
				bp->b_flags &= ~B_NEEDCOMMIT;
				nfs_del_tobecommitted_range(vp, bp);
			}
			nfs_del_committed_range(vp, bp);
			rw_exit_write(&np->n_commitlock);
		} else
			bp->b_flags &= ~B_NEEDCOMMIT;

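		/*
		 * Push the block according to the write mode: a synchronous
		 * write goes to the server right away, a completely filled
		 * block is started asynchronously, and a partially filled
		 * block is left as a delayed write.
		 */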
		if (ioflag & IO_SYNC) {
			bp->b_proc = p;
			error = VOP_BWRITE(bp);
			if (error)
				goto out;
		} else if ((n + on) == biosize) {
			bp->b_proc = NULL;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

/*out: XXX belongs here??? */
	if (wrotedta)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0) |
		    (truncated ? NOTE_TRUNCATE : 0));

out:
	/* correct the result for writes clamped by vn_fsizechk() */
	uio->uio_resid += overrun;

	return (error);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr64_t bn, int size, struct proc *p)
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, p))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	struct nfsmount		*nmp = VFSTONFS(vp->v_mount);
	struct nfsnode		*np = VTONFS(vp);
	int			 error, sintr, stimeo;

	error = sintr = stimeo = 0;

	if (ISSET(nmp->nm_flag, NFSMNT_INT)) {
		sintr = PCATCH;
		stimeo = 2 * hz;
	}

	/* First wait for any other process doing a flush to complete. */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO|sintr, "nfsvinval", stimeo);
		if (error && sintr && nfs_sigintr(nmp, NULL, p))
			return (EINTR);
	}

	/* Now, flush as required. */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, sintr, 0);
	while (error) {
		if (sintr && nfs_sigintr(nmp, NULL, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, stimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
int
nfs_asyncio(struct buf *bp, int readahead)
{
	if (nfs_numasync == 0)
		goto out;

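	/*
	 * If the nfsiod queue is over its limit, refuse read-ahead
	 * requests (the read-ahead caller simply discards the buffer)
	 * and make other requests wait until an nfsiod drains the queue.
	 */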
	while (nfs_bufqlen > nfs_bufqmax)
		if (readahead)
			goto out;
		else
			tsleep(&nfs_bufqlen, PRIBIO, "nfs_bufq", 0);

	if ((bp->b_flags & B_READ) == 0) {
		bp->b_flags |= B_WRITEINPROG;
	}

	TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
	nfs_bufqlen++;

	wakeup_one(&nfs_bufq);
	return (0);

out:
	nfsstats.forcedsync++;
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct proc *p)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int s, error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    /* mapping was done by vmapbuf() */
	    io.iov_base = bp->b_data;
	    uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	    if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	    } else {
		iomode = NFSV3WRITE_DATASYNC;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else if (bp->b_flags & B_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
		nfsstats.read_bios++;
		bcstats.pendingreads++;
		bcstats.numreads++;
		error = nfs_readrpc(vp, uiop);
		if (!error) {
		    bp->b_validoff = 0;
		    if (uiop->uio_resid) {
			/*
			 * If len > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet.
			 * Just zero fill the rest of the valid area.
			 */
			diff = bp->b_bcount - uiop->uio_resid;
			len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
				+ diff);
			if (len > 0) {
			    len = min(len, uiop->uio_resid);
			    bzero((char *)bp->b_data + diff, len);
			    bp->b_validend = diff + len;
			} else
			    bp->b_validend = diff;
		    } else
			bp->b_validend = bp->b_bcount;
		}
		if (p && (vp->v_flag & VTEXT) &&
		    (timespeccmp(&np->n_mtime, &np->n_vattr.va_mtime, !=))) {
			uprintf("Process killed due to text file modification\n");
			psignal(p, SIGKILL);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		bcstats.pendingreads++;
		bcstats.numreads++;
		error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
		break;
	    default:
		panic("nfs_doio:  type %x unexpected", vp->v_type);
		break;
	    };
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else {
	    io.iov_len = uiop->uio_resid = bp->b_dirtyend
		- bp->b_dirtyoff;
	    uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
		+ bp->b_dirtyoff;
	    io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
	    uiop->uio_rw = UIO_WRITE;
	    nfsstats.write_bios++;
	    bcstats.pendingwrites++;
	    bcstats.numwrites++;
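	    /*
	     * Asynchronous writes that are not already marked for commit
	     * may be sent as UNSTABLE and committed later; everything
	     * else is written FILESYNC.
	     */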
	    if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC)
		iomode = NFSV3WRITE_UNSTABLE;
	    else
		iomode = NFSV3WRITE_FILESYNC;
	    bp->b_flags |= B_WRITEINPROG;
	    error = nfs_writerpc(vp, uiop, &iomode, &must_commit);

	    rw_enter_write(&np->n_commitlock);
	    if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		bp->b_flags |= B_NEEDCOMMIT;
		nfs_add_tobecommitted_range(vp, bp);
	    } else {
		bp->b_flags &= ~B_NEEDCOMMIT;
		nfs_del_committed_range(vp, bp);
	    }
	    rw_exit_write(&np->n_commitlock);

	    bp->b_flags &= ~B_WRITEINPROG;

	    /*
	     * For an interrupted write, the buffer is still valid and the
	     * write hasn't been pushed to the server yet, so we can't set
	     * B_ERROR and report the interruption by setting B_EINTR. For
	     * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
	     * is essentially a noop.
	     * For the case of a V3 write rpc not being committed to stable
	     * storage, the block is still dirty and requires either a commit
	     * rpc or another write rpc with iomode == NFSV3WRITE_FILESYNC
	     * before the block is reused. This is indicated by setting the
	     * B_DELWRI and B_NEEDCOMMIT flags.
	     */
	    if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
		    s = splbio();
		    buf_dirty(bp);
		    splx(s);

		    if (!(bp->b_flags & B_ASYNC) && error)
			    bp->b_flags |= B_EINTR;
	    } else {
		if (error) {
		    bp->b_flags |= B_ERROR;
		    bp->b_error = np->n_error = error;
		    np->n_flag |= NWRITEERR;
		}
		bp->b_dirtyoff = bp->b_dirtyend = 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	s = splbio();
	biodone(bp);
	splx(s);
	return (error);
}