/*	$OpenBSD: nfs_bio.c,v 1.58 2009/03/19 16:44:40 oga Exp $	*/
/*	$NetBSD: nfs_bio.c,v 1.25.4.2 1996/07/08 20:47:04 jtc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/queue.h>
#include <sys/time.h>

#include <uvm/uvm_extern.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;
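/*
 * Queue of buffers handed to the nfsiod threads for asynchronous I/O,
 * with its current and maximum length (see nfs_asyncio() below).
 */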
struct nfs_bufqhead nfs_bufq;
uint32_t nfs_bufqmax, nfs_bufqlen;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, diff;
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr64_t lbn, bn, rabn;
	caddr_t baddr;
	int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin;
	off_t offdiff;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 */
	if (np->n_flag & NMODIFIED) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime;
		}
	}

	/*
	 * update the cache read creds for this vnode
	 */
	if (np->n_rcred)
		crfree(np->n_rcred);
	np->n_rcred = cred;
	crhold(cred);

	do {
	    if ((vp->v_flag & VROOT) && vp->v_type == VLNK) {
		    return (nfs_readlinkrpc(vp, uio, cred));
	    }
	    baddr = (caddr_t)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
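		/*
		 * Split the offset into a logical block number (lbn), an
		 * offset within that block (on), and the DEV_BSIZE-based
		 * block number (bn) that the buffer cache indexes by.
		 */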
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		bn = lbn * (biosize / DEV_BSIZE);
		not_readin = 1;

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead &&
			(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp)) {
				    rabp->b_flags |= B_INVAL;
				    brelse(rabp);
				}
			    } else
				brelse(rabp);
			}
		    }
		}

		/*
		 * If the block is in the cache and has the required data
		 * in a valid region, just copy it out.
		 * Otherwise, get the block and write back/read in,
		 * as required.
		 */
		if ((bp = incore(vp, bn)) &&
		    (bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
		    (B_BUSY | B_WRITEINPROG))
			got_buf = 0;
		else {
again:
			bp = nfs_getcacheblk(vp, bn, biosize, p);
			if (!bp)
				return (EINTR);
			got_buf = 1;
			if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
				bp->b_flags |= B_READ;
				not_readin = 0;
				error = nfs_doio(bp, p);
				if (error) {
				    brelse(bp);
				    return (error);
				}
			}
		}
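		/*
		 * Clamp the transfer count so it stays within both the
		 * current block and the end of the file.
		 */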
		n = min((unsigned)(biosize - on), uio->uio_resid);
		offdiff = np->n_size - uio->uio_offset;
		if (offdiff < (off_t)n)
			n = (int)offdiff;
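		/*
		 * If the block came from the cache without a fresh read and
		 * the requested range falls outside its valid region, push
		 * any dirty data back to the server and reread the block.
		 */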
		if (not_readin && n > 0) {
			if (on < bp->b_validoff || (on + n) > bp->b_validend) {
				if (!got_buf) {
				    bp = nfs_getcacheblk(vp, bn, biosize, p);
				    if (!bp)
					return (EINTR);
				    got_buf = 1;
				}
				bp->b_flags |= B_INVAFTERWRITE;
				if (bp->b_dirtyend > 0) {
				    if ((bp->b_flags & B_DELWRI) == 0)
					panic("nfsbioread");
				    if (VOP_BWRITE(bp) == EINTR)
					return (EINTR);
				} else
				    brelse(bp);
				goto again;
			}
		}
		diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
		if (diff < n)
			n = diff;
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, 0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp, p);
			if (error) {
				brelse(bp);
				return (error);
			}
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    default:
		printf("nfsbioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove(baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    default:
		printf("nfsbioread: type %x unexpected\n", vp->v_type);
	    }
	    if (got_buf)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(void *v)
{
	struct vop_write_args *ap = v;
	int biosize;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr64_t lbn, bn;
	int n, on, error = 0, extended = 0, wrotedta = 0, truncated = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * update the cache write creds for this node.
	 */
	if (np->n_wcred)
		crfree(np->n_wcred);
	np->n_wcred = cred;
	crhold(cred);

	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {

		/*
		 * XXX make sure we aren't cached in the VM page cache
		 */
		uvm_vnp_uncache(vp);

		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp)
			return (EINTR);
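		/*
		 * Mark the node modified and grow the file size (telling
		 * UVM) if this write extends it, remembering what happened
		 * for the kqueue notes posted after the loop.
		 */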
		np->n_flag |= NMODIFIED;
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			uvm_vnp_setsize(vp, (u_long)np->n_size);
			extended = 1;
		} else if (uio->uio_offset + n < np->n_size)
			truncated = 1;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		error = uiomove((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			return (error);
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
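		/*
		 * Grow the valid range to cover the newly dirtied bytes.
		 * If the old valid region does not touch the dirty one,
		 * the bytes between them are undefined, so the valid
		 * range is reset to just the dirty range.
		 */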
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		wrotedta = 1;

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */

		if (NFS_ISV3(vp)) {
			rw_enter_write(&np->n_commitlock);
			if (bp->b_flags & B_NEEDCOMMIT) {
				bp->b_flags &= ~B_NEEDCOMMIT;
				nfs_del_tobecommitted_range(vp, bp);
			}
			nfs_del_committed_range(vp, bp);
			rw_exit_write(&np->n_commitlock);
		} else
			bp->b_flags &= ~B_NEEDCOMMIT;
		/*
		 * For IO_SYNC, do a synchronous bwrite(); otherwise push
		 * complete blocks asynchronously and delay-write partial
		 * ones.
		 */
		if (ioflag & IO_SYNC) {
			bp->b_proc = p;
			error = VOP_BWRITE(bp);
			if (error)
				return (error);
		} else if ((n + on) == biosize) {
			bp->b_proc = (struct proc *)0;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (wrotedta)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0) |
		    (truncated ? NOTE_TRUNCATE : 0));

	return (0);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr64_t bn, int size, struct proc *p)
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

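	/*
	 * On interruptible mounts, wait for the buffer with a two second
	 * timeout so a pending signal can abort the attempt.
	 */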
	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, struct ucred *cred, struct proc *p)
{
	struct nfsmount		*nmp = VFSTONFS(vp->v_mount);
	struct nfsnode		*np = VTONFS(vp);
	int			 error, sintr, stimeo;

	error = sintr = stimeo = 0;

	if (ISSET(nmp->nm_flag, NFSMNT_INT)) {
		sintr = PCATCH;
		stimeo = 2 * hz;
	}

	/* First wait for any other process doing a flush to complete. */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep(&np->n_flag, PRIBIO|sintr, "nfsvinval", stimeo);
		if (error && sintr && nfs_sigintr(nmp, NULL, p))
			return (EINTR);
	}

	/* Now, flush as required. */
	np->n_flag |= NFLUSHINPROG;
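	/*
	 * If the flush fails, keep retrying with a timeout; give up only
	 * when a signal is pending on an interruptible mount.
	 */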
	error = vinvalbuf(vp, flags, cred, p, sintr, 0);
	while (error) {
		if (sintr && nfs_sigintr(nmp, NULL, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup(&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, stimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
int
nfs_asyncio(struct buf *bp)
{
	if (nfs_numasync == 0)
		goto out;

	if (nfs_bufqlen > nfs_bufqmax)
		goto out; /* too many bufs in use, force sync */

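	/*
	 * Flag writes as in progress before queueing; nfs_bioread() checks
	 * B_WRITEINPROG when deciding whether a cached block is usable.
	 */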
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_flags |= B_WRITEINPROG;
	}

	TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
	nfs_bufqlen++;

	wakeup_one(&nfs_bufq);
	return (0);

out:
	nfsstats.forcedsync++;
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp, struct proc *p)
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int s, error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
	    /*
	     * ...though reading /dev/drum still gets us here.
	     */
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    /* mapping was done by vmapbuf() */
	    io.iov_base = bp->b_data;
	    uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	    if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	    } else {
		iomode = NFSV3WRITE_DATASYNC;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else if (bp->b_flags & B_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
		nfsstats.read_bios++;
		bcstats.pendingreads++;
		bcstats.numreads++;
		error = nfs_readrpc(vp, uiop);
		if (!error) {
		    bp->b_validoff = 0;
		    if (uiop->uio_resid) {
			/*
			 * If len > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet.
			 * Just zero fill the rest of the valid area.
			 */
			diff = bp->b_bcount - uiop->uio_resid;
			len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
				+ diff);
			if (len > 0) {
			    len = min(len, uiop->uio_resid);
			    bzero((char *)bp->b_data + diff, len);
			    bp->b_validend = diff + len;
			} else
			    bp->b_validend = diff;
		    } else
			bp->b_validend = bp->b_bcount;
		}
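		/*
		 * If this vnode backs a running executable and the file
		 * changed on the server, the cached text is stale; kill
		 * the process rather than let it run corrupted code.
		 */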
		if (p && (vp->v_flag & VTEXT) &&
		    (timespeccmp(&np->n_mtime, &np->n_vattr.va_mtime, !=))) {
			uprintf("Process killed due to text file modification\n");
			psignal(p, SIGKILL);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		bcstats.pendingreads++;
		bcstats.numreads++;
		error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else {
	    io.iov_len = uiop->uio_resid = bp->b_dirtyend
		- bp->b_dirtyoff;
	    uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
		+ bp->b_dirtyoff;
	    io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
	    uiop->uio_rw = UIO_WRITE;
	    nfsstats.write_bios++;
	    bcstats.pendingwrites++;
	    bcstats.numwrites++;
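	    /*
	     * Async writes that need no commit can go out unstable;
	     * everything else is written FILESYNC so the server puts it
	     * on stable storage immediately.
	     */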
	    if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC)
		iomode = NFSV3WRITE_UNSTABLE;
	    else
		iomode = NFSV3WRITE_FILESYNC;
	    bp->b_flags |= B_WRITEINPROG;
	    error = nfs_writerpc(vp, uiop, &iomode, &must_commit);

	    rw_enter_write(&np->n_commitlock);
	    if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		bp->b_flags |= B_NEEDCOMMIT;
		nfs_add_tobecommitted_range(vp, bp);
	    } else {
		bp->b_flags &= ~B_NEEDCOMMIT;
		nfs_del_committed_range(vp, bp);
	    }
	    rw_exit_write(&np->n_commitlock);

	    bp->b_flags &= ~B_WRITEINPROG;

	    /*
	     * For an interrupted write, the buffer is still valid and the
	     * write hasn't been pushed to the server yet, so we can't set
	     * B_ERROR; report the interruption by setting B_EINTR instead.
	     * For the B_ASYNC case, B_EINTR is not relevant, so the rpc
	     * attempt is essentially a noop.
	     * For the case of a V3 write rpc not being committed to stable
	     * storage, the block is still dirty and requires either a commit
	     * rpc or another write rpc with iomode == NFSV3WRITE_FILESYNC
	     * before the block is reused. This is indicated by setting the
	     * B_DELWRI and B_NEEDCOMMIT flags.
	     */
	    if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
		    s = splbio();
		    buf_dirty(bp);
		    splx(s);

		    if (!(bp->b_flags & B_ASYNC) && error)
			    bp->b_flags |= B_EINTR;
	    } else {
		if (error) {
		    bp->b_flags |= B_ERROR;
		    bp->b_error = np->n_error = error;
		    np->n_flag |= NWRITEERR;
		}
		bp->b_dirtyoff = bp->b_dirtyend = 0;
	    }
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	s = splbio();
	biodone(bp);
	splx(s);
	return (error);
}