xref: /csrg-svn/sys/nfs/nfs_bio.c (revision 56660)
/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_bio.c	7.34 (Berkeley) 11/01/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/trace.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <vm/vm.h>

#include <nfs/nfsnode.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsv2.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>

/* True and false, how exciting */
#define	TRUE	1
#define	FALSE	0

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
nfs_bioread(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	register struct nfsnode *np = VTONFS(vp);
	register int biosize;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp;
	daddr_t lbn, bn, rablock[NFS_MAXRAHEAD];
	int rasize[NFS_MAXRAHEAD], nra, diff, error = 0;
	int n, on;

#ifdef lint
	ioflag = ioflag;
#endif /* lint */
#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0 && vp->v_type != VDIR)
		return (EINVAL);
	nmp = VFSTONFS(vp->v_mount);
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc, or you have written to the file, you may have lost
	 * data cache consistency with the server, so flush all of the file's
	 * data out of the cache.  Then force a getattr rpc to ensure that you
	 * have up-to-date attributes.
	 * The mount flag NFSMNT_MYWRITE says "Assume that my writes are
	 * the ones changing the modify time."
	 * NB: This implies that cache data can be read when it is up to
	 * NFS_ATTRTIMEO seconds out of date.  If you need current attributes,
	 * force them by setting n_attrstamp to 0 before the VOP_GETATTR()
	 * call.
	 */
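	/*
	 * Illustrative sketch only (not part of the control flow below):
	 * a caller that must see current attributes, as the note above
	 * suggests, would invalidate the cached copy first, e.g.
	 *
	 *	np->n_attrstamp = 0;
	 *	error = VOP_GETATTR(vp, &vattr, cred, uio->uio_procp);
	 *
	 * which is exactly what the NMODIFIED branch below does.
	 */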
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			if ((nmp->nm_flag & NFSMNT_MYWRITE) == 0 ||
			     vp->v_type != VREG)
				NFS_VINVBUF(np, vp, TRUE, cred, uio->uio_procp);
			np->n_attrstamp = 0;
			np->n_direofoffset = 0;
			if (error = VOP_GETATTR(vp, &vattr, cred, uio->uio_procp))
				return (error);
			np->n_mtime = vattr.va_mtime.ts_sec;
		} else {
			if (error = VOP_GETATTR(vp, &vattr, cred, uio->uio_procp))
				return (error);
			if (np->n_mtime != vattr.va_mtime.ts_sec) {
				np->n_direofoffset = 0;
				NFS_VINVBUF(np, vp, TRUE, cred, uio->uio_procp);
				np->n_mtime = vattr.va_mtime.ts_sec;
			}
		}
	}
	do {

	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		NQNFS_CKINVALID(vp, np, NQL_READ)) {
		do {
			error = nqnfs_getlease(vp, NQL_READ, cred, uio->uio_procp);
		} while (error == NQNFS_EXPIRED);
		if (error)
			return (error);
		if (np->n_lrev != np->n_brev ||
		    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR) {
				np->n_direofoffset = 0;
				cache_purge(vp);
			}
			NFS_VINVBUF(np, vp, TRUE, cred, uio->uio_procp);
			np->n_brev = np->n_lrev;
		}
	    }
	    if (np->n_flag & NQNFSNONCACHE) {
		switch (vp->v_type) {
		case VREG:
			error = nfs_readrpc(vp, uio, cred);
			break;
		case VLNK:
			error = nfs_readlinkrpc(vp, uio, cred);
			break;
		case VDIR:
			error = nfs_readdirrpc(vp, uio, cred);
			break;
		}
		return (error);
	    }
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		diff = np->n_size - uio->uio_offset;
		if (diff <= 0)
			return (error);
		if (diff < n)
			n = diff;
		bn = lbn*(biosize/DEV_BSIZE);
		for (nra = 0; nra < nmp->nm_readahead &&
			(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rablock[nra] = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
			rasize[nra] = biosize;
		}
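		/*
		 * Worked example (illustrative values, assuming a biosize of
		 * 8192 and DEV_BSIZE of 512): a read at uio_offset 20000
		 * gives lbn = 20000 / 8192 = 2, on = 20000 & 8191 = 3616 and
		 * bn = 2 * (8192 / 512) = 32, the block number handed to
		 * bread() in DEV_BSIZE units; that buffer holds bytes
		 * [16384, 24576) of the file, and the first read-ahead block
		 * would be rablock[0] = 3 * 16 = 48.
		 */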
again:
		if (nra > 0 && lbn >= vp->v_lastr)
			error = breadn(vp, bn, biosize, rablock, rasize, nra,
				cred, &bp);
		else
			error = bread(vp, bn, biosize, cred, &bp);
		if (bp->b_validend > 0) {
			if (on < bp->b_validoff || (on+n) > bp->b_validend) {
				bp->b_flags |= B_INVAL;
				if (bp->b_dirtyend > 0) {
					if ((bp->b_flags & B_DELWRI) == 0)
						panic("nfsbioread");
					(void) bwrite(bp);
				} else
					brelse(bp);
				goto again;
			}
		} else {
			bp->b_validoff = 0;
			bp->b_validend = biosize - bp->b_resid;
		}
		vp->v_lastr = lbn;
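		/*
		 * A short read (b_resid > 0) means the tail of this block is
		 * not valid, so clamp n to the data actually returned.
		 * Illustrative numbers, assuming a biosize of 8192: with
		 * b_resid 4096 and on 3616, only 8192 - 4096 - 3616 = 480
		 * bytes are usable from the requested offset, so n is limited
		 * to 480; if on lies at or beyond the valid data, n becomes 0
		 * and the read loop terminates.
		 */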
		if (bp->b_resid) {
			diff = (on >= (biosize-bp->b_resid)) ? 0 :
				(biosize-bp->b_resid-on);
			n = min(n, diff);
		}
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		on = 0;
		error = bread(vp, (daddr_t)0, NFS_MAXPATHLEN, cred, &bp);
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		on = 0;
		error = bread(vp, uio->uio_offset, NFS_DIRBLKSIZ, cred, &bp);
		n = min(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid);
		break;
	    }
	    if (error) {
		brelse(bp);
		return (error);
	    }

	    /*
	     * For nqnfs:
	     * Must check for a valid lease, since it may have expired while in
	     * bread(). If expired, get a lease.
	     * If data is stale, flush and try again.
	     * nb: If a read rpc is done by bread() or breada() and there is
	     *     no valid lease, a get_lease request will be piggybacked.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, NQL_READ)) {
			do {
				error = nqnfs_getlease(vp, NQL_READ, cred, uio->uio_procp);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if ((np->n_flag & NQNFSNONCACHE) ||
			    np->n_lrev != np->n_brev ||
			    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
				if (vp->v_type == VDIR) {
					np->n_direofoffset = 0;
					cache_purge(vp);
				}
				brelse(bp);
				NFS_VINVBUF(np, vp, TRUE, cred, uio->uio_procp);
				np->n_brev = np->n_lrev;
				continue;
			}
		} else if ((np->n_flag & NQNFSNONCACHE) ||
		    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			np->n_direofoffset = 0;
			brelse(bp);
			NFS_VINVBUF(np, vp, TRUE, cred, uio->uio_procp);
			np->n_brev = np->n_lrev;
			continue;
		}
	    }
	    if (n > 0)
		error = uiomove(bp->b_un.b_addr + on, (int)n, uio);
	    switch (vp->v_type) {
	    case VREG:
		if (n+on == biosize || uio->uio_offset == np->n_size)
			bp->b_flags |= B_AGE;
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		uio->uio_offset = bp->b_blkno;
		break;
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
nfs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register int biosize;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	register struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	register struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp;
	daddr_t lbn, bn;
	int n, on, error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			NFS_VINVBUF(np, vp, TRUE, cred, p);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			if (error = VOP_GETATTR(vp, &vattr, cred, p))
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	nmp = VFSTONFS(vp->v_mount);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}
	/*
	 * I use nm_rsize, not nm_wsize, so that all buffer cache blocks
	 * will be the same size within a filesystem.  nfs_writerpc will
	 * still use nm_wsize when sizing the rpcs.
	 */
	biosize = nmp->nm_rsize;
	np->n_flag |= NMODIFIED;
	do {

		/*
		 * Check for a valid write lease.
		 * If non-cachable, just do the rpc.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
			do {
				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				NFS_VINVBUF(np, vp, TRUE, cred, p);
				np->n_brev = np->n_lrev;
			}
		}
		if (np->n_flag & NQNFSNONCACHE)
			return (nfs_writerpc(vp, uio, cred, 0));
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		}
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = getblk(vp, bn, biosize);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
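		/*
		 * Illustrative numbers: if the buffer already has dirty bytes
		 * [100, 500) and the new write covers [600, 700), the regions
		 * do not touch, so the old dirty area is written out first;
		 * a new write covering [400, 700) overlaps and simply grows
		 * the dirty region to [100, 700).
		 */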
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (error = bwrite(bp))
				return (error);
			goto again;
		}

		/*
		 * Check for a valid write lease and get one as required,
		 * in case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
			do {
				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				NFS_VINVBUF(np, vp, TRUE, cred, p);
				np->n_brev = np->n_lrev;
				goto again;
			}
		}
		if (error = uiomove(bp->b_un.b_addr + on, n, uio)) {
			brelse(bp);
			return (error);
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on+n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on+n;
		}
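		/*
		 * Merge the newly dirtied bytes into the valid range.  If the
		 * old valid range and the dirty range do not touch, the bytes
		 * between them were never read or written, so only the dirty
		 * range can be considered valid.  Illustrative numbers: with
		 * valid bytes [0, 300) and a write dirtying [512, 1024),
		 * bytes [300, 512) hold no data, so the valid range is reset
		 * to [512, 1024) rather than stretched to [0, 1024).
		 */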
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		/*
		 * If the lease is non-cachable or IO_SYNC is set, do bwrite().
		 */
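		/*
		 * Otherwise (describing the cases below): when the write runs
		 * to the end of the block and nqnfs is not in use, the buffer
		 * is aged and pushed asynchronously with bawrite(); any other
		 * write is left as a delayed write via bdwrite(), so that
		 * later writes may be gathered into the same buffer.
		 */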
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			bp->b_proc = p;
			bwrite(bp);
		} else if ((n+on) == biosize &&
			 (nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_flags |= B_AGE;
			bp->b_proc = (struct proc *)0;
			bawrite(bp);
		} else {
			bp->b_proc = (struct proc *)0;
			bdwrite(bp);
		}
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
	return (error);
}