/* /csrg-svn/sys/kern/vfs_cluster.c (revision 46989) */
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_cluster.c	7.33 (Berkeley) 03/04/91
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "trace.h"
#include "ucred.h"

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
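
/*
 * Usage sketch (hypothetical caller): a filesystem read routine would
 * typically map a file offset to a logical block, call bread to fetch
 * the buffer, copy the data out, and release the buffer with brelse.
 * The routine name, its parameters, and the fixed block size are
 * illustrative only; the error convention follows the routines above.
 */
example_readblk(vp, lbn, blksize, dst, cred)
	struct vnode *vp;
	daddr_t lbn;
	int blksize;
	caddr_t dst;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	if (error = bread(vp, lbn, blksize, cred, &bp)) {
		brelse(bp);		/* failed reads are still released */
		return (error);
	}
	bcopy(bp->b_un.b_addr, dst, (unsigned)blksize);
	brelse(bp);			/* data copied; buffer may be recycled */
	return (0);
}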

/*
 * Operates like bread, but also starts I/O on the specified
 * read-ahead block.
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			u.u_ru.ru_inblock++;		/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there is a read-ahead block, start I/O on it too.
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
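
/*
 * Usage sketch (hypothetical caller): when a file is being read
 * sequentially, the caller can ask breada to start the next logical
 * block in the same call, so that read is already in progress by the
 * time it is wanted.  The routine name and parameters are illustrative.
 */
example_readahead(vp, lbn, blksize, cred, bpp)
	struct vnode *vp;
	daddr_t lbn;
	int blksize;
	struct ucred *cred;
	struct buf **bpp;
{

	/* read block lbn now; start an asynchronous read of lbn + 1 */
	return (breada(vp, lbn, blksize, lbn + 1, blksize, cred, bpp));
}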

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int s, error;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag & B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	else
		reassignbuf(bp, bp->b_vp);
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	} else
		error = 0;		/* async, not delayed: no status to report yet */
	return (error);
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		u.u_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
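
/*
 * Usage sketch (hypothetical caller): the three write interfaces trade
 * durability against latency.  A caller that must know the data reached
 * the disk uses bwrite; a partially filled block likely to be written
 * again soon is better handled by bdwrite; a completed block that will
 * not be touched again can be pushed with bawrite without waiting.
 * The routine name, the "ioflag" parameter, and the IO_SYNC test are
 * illustrative only.
 */
example_writeblk(bp, ioflag, lastpiece)
	register struct buf *bp;
	int ioflag, lastpiece;
{

	if (ioflag & IO_SYNC)
		return (bwrite(bp));	/* wait for completion status */
	if (!lastpiece) {
		bdwrite(bp);		/* more of this block coming soon */
		return (0);
	}
	bawrite(bp);			/* start the write, do not wait */
	return (0);
}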

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags & B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    (bp->b_flags & B_INVAL))
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO + 1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}
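
/*
 * Usage sketch (hypothetical caller): getblk is used directly when the
 * caller will supply the contents itself, for example for a freshly
 * allocated block that has no valid data on disk yet.  The buffer comes
 * back busy and exclusively held; the caller fills it in and then picks
 * one of the write routines above.  The routine name is illustrative.
 */
example_newblk(vp, lbn, blksize)
	register struct vnode *vp;
	daddr_t lbn;
	int blksize;
{
	register struct buf *bp;

	bp = getblk(vp, lbn, blksize);
	bzero(bp->b_un.b_addr, (unsigned)blksize);	/* no disk read needed */
	bdwrite(bp);					/* schedule a delayed write */
	return (0);
}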

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		ep = bfreelist[BQ_EMPTY].av_forw;
		if (ep == &bfreelist[BQ_EMPTY])
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		bp = getnewbuf();
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &bfreelist[BQ_EMPTY]);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO + 1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
biodone(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				if (vp->v_numoutput < 0)
					panic("biodone: neg numoutput");
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
	}
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
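
/*
 * Usage sketch (hypothetical caller): a subsystem that cannot sleep,
 * such as the pageout daemon, asks for a completion callback instead of
 * using biowait.  It sets B_CALL and b_iodone before starting the I/O;
 * biodone then calls the handler at interrupt time and does not release
 * the buffer, so the handler must do that itself.  The buffer is assumed
 * to be already set up for the transfer; both routine names are
 * illustrative.
 */
example_iodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_ERROR)
		printf("example_iodone: error %d\n", bp->b_error);
	brelse(bp);		/* biodone skips brelse for B_CALL buffers */
}

example_startasync(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_READ | B_ASYNC | B_CALL;
	bp->b_iodone = example_iodone;
	VOP_STRATEGY(bp);
}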

/*
 * Make sure all write-behind blocks associated
 * with mount point are flushed out (from sync).
 */
mntflushbuf(mountp, flags)
	struct mount *mountp;
	int flags;
{
	register struct vnode *vp;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntflushbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		vflushbuf(vp, flags);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;
	}
}

/*
 * Flush all dirty buffers associated with a vnode.
 */
vflushbuf(vp, flags)
	register struct vnode *vp;
	int flags;
{
	register struct buf *bp;
	struct buf *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
		nbp = bp->b_blockf;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 * NB: This is really specific to ufs, but is done here
		 * as it is easier and quicker.
		 */
		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
			(void) bawrite(bp);
			s = splbio();
		} else {
			(void) bwrite(bp);
			goto loop;
		}
	}
	splx(s);
	if ((flags & B_SYNC) == 0)
		return;
	s = splbio();
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		sleep((caddr_t)&vp->v_numoutput, PRIBIO + 1);
	}
	splx(s);
	if (vp->v_dirtyblkhd) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}
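
/*
 * Usage sketch (hypothetical caller): an fsync-style operation pushes a
 * single vnode's delayed writes with vflushbuf.  With B_SYNC the call
 * does not return until every dirty buffer and every outstanding write
 * on the vnode has completed; without it the writes are only started.
 * The routine name and "waitfor" parameter are illustrative.
 */
example_fsync(vp, waitfor)
	register struct vnode *vp;
	int waitfor;
{

	vflushbuf(vp, waitfor ? B_SYNC : 0);
	return (0);
}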

/*
 * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
 *
 * Go through the list of vnodes associated with the file system;
 * for each vnode invalidate any buffers that it holds. Normally
 * this routine is preceded by a flush call, so that on a quiescent
 * filesystem there will be no dirty buffers when we are done. The
 * count of dirty buffers encountered is returned when finished.
 */
mntinvalbuf(mountp)
	struct mount *mountp;
{
	register struct vnode *vp;
	int dirty = 0;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntinvalbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
		if (vget(vp))
			goto loop;
		dirty += vinvalbuf(vp, 1);
		vput(vp);
		if (vp->v_mount != mountp)
			goto loop;
	}
	return (dirty);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
vinvalbuf(vp, save)
	register struct vnode *vp;
	int save;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO + 1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (save && (bp->b_flags & B_DELWRI)) {
				dirty++;
				(void) bwrite(bp);
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (dirty);
}
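
/*
 * Usage sketch (hypothetical caller): when a file's blocks must leave
 * the cache, for instance when its vnode is being reclaimed for reuse,
 * the caller discards everything the vnode still holds.  A nonzero
 * "save" argument makes vinvalbuf write out dirty buffers before they
 * are invalidated; zero simply throws them away.  The routine name is
 * illustrative.
 */
example_reclaim(vp)
	register struct vnode *vp;
{

	return (vinvalbuf(vp, 1));	/* write dirty blocks, then invalidate */
}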

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (vp->v_cleanblkhd) {
		bp->b_blockf = vp->v_cleanblkhd;
		bp->b_blockb = &vp->v_cleanblkhd;
		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
		vp->v_cleanblkhd = bp;
	} else {
		vp->v_cleanblkhd = bp;
		bp->b_blockb = &vp->v_cleanblkhd;
		bp->b_blockf = NULL;
	}
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL)
		panic("reassignbuf: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (*listheadp) {
		bp->b_blockf = *listheadp;
		bp->b_blockb = listheadp;
		bp->b_blockf->b_blockb = &bp->b_blockf;
		*listheadp = bp;
	} else {
		*listheadp = bp;
		bp->b_blockb = listheadp;
		bp->b_blockf = NULL;
	}
}
799