xref: /csrg-svn/sys/kern/vfs_cluster.c (revision 39882)
1 /*
2  * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)vfs_cluster.c	7.19 (Berkeley) 01/04/90
18  */
19 
20 #include "param.h"
21 #include "user.h"
22 #include "buf.h"
23 #include "vnode.h"
24 #include "mount.h"
25 #include "trace.h"
26 #include "ucred.h"
27 
28 /*
29  * Read in (if necessary) the block and return a buffer pointer.
30  */
31 bread(vp, blkno, size, cred, bpp)
32 	struct vnode *vp;
33 	daddr_t blkno;
34 	int size;
35 	struct ucred *cred;
36 	struct buf **bpp;
37 {
38 	register struct buf *bp;
39 
40 	if (size == 0)
41 		panic("bread: size 0");
42 	*bpp = bp = getblk(vp, blkno, size);
43 	if (bp->b_flags&(B_DONE|B_DELWRI)) {
44 		trace(TR_BREADHIT, pack(vp->v_mount->m_fsid[0], size), blkno);
45 		return (0);
46 	}
47 	bp->b_flags |= B_READ;
48 	if (bp->b_bcount > bp->b_bufsize)
49 		panic("bread");
50 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
51 		crhold(cred);
52 		bp->b_rcred = cred;
53 	}
54 	VOP_STRATEGY(bp);
55 	trace(TR_BREADMISS, pack(vp->v_mount->m_fsid[0], size), blkno);
56 	u.u_ru.ru_inblock++;		/* pay for read */
57 	return (biowait(bp));
58 }
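
/*
 * Example (illustrative only; "lbn" and "bsize" are placeholder names,
 * not identifiers from this file): a typical caller of bread() checks
 * the returned error and releases the buffer in either case:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use the data at bp->b_un.b_addr ...
 *	brelse(bp);
 */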
59 
60 /*
61  * Read in the block, like bread, but also start I/O on the
62  * read-ahead block (which is not allocated to the caller)
63  */
64 breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
65 	struct vnode *vp;
66 	daddr_t blkno; int size;
67 	daddr_t rablkno; int rabsize;
68 	struct ucred *cred;
69 	struct buf **bpp;
70 {
71 	register struct buf *bp, *rabp;
72 
73 	bp = NULL;
74 	/*
75 	 * If the block isn't in core, then allocate
76 	 * a buffer and initiate i/o (getblk checks
77 	 * for a cache hit).
78 	 */
79 	if (!incore(vp, blkno)) {
80 		*bpp = bp = getblk(vp, blkno, size);
81 		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
82 			bp->b_flags |= B_READ;
83 			if (bp->b_bcount > bp->b_bufsize)
84 				panic("breada");
85 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
86 				crhold(cred);
87 				bp->b_rcred = cred;
88 			}
89 			VOP_STRATEGY(bp);
90 			trace(TR_BREADMISS, pack(vp->v_mount->m_fsid[0], size),
91 			    blkno);
92 			u.u_ru.ru_inblock++;		/* pay for read */
93 		} else
94 			trace(TR_BREADHIT, pack(vp->v_mount->m_fsid[0], size),
95 			    blkno);
96 	}
97 
98 	/*
99 	 * If there's a read-ahead block, start i/o
100 	 * on it also (as above).
101 	 */
102 	if (rablkno && !incore(vp, rablkno)) {
103 		rabp = getblk(vp, rablkno, rabsize);
104 		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
105 			brelse(rabp);
106 			trace(TR_BREADHITRA,
107 			    pack(vp->v_mount->m_fsid[0], rabsize), rablkno);
108 		} else {
109 			rabp->b_flags |= B_READ|B_ASYNC;
110 			if (rabp->b_bcount > rabp->b_bufsize)
111 				panic("breadrabp");
112 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
113 				crhold(cred);
114 				rabp->b_rcred = cred;
115 			}
116 			VOP_STRATEGY(rabp);
117 			trace(TR_BREADMISSRA,
118 			    pack(vp->v_mount->m_fsid[0], rabsize), rablkno);
119 			u.u_ru.ru_inblock++;		/* pay in advance */
120 		}
121 	}
122 
123 	/*
124 	 * If block was in core, let bread get it.
125 	 * If block wasn't in core, then the read was started
126 	 * above, so just wait for it to complete.
127 	 */
128 	if (bp == NULL)
129 		return (bread(vp, blkno, size, cred, bpp));
130 	return (biowait(bp));
131 }
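
/*
 * Example (illustrative; "lbn", "lastlbn" and "bsize" are placeholder
 * names): a file system read path would typically request read-ahead
 * on the next logical block when one exists, and fall back to plain
 * bread() otherwise:
 *
 *	if (lbn + 1 < lastlbn)
 *		error = breada(vp, lbn, bsize, lbn + 1, bsize,
 *		    cred, &bp);
 *	else
 *		error = bread(vp, lbn, bsize, cred, &bp);
 */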
132 
133 /*
134  * Write the buffer, waiting for completion.
135  * Then release the buffer.
136  */
137 bwrite(bp)
138 	register struct buf *bp;
139 {
140 	register int flag;
141 	int error = 0;
142 
143 	flag = bp->b_flags;
144 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
145 	if ((flag&B_DELWRI) == 0)
146 		u.u_ru.ru_oublock++;		/* no one paid yet */
147 	else
148 		reassignbuf(bp, bp->b_vp);
149 	trace(TR_BWRITE,
150 	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bcount), bp->b_lblkno);
151 	if (bp->b_bcount > bp->b_bufsize)
152 		panic("bwrite");
153 	bp->b_vp->v_numoutput++;
154 	VOP_STRATEGY(bp);
155 
156 	/*
157 	 * If the write was synchronous, then await i/o completion.
158 	 * If the write was "delayed", then we put the buffer on
159 	 * the queue of blocks awaiting i/o completion status.
160 	 */
161 	if ((flag&B_ASYNC) == 0) {
162 		error = biowait(bp);
163 		brelse(bp);
164 	} else if (flag & B_DELWRI) {
165 		bp->b_flags |= B_AGE;
166 		error = 0;
167 	}
168 	return (error);
169 }
170 
171 /*
172  * Release the buffer, marking it so that if it is grabbed
173  * for another purpose it will be written out before being
174  * given up (e.g. when writing a partial block where it is
175  * assumed that another write for the same block will soon follow).
176  * This can't be done for magtape, since writes must be done
177  * in the same order as requested.
178  */
179 bdwrite(bp)
180 	register struct buf *bp;
181 {
182 
183 	if ((bp->b_flags & B_DELWRI) == 0) {
184 		bp->b_flags |= B_DELWRI;
185 		reassignbuf(bp, bp->b_vp);
186 		u.u_ru.ru_oublock++;		/* no one paid yet */
187 	}
188 	/*
189 	 * If this is a tape drive, the write must be initiated.
190 	 */
191 	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
192 		bawrite(bp);
193 	} else {
194 		bp->b_flags |= B_DELWRI | B_DONE;
195 		brelse(bp);
196 	}
197 }
198 
199 /*
200  * Release the buffer, start I/O on it, but don't wait for completion.
201  */
202 bawrite(bp)
203 	register struct buf *bp;
204 {
205 
206 	bp->b_flags |= B_ASYNC;
207 	(void) bwrite(bp);
208 }
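
/*
 * The three write interfaces above differ only in when the i/o is
 * started and whether the caller waits:
 *	bwrite(bp)  - start the write now; unless the caller set
 *		      B_ASYNC, wait for completion, release the buffer,
 *		      and return any error.
 *	bdwrite(bp) - mark the buffer dirty (B_DELWRI) and release it
 *		      without starting i/o, so a later write of the same
 *		      block can be absorbed; tape devices are forced out
 *		      immediately via bawrite().
 *	bawrite(bp) - start the write and return without waiting; the
 *		      buffer is released by biodone() when the i/o
 *		      completes.
 */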
209 
210 /*
211  * Release the buffer, with no I/O implied.
212  */
213 brelse(bp)
214 	register struct buf *bp;
215 {
216 	register struct buf *flist;
217 	register s;
218 
219 	trace(TR_BRELSE,
220 	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bufsize), bp->b_lblkno);
221 	/*
222 	 * If a process is waiting for the buffer, or
223 	 * is waiting for a free buffer, awaken it.
224 	 */
225 	if (bp->b_flags&B_WANTED)
226 		wakeup((caddr_t)bp);
227 	if (bfreelist[0].b_flags&B_WANTED) {
228 		bfreelist[0].b_flags &= ~B_WANTED;
229 		wakeup((caddr_t)bfreelist);
230 	}
231 	/*
232 	 * Retry I/O for locked buffers rather than invalidating them.
233 	 */
234 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
235 		bp->b_flags &= ~B_ERROR;
236 
237 	/*
238 	 * Disassociate buffers that are no longer valid.
239 	 */
240 	if (bp->b_flags & (B_NOCACHE|B_ERROR))
241 		bp->b_flags |= B_INVAL;
242 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
243 		if (bp->b_vp)
244 			brelvp(bp);
245 		bp->b_flags &= ~B_DELWRI;
246 	}
247 	/*
248 	 * Stick the buffer back on a free list.
249 	 */
250 	s = splbio();
251 	if (bp->b_bufsize <= 0) {
252 		/* block has no buffer ... put at front of unused buffer list */
253 		flist = &bfreelist[BQ_EMPTY];
254 		binsheadfree(bp, flist);
255 	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
256 		/* block has no info ... put at front of most free list */
257 		flist = &bfreelist[BQ_AGE];
258 		binsheadfree(bp, flist);
259 	} else {
260 		if (bp->b_flags & B_LOCKED)
261 			flist = &bfreelist[BQ_LOCKED];
262 		else if (bp->b_flags & B_AGE)
263 			flist = &bfreelist[BQ_AGE];
264 		else
265 			flist = &bfreelist[BQ_LRU];
266 		binstailfree(bp, flist);
267 	}
268 	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
269 	splx(s);
270 }
271 
272 /*
273  * See if the block is associated with some buffer
274  * (mainly to avoid getting hung up on a wait in breada)
275  */
276 incore(vp, blkno)
277 	struct vnode *vp;
278 	daddr_t blkno;
279 {
280 	register struct buf *bp;
281 	register struct buf *dp;
282 
283 	dp = BUFHASH(vp, blkno);
284 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
285 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
286 		    (bp->b_flags & B_INVAL) == 0)
287 			return (1);
288 	return (0);
289 }
290 
291 /*
292  * Return a block if it is in memory.
293  */
294 baddr(vp, blkno, size, cred, bpp)
295 	struct vnode *vp;
296 	daddr_t blkno;
297 	int size;
298 	struct ucred *cred;
299 	struct buf **bpp;
300 {
301 
302 	if (incore(vp, blkno))
303 		return (bread(vp, blkno, size, cred, bpp));
304 	*bpp = 0;
305 	return (0);
306 }
307 
308 /*
309  * Assign a buffer for the given block.  If the appropriate
310  * block is already associated, return it; otherwise search
311  * for the oldest non-busy buffer and reassign it.
312  *
313  * We use splx here because this routine may be called
314  * on the interrupt stack during a dump, and we don't
315  * want to lower the ipl back to 0.
316  */
317 struct buf *
318 getblk(vp, blkno, size)
319 	register struct vnode *vp;
320 	daddr_t blkno;
321 	int size;
322 {
323 	register struct buf *bp, *dp;
324 	int s;
325 
326 	if (size > MAXBSIZE)
327 		panic("getblk: size too big");
328 	/*
329 	 * To prevent overflow of 32-bit ints when converting block
330 	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
331 	 * to the maximum number that can be converted to a byte offset
332 	 * without overflow. This is historic code; what bug it fixed,
333 	 * or whether it is still a reasonable thing to do is open to
334 	 * dispute. mkm 9/85
335 	 *
336 	 * Make it a panic to see if it ever really happens. mkm 11/89
337 	 */
338 	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT)) {
339 		panic("getblk: blkno too big");
340 		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
341 	}
342 	/*
343 	 * Search the cache for the block.  If we hit, but
344 	 * the buffer is in use for i/o, then we wait until
345 	 * the i/o has completed.
346 	 */
347 	dp = BUFHASH(vp, blkno);
348 loop:
349 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
350 		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
351 		    bp->b_flags&B_INVAL)
352 			continue;
353 		s = splbio();
354 		if (bp->b_flags&B_BUSY) {
355 			bp->b_flags |= B_WANTED;
356 			sleep((caddr_t)bp, PRIBIO+1);
357 			splx(s);
358 			goto loop;
359 		}
360 		bremfree(bp);
361 		bp->b_flags |= B_BUSY;
362 		splx(s);
363 		if (bp->b_bcount != size) {
364 			printf("getblk: stray size\n");
365 			bp->b_flags |= B_INVAL;
366 			bwrite(bp);
367 			goto loop;
368 		}
369 		bp->b_flags |= B_CACHE;
370 		return (bp);
371 	}
372 	bp = getnewbuf();
373 	bfree(bp);
374 	bremhash(bp);
375 	bgetvp(vp, bp);
376 	bp->b_lblkno = blkno;
377 	bp->b_blkno = blkno;
378 	bp->b_error = 0;
379 	bp->b_resid = 0;
380 	binshash(bp, dp);
381 	brealloc(bp, size);
382 	return (bp);
383 }
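
/*
 * Note: getblk() returns the buffer locked (B_BUSY) and, on a cache
 * hit, marked B_CACHE; the contents are valid only if B_DONE or
 * B_DELWRI is also set (which is what bread() checks above).  The
 * caller must eventually release the buffer with brelse(), bwrite(),
 * or bdwrite().
 */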
384 
385 /*
386  * Get an empty block,
387  * not assigned to any particular device.
388  */
389 struct buf *
390 geteblk(size)
391 	int size;
392 {
393 	register struct buf *bp, *flist;
394 
395 	if (size > MAXBSIZE)
396 		panic("geteblk: size too big");
397 	bp = getnewbuf();
398 	bp->b_flags |= B_INVAL;
399 	bfree(bp);
400 	bremhash(bp);
401 	flist = &bfreelist[BQ_AGE];
402 	bp->b_error = 0;
403 	bp->b_resid = 0;
404 	binshash(bp, flist);
405 	brealloc(bp, size);
406 	return (bp);
407 }
408 
409 /*
410  * Allocate space associated with a buffer.
411  */
412 brealloc(bp, size)
413 	register struct buf *bp;
414 	int size;
415 {
416 	daddr_t start, last;
417 	register struct buf *ep;
418 	struct buf *dp;
419 	int s;
420 
421 	if (size == bp->b_bcount)
422 		return;
423 	allocbuf(bp, size);
424 }
425 
426 /*
427  * Find a buffer which is available for use.
428  * Select something from a free list.
429  * Preference is to AGE list, then LRU list.
430  */
431 struct buf *
432 getnewbuf()
433 {
434 	register struct buf *bp, *dp;
435 	register struct ucred *cred;
436 	int s;
437 
438 loop:
439 	s = splbio();
440 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
441 		if (dp->av_forw != dp)
442 			break;
443 	if (dp == bfreelist) {		/* no free blocks */
444 		dp->b_flags |= B_WANTED;
445 		sleep((caddr_t)dp, PRIBIO+1);
446 		splx(s);
447 		goto loop;
448 	}
449 	bp = dp->av_forw;
450 	bremfree(bp);
451 	bp->b_flags |= B_BUSY;
452 	splx(s);
453 	if (bp->b_flags & B_DELWRI) {
454 		(void) bawrite(bp);
455 		goto loop;
456 	}
457 	trace(TR_BRELSE,
458 	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bufsize), bp->b_lblkno);
459 	if (bp->b_vp)
460 		brelvp(bp);
461 	if (bp->b_rcred != NOCRED) {
462 		cred = bp->b_rcred;
463 		bp->b_rcred = NOCRED;
464 		crfree(cred);
465 	}
466 	if (bp->b_wcred != NOCRED) {
467 		cred = bp->b_wcred;
468 		bp->b_wcred = NOCRED;
469 		crfree(cred);
470 	}
471 	bp->b_flags = B_BUSY;
472 	return (bp);
473 }
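
/*
 * Note: getnewbuf() hands back a B_BUSY buffer whose old vnode
 * association and credentials have been released; a delayed-write
 * buffer found at the head of a free list is written out
 * asynchronously and the search retried, rather than its data being
 * discarded.
 */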
474 
475 /*
476  * Wait for I/O completion on the buffer; return errors
477  * to the user.
478  */
479 biowait(bp)
480 	register struct buf *bp;
481 {
482 	int s;
483 
484 	s = splbio();
485 	while ((bp->b_flags & B_DONE) == 0)
486 		sleep((caddr_t)bp, PRIBIO);
487 	splx(s);
488 	/*
489 	 * Pick up the device's error number and pass it to the user;
490 	 * if there is an error but the number is 0, set a generalized code.
491 	 */
492 	if ((bp->b_flags & B_ERROR) == 0)
493 		return (0);
494 	if (bp->b_error)
495 		return (bp->b_error);
496 	return (EIO);
497 }
498 
499 /*
500  * Mark I/O complete on a buffer.
501  * If a callback has been requested, e.g. by the pageout
502  * daemon, invoke it.  Otherwise, wake up anyone
503  * waiting for the buffer.
504  */
505 biodone(bp)
506 	register struct buf *bp;
507 {
508 	register struct vnode *vp;
509 
510 	if (bp->b_flags & B_DONE)
511 		panic("dup biodone");
512 	bp->b_flags |= B_DONE;
513 	if ((bp->b_flags & B_READ) == 0) {
514 		bp->b_dirtyoff = bp->b_dirtyend = 0;
515 		if (vp = bp->b_vp) {
516 			vp->v_numoutput--;
517 			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
518 				if (vp->v_numoutput < 0)
519 					panic("biodone: neg numoutput");
520 				vp->v_flag &= ~VBWAIT;
521 				wakeup((caddr_t)&vp->v_numoutput);
522 			}
523 		}
524 	}
525 	if (bp->b_flags & B_CALL) {
526 		bp->b_flags &= ~B_CALL;
527 		(*bp->b_iodone)(bp);
528 		return;
529 	}
530 	if (bp->b_flags&B_ASYNC)
531 		brelse(bp);
532 	else {
533 		bp->b_flags &= ~B_WANTED;
534 		wakeup((caddr_t)bp);
535 	}
536 }
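
/*
 * Example (illustrative; "mydone" is a placeholder): a caller that
 * wants a callback at i/o completion instead of a wakeup sets B_CALL
 * and b_iodone before starting the i/o; biodone() then invokes the
 * function and returns, leaving the callback responsible for releasing
 * the buffer:
 *
 *	bp->b_flags |= B_READ | B_ASYNC | B_CALL;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 */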
537 
538 /*
539  * Make sure all write-behind blocks associated
540  * with the mount point are flushed out (from sync).
541  */
542 mntflushbuf(mountp, flags)
543 	struct mount *mountp;
544 	int flags;
545 {
546 	register struct vnode *vp;
547 	struct vnode *nvp;
548 
549 loop:
550 	for (vp = mountp->m_mounth; vp; vp = nvp) {
551 		nvp = vp->v_mountf;
552 		if (vget(vp))
553 			goto loop;
554 		vflushbuf(vp, flags);
555 		vput(vp);
556 	}
557 }
558 
559 /*
560  * Flush all dirty buffers associated with a vnode.
561  */
562 vflushbuf(vp, flags)
563 	register struct vnode *vp;
564 	int flags;
565 {
566 	register struct buf *bp;
567 	struct buf *nbp;
568 	int s;
569 
570 loop:
571 	s = splbio();
572 	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
573 		nbp = bp->b_blockf;
574 		if ((bp->b_flags & B_BUSY))
575 			continue;
576 		if ((bp->b_flags & B_DELWRI) == 0)
577 			panic("vflushbuf: not dirty");
578 		bremfree(bp);
579 		bp->b_flags |= B_BUSY;
580 		splx(s);
581 		/*
582 		 * Wait for I/O associated with indirect blocks to complete,
583 		 * since there is no way to quickly wait for them below.
584 		 * NB - This is really specific to ufs, but is done here
585 		 * as it is easier and quicker.
586 		 */
587 		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
588 			(void) bawrite(bp);
589 		} else {
590 			(void) bwrite(bp);
591 			goto loop;
592 		}
593 	}
594 	splx(s);
595 	if ((flags & B_SYNC) == 0)
596 		return;
597 	s = splbio();
598 	while (vp->v_numoutput) {
599 		vp->v_flag |= VBWAIT;
600 		sleep((caddr_t)&vp->v_numoutput, PRIBIO+1);
601 	}
602 	splx(s);
603 	if (vp->v_dirtyblkhd) {
604 		vprint("vflushbuf: dirty", vp);
605 		goto loop;
606 	}
607 }
608 
609 /*
610  * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
611  *
612  * Go through the list of vnodes associated with the file system;
613  * for each vnode invalidate any buffers that it holds. Normally
614  * this routine is preceded by a bflush call, so that on a quiescent
615  * filesystem there will be no dirty buffers when we are done. This
616  * routine returns the count of dirty buffers when it is finished.
617  */
618 mntinvalbuf(mountp)
619 	struct mount *mountp;
620 {
621 	register struct vnode *vp;
622 	struct vnode *nvp;
623 	int dirty = 0;
624 
625 loop:
626 	for (vp = mountp->m_mounth; vp; vp = nvp) {
627 		nvp = vp->v_mountf;
628 		if (vget(vp))
629 			goto loop;
630 		dirty += vinvalbuf(vp, 1);
631 		vput(vp);
632 	}
633 	return (dirty);
634 }
635 
636 /*
637  * Flush out and invalidate all buffers associated with a vnode.
638  * Called with the underlying object locked.
639  */
640 vinvalbuf(vp, save)
641 	register struct vnode *vp;
642 	int save;
643 {
644 	register struct buf *bp;
645 	struct buf *nbp, *blist;
646 	int s, dirty = 0;
647 
648 	for (;;) {
649 		if (blist = vp->v_dirtyblkhd)
650 			/* void */;
651 		else if (blist = vp->v_cleanblkhd)
652 			/* void */;
653 		else
654 			break;
655 		for (bp = blist; bp; bp = nbp) {
656 			nbp = bp->b_blockf;
657 			s = splbio();
658 			if (bp->b_flags & B_BUSY) {
659 				bp->b_flags |= B_WANTED;
660 				sleep((caddr_t)bp, PRIBIO+1);
661 				splx(s);
662 				break;
663 			}
664 			bremfree(bp);
665 			bp->b_flags |= B_BUSY;
666 			splx(s);
667 			if (save && (bp->b_flags & B_DELWRI)) {
668 				dirty++;
669 				(void) bwrite(bp);
670 				break;
671 			}
672 			bp->b_flags |= B_INVAL;
673 			brelse(bp);
674 		}
675 	}
676 	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
677 		panic("vinvalbuf: flush failed");
678 	return (dirty);
679 }
680 
681 /*
682  * Associate a buffer with a vnode.
683  */
684 bgetvp(vp, bp)
685 	register struct vnode *vp;
686 	register struct buf *bp;
687 {
688 
689 	if (bp->b_vp)
690 		panic("bgetvp: not free");
691 	VHOLD(vp);
692 	bp->b_vp = vp;
693 	if (vp->v_type == VBLK || vp->v_type == VCHR)
694 		bp->b_dev = vp->v_rdev;
695 	else
696 		bp->b_dev = NODEV;
697 	/*
698 	 * Insert onto list for new vnode.
699 	 */
700 	if (vp->v_cleanblkhd) {
701 		bp->b_blockf = vp->v_cleanblkhd;
702 		bp->b_blockb = &vp->v_cleanblkhd;
703 		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
704 		vp->v_cleanblkhd = bp;
705 	} else {
706 		vp->v_cleanblkhd = bp;
707 		bp->b_blockb = &vp->v_cleanblkhd;
708 		bp->b_blockf = NULL;
709 	}
710 }
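
/*
 * Note on the links used above and in brelvp()/reassignbuf(): each
 * vnode's clean and dirty buffer lists are singly linked through
 * b_blockf, with b_blockb pointing back at whatever pointer names this
 * buffer (the list head or the previous buffer's b_blockf), so a
 * buffer can be unlinked without walking the list.
 */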
711 
712 /*
713  * Disassociate a buffer from a vnode.
714  */
715 brelvp(bp)
716 	register struct buf *bp;
717 {
718 	struct buf *bq;
719 	struct vnode *vp;
720 
721 	if (bp->b_vp == (struct vnode *) 0)
722 		panic("brelvp: NULL");
723 	/*
724 	 * Delete from old vnode list, if on one.
725 	 */
726 	if (bp->b_blockb) {
727 		if (bq = bp->b_blockf)
728 			bq->b_blockb = bp->b_blockb;
729 		*bp->b_blockb = bq;
730 		bp->b_blockf = NULL;
731 		bp->b_blockb = NULL;
732 	}
733 	vp = bp->b_vp;
734 	bp->b_vp = (struct vnode *) 0;
735 	HOLDRELE(vp);
736 }
737 
738 /*
739  * Reassign a buffer from one vnode to another.
740  * Used to assign file specific control information
741  * (indirect blocks) to the vnode to which they belong.
742  */
743 reassignbuf(bp, newvp)
744 	register struct buf *bp;
745 	register struct vnode *newvp;
746 {
747 	register struct buf *bq, **listheadp;
748 
749 	if (newvp == NULL)
750 		panic("reassignbuf: NULL");
751 	/*
752 	 * Delete from old vnode list, if on one.
753 	 */
754 	if (bp->b_blockb) {
755 		if (bq = bp->b_blockf)
756 			bq->b_blockb = bp->b_blockb;
757 		*bp->b_blockb = bq;
758 	}
759 	/*
760 	 * If dirty, put on list of dirty buffers;
761 	 * otherwise insert onto list of clean buffers.
762 	 */
763 	if (bp->b_flags & B_DELWRI)
764 		listheadp = &newvp->v_dirtyblkhd;
765 	else
766 		listheadp = &newvp->v_cleanblkhd;
767 	if (*listheadp) {
768 		bp->b_blockf = *listheadp;
769 		bp->b_blockb = listheadp;
770 		bp->b_blockf->b_blockb = &bp->b_blockf;
771 		*listheadp = bp;
772 	} else {
773 		*listheadp = bp;
774 		bp->b_blockb = listheadp;
775 		bp->b_blockf = NULL;
776 	}
777 }
778