xref: /csrg-svn/sys/kern/vfs_cluster.c (revision 41400)
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vfs_cluster.c	7.27 (Berkeley) 05/04/90
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "trace.h"
#include "ucred.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags&(B_DONE|B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
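
/*
 * Usage sketch (illustrative only; "lbn" and "bsize" are hypothetical
 * caller-supplied values, and the data pointer is assumed to be the
 * traditional b_un.b_addr field):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... copy data out of bp->b_un.b_addr ...
 *	brelse(bp);
 */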

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			u.u_ru.ru_inblock++;		/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, and just wait for it.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
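
/*
 * Usage sketch (illustrative only; sequential read of logical block
 * "lbn" while prefetching "lbn + 1", with hypothetical sizes):
 *
 *	error = breada(vp, lbn, bsize, lbn + 1, bsize, NOCRED, &bp);
 *
 * The caller gets back only the buffer for lbn; the read-ahead buffer
 * is started B_ASYNC and released by biodone when its i/o completes.
 */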

/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int s, error;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	else
		reassignbuf(bp, bp->b_vp);
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the q of blocks awaiting i/o completion status.
	 */
	if ((flag&B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	} else
		error = 0;	/* async: any error shows up via biodone */
	return (error);
}
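
/*
 * Usage sketch (illustrative only): a synchronous write of a buffer
 * obtained earlier from bread or getblk.  bwrite consumes the buffer,
 * so the caller must not reference bp afterwards:
 *
 *	... modify bp->b_un.b_addr ...
 *	error = bwrite(bp);
 */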

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		u.u_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}
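
/*
 * Usage sketch (illustrative only): the classic partial-block write
 * pattern, where the rest of the block is expected to be written soon,
 * so the physical write is deferred:
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	...
 *	... copy new data into part of bp->b_un.b_addr ...
 *	bdwrite(bp);
 */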

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;

	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE|B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
	splx(s);
}
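
/*
 * Usage note (illustrative only): a caller that knows a block is
 * unlikely to be reused soon can hint that it should be reclaimed
 * early by marking it before release:
 *
 *	bp->b_flags |= B_AGE;
 *	brelse(bp);
 *
 * The buffer then goes to the tail of the BQ_AGE list rather than
 * the LRU list, per the queue selection above.
 */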

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return a block if it is in memory.
 */
baddr(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{

	if (incore(vp, blkno))
		return (bread(vp, blkno, size, cred, bpp));
	*bpp = 0;
	return (0);
}

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * To prevent overflow of 32-bit ints when converting block
	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
	 * to the maximum number that can be converted to a byte offset
	 * without overflow. This is historic code; what bug it fixed,
	 * or whether it is still a reasonable thing to do is open to
	 * dispute. mkm 9/85
	 *
	 * Make it a panic to see if it ever really happens. mkm 11/89
	 */
	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT)) {
		panic("getblk: blkno too big");
		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
	}
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    bp->b_flags&B_INVAL)
			continue;
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	brealloc(bp, size);
	return (bp);
}
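
/*
 * Usage sketch (illustrative only): obtaining a buffer whose contents
 * will be completely overwritten, so no read is needed:
 *
 *	bp = getblk(vp, lbn, bsize);
 *	... fill bp->b_un.b_addr entirely ...
 *	bdwrite(bp);
 *
 * A caller that cares whether the contents are valid can test
 * B_CACHE, which is set above only on a cache hit.
 */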

/*
 * Get an empty block, not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	brealloc(bp, size);
	return (bp);
}
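
/*
 * Usage sketch (illustrative only): borrowing a buffer as scratch
 * space, with no block or vnode association:
 *
 *	bp = geteblk(bsize);
 *	... use bp->b_un.b_addr as temporary storage ...
 *	brelse(bp);
 */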

/*
 * Allocate space associated with a buffer.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{

	if (size == bp->b_bcount)
		return;
	allocbuf(bp, size);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	/*
	 * Pick up the device's error number and pass it to the user;
	 * if there is an error but the number is 0, set a generalized code.
	 */
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				if (vp->v_numoutput < 0)
					panic("biodone: neg numoutput");
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
	}
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
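
/*
 * Usage sketch (illustrative only; "swdone" is a hypothetical
 * completion routine, not part of this file).  A subsystem such as
 * the pageout daemon can request a callback at interrupt time
 * instead of a wakeup:
 *
 *	bp->b_flags |= B_CALL;
 *	bp->b_iodone = swdone;
 *	VOP_STRATEGY(bp);
 *
 * The driver's interrupt handler calls biodone, which then invokes
 * (*b_iodone)(bp) with the buffer still busy.
 */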

/*
 * Make sure all write-behind blocks associated
 * with the mount point are flushed out (from sync).
 */
mntflushbuf(mountp, flags)
	struct mount *mountp;
	int flags;
{
	register struct vnode *vp;
	struct vnode *nvp;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntflushbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		if (vget(vp))
			goto loop;
		vflushbuf(vp, flags);
		vput(vp);
	}
}

/*
 * Flush all dirty buffers associated with a vnode.
 */
vflushbuf(vp, flags)
	register struct vnode *vp;
	int flags;
{
	register struct buf *bp;
	struct buf *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
		nbp = bp->b_blockf;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 * NB - This is really specific to ufs, but is done here
		 * as it is easier and quicker.
		 */
		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
			(void) bawrite(bp);
			s = splbio();
		} else {
			(void) bwrite(bp);
			goto loop;
		}
	}
	splx(s);
	if ((flags & B_SYNC) == 0)
		return;
	s = splbio();
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		sleep((caddr_t)&vp->v_numoutput, PRIBIO+1);
	}
	splx(s);
	if (vp->v_dirtyblkhd) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}
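
/*
 * Usage sketch (illustrative only): an fsync-style operation pushes a
 * vnode's dirty buffers and waits for the writes to finish:
 *
 *	vflushbuf(vp, B_SYNC);
 *
 * Without B_SYNC the writes are started asynchronously and the
 * routine returns without waiting.
 */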

/*
 * Invalidate in core blocks belonging to a closed or unmounted filesystem.
 *
 * Go through the list of vnodes associated with the file system;
 * for each vnode invalidate any buffers that it holds. Normally
 * this routine is preceded by a flush call (mntflushbuf), so that on a
 * quiescent filesystem there will be no dirty buffers when we are done.
 * Mntinvalbuf returns the count of dirty buffers when it is finished.
 */
mntinvalbuf(mountp)
	struct mount *mountp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int dirty = 0;

	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
		panic("mntinvalbuf: not busy");
loop:
	for (vp = mountp->mnt_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		if (vget(vp))
			goto loop;
		dirty += vinvalbuf(vp, 1);
		vput(vp);
	}
	return (dirty);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
vinvalbuf(vp, save)
	register struct vnode *vp;
	int save;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (save && (bp->b_flags & B_DELWRI)) {
				dirty++;
				(void) bwrite(bp);
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (dirty);
}
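
/*
 * Usage sketch (illustrative only): discarding a vnode's cached
 * blocks when it is being reclaimed, writing back anything dirty
 * first:
 *
 *	dirty = vinvalbuf(vp, 1);
 *
 * Passing 0 for "save" throws dirty buffers away (they are marked
 * B_INVAL and released) instead of writing them.
 */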

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (vp->v_cleanblkhd) {
		bp->b_blockf = vp->v_cleanblkhd;
		bp->b_blockb = &vp->v_cleanblkhd;
		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
		vp->v_cleanblkhd = bp;
	} else {
		vp->v_cleanblkhd = bp;
		bp->b_blockb = &vp->v_cleanblkhd;
		bp->b_blockf = NULL;
	}
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL)
		panic("reassignbuf: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (*listheadp) {
		bp->b_blockf = *listheadp;
		bp->b_blockb = listheadp;
		bp->b_blockf->b_blockb = &bp->b_blockf;
		*listheadp = bp;
	} else {
		*listheadp = bp;
		bp->b_blockb = listheadp;
		bp->b_blockf = NULL;
	}
}