/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vfs_cluster.c	7.22 (Berkeley) 02/25/90
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "mount.h"
#include "trace.h"
#include "ucred.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags&(B_DONE|B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp->v_mount->m_fsid[0], size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp->v_mount->m_fsid[0], size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
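
/*
 * Illustrative sketch, not part of the original source: a typical
 * bread() caller.  The function name and block-mapping details are
 * hypothetical; real callers take the transfer size from the
 * filesystem.  Note that bread() sets *bpp before the i/o, so the
 * caller must brelse() the buffer even when an error is returned.
 */
#ifdef notdef
xxx_readblock(vp, lbn, size, cred)
	struct vnode *vp;
	daddr_t lbn;
	int size;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	if (error = bread(vp, lbn, size, cred, &bp)) {
		brelse(bp);
		return (error);
	}
	/* ... consume the data at bp->b_un.b_addr ... */
	brelse(bp);
	return (0);
}
#endif /* notdef */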

/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp->v_mount->m_fsid[0], size),
			    blkno);
			u.u_ru.ru_inblock++;		/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp->v_mount->m_fsid[0], size),
			    blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA,
			    pack(vp->v_mount->m_fsid[0], rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breada: rabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA,
			    pack(vp->v_mount->m_fsid[0], rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, so just wait for it.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
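
/*
 * Illustrative sketch, not part of the original source: a sequential
 * reader overlaps the transfer of the next logical block with use of
 * the current one by naming lbn + 1 as the read-ahead block.  The
 * variables here are hypothetical.
 */
#ifdef notdef
	error = breada(vp, lbn, size, lbn + 1, size, cred, &bp);
#endif /* notdef */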

/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int s, error = 0;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	else
		reassignbuf(bp, bp->b_vp);
	trace(TR_BWRITE,
	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting i/o completion status.
	 * Asynchronous writes that were not delayed return success
	 * immediately; errors, if any, are handled when the i/o
	 * completes.
	 */
	if ((flag&B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI)
		bp->b_flags |= B_AGE;
	return (error);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		u.u_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Start I/O on the buffer, but don't wait for completion;
 * the buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
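
/*
 * Illustrative summary, not part of the original source: the three
 * write interfaces trade durability against latency.  bwrite() waits
 * for the transfer, bawrite() starts it and returns immediately, and
 * bdwrite() only marks the buffer dirty, leaving the write to a later
 * flush or to reuse of the buffer.  A hypothetical partial-block
 * update might read:
 */
#ifdef notdef
	bp = getblk(vp, lbn, size);
	/* ... modify part of bp->b_un.b_addr ... */
	bdwrite(bp);		/* releases bp; the write happens later */
#endif /* notdef */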

/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE,
	    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;

	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE|B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada).
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return a block if it is already in memory;
 * do not initiate I/O if it is not.
 */
baddr(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{

	if (incore(vp, blkno))
		return (bread(vp, blkno, size, cred, bpp));
	*bpp = 0;
	return (0);
}
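
/*
 * Illustrative sketch, not part of the original source: baddr() yields
 * the block only when it is already cached, so a (hypothetical) caller
 * can fall back to a cheaper strategy instead of sleeping on a read.
 */
#ifdef notdef
	if (error = baddr(vp, lbn, size, cred, &bp))
		return (error);
	if (bp == NULL) {
		/* not cached; compute or fetch the data some other way */
	}
#endif /* notdef */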

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * To prevent overflow of 32-bit ints when converting block
	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
	 * to the maximum number that can be converted to a byte offset
	 * without overflow. This is historic code; what bug it fixed,
	 * or whether it is still a reasonable thing to do is open to
	 * dispute. mkm 9/85
	 *
	 * Make it a panic to see if it ever really happens. mkm 11/89
	 */
	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT)) {
		panic("getblk: blkno too big");
		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
	}
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    bp->b_flags&B_INVAL)
			continue;
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	brealloc(bp, size);
	return (bp);
}
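
/*
 * Illustrative sketch, not part of the original source: a caller that
 * will overwrite an entire block claims it with getblk() so that no
 * read is scheduled, then fills it in; bzero here stands in for the
 * real data.
 */
#ifdef notdef
	bp = getblk(vp, lbn, size);
	bzero(bp->b_un.b_addr, (unsigned)size);
	bawrite(bp);
#endif /* notdef */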

/*
 * Get an empty block, not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	brealloc(bp, size);
	return (bp);
}

/*
 * Allocate space associated with a buffer.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{

	if (size == bp->b_bcount)
		return;
	allocbuf(bp, size);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	if (bp->b_vp) {
		trace(TR_BRELSE,
		    pack(bp->b_vp->v_mount->m_fsid[0], bp->b_bufsize),
		    bp->b_lblkno);
		brelvp(bp);
	}
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	/*
	 * Pick up the device's error number and pass it to the user;
	 * if there is an error but the number is 0, return a
	 * generalized code (EIO).
	 */
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				if (vp->v_numoutput < 0)
					panic("biodone: neg numoutput");
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
	}
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
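
/*
 * Illustrative sketch, not part of the original source: with B_CALL
 * set, biodone() invokes b_iodone at interrupt level instead of waking
 * a sleeper; this is the hook used for pageout-style asynchronous i/o.
 * The handler name is hypothetical.
 */
#ifdef notdef
	bp->b_flags |= B_READ | B_ASYNC | B_CALL;
	bp->b_iodone = xxx_iodone;
	VOP_STRATEGY(bp);
#endif /* notdef */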

/*
 * Make sure all write-behind blocks associated
 * with the mount point are flushed out (from sync).
 */
mntflushbuf(mountp, flags)
	struct mount *mountp;
	int flags;
{
	register struct vnode *vp;
	struct vnode *nvp;

loop:
	for (vp = mountp->m_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		if (vget(vp))
			goto loop;
		vflushbuf(vp, flags);
		vput(vp);
	}
}

/*
 * Flush all dirty buffers associated with a vnode.
 */
vflushbuf(vp, flags)
	register struct vnode *vp;
	int flags;
{
	register struct buf *bp;
	struct buf *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
		nbp = bp->b_blockf;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 * NB - This is really specific to ufs, but is done here
		 * as it is easier and quicker.
		 */
		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
			(void) bawrite(bp);
		} else {
			(void) bwrite(bp);
			goto loop;
		}
	}
	splx(s);
	if ((flags & B_SYNC) == 0)
		return;
	s = splbio();
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		sleep((caddr_t)&vp->v_numoutput, PRIBIO+1);
	}
	splx(s);
	if (vp->v_dirtyblkhd) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Invalidate in core blocks belonging to a closed or unmounted filesystem.
 *
 * Go through the list of vnodes associated with the file system;
 * for each vnode invalidate any buffers that it holds. Normally
 * this routine is preceded by a bflush call, so that on a quiescent
 * filesystem there will be no dirty buffers when we are done. This
 * routine returns the count of dirty buffers when it is finished.
 */
mntinvalbuf(mountp)
	struct mount *mountp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int dirty = 0;

loop:
	for (vp = mountp->m_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		if (vget(vp))
			goto loop;
		dirty += vinvalbuf(vp, 1);
		vput(vp);
	}
	return (dirty);
}
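
/*
 * Illustrative sketch, not part of the original source: an unmount
 * path would flush synchronously first, so that the invalidation pass
 * should find nothing dirty.
 */
#ifdef notdef
	mntflushbuf(mountp, B_SYNC);
	if (mntinvalbuf(mountp))
		printf("unmount: dirty buffers\n");
#endif /* notdef */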

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
vinvalbuf(vp, save)
	register struct vnode *vp;
	int save;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (save && (bp->b_flags & B_DELWRI)) {
				dirty++;
				(void) bwrite(bp);
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (dirty);
}

/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (vp->v_cleanblkhd) {
		bp->b_blockf = vp->v_cleanblkhd;
		bp->b_blockb = &vp->v_cleanblkhd;
		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
		vp->v_cleanblkhd = bp;
	} else {
		vp->v_cleanblkhd = bp;
		bp->b_blockb = &vp->v_cleanblkhd;
		bp->b_blockf = NULL;
	}
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL)
		panic("reassignbuf: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (*listheadp) {
		bp->b_blockf = *listheadp;
		bp->b_blockb = listheadp;
		bp->b_blockf->b_blockb = &bp->b_blockf;
		*listheadp = bp;
	} else {
		*listheadp = bp;
		bp->b_blockb = listheadp;
		bp->b_blockf = NULL;
	}
}
783