xref: /csrg-svn/sys/kern/vfs_cluster.c (revision 40652)
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vfs_cluster.c	7.25 (Berkeley) 03/27/90
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "specdev.h"
#include "mount.h"
#include "trace.h"
#include "ucred.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags&(B_DONE|B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

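/*
 * Illustrative usage (not part of the original source): a filesystem
 * read path would typically call bread() and release the buffer once
 * it is done with the data, along the lines of
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, cred, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... copy from bp->b_un.b_addr ...
 *	brelse(bp);
 *
 * where lbn, bsize, and cred are assumed placeholders for the logical
 * block number, block size, and caller's credentials.
 */
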
/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller)
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			u.u_ru.ru_inblock++;		/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, and just wait for it.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}

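/*
 * Illustrative usage (not part of the original source): a sequential
 * read path that already knows the next logical block can prime the
 * cache with something like
 *
 *	error = breada(vp, lbn, bsize, lbn + 1, bsize, cred, &bp);
 *
 * so that the transfer for lbn + 1 is already in progress by the time
 * the caller asks for it.  Here lbn, bsize, and cred are assumed
 * placeholders; real callers map logical to filesystem blocks first.
 */
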
/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int s, error;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	else
		reassignbuf(bp, bp->b_vp);
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the q of blocks awaiting i/o completion status.
	 * Otherwise the write was asynchronous and its status is not
	 * yet known, so report success and let biodone finish up.
	 */
	if ((flag&B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	} else
		error = 0;
	return (error);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		u.u_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

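/*
 * Illustrative note (not part of the original source): bwrite() is the
 * synchronous interface, bdwrite() marks the buffer dirty and defers
 * the physical write, and bawrite() starts the write without waiting.
 * A caller updating a partial block it expects to write again soon
 * would typically use
 *
 *	bdwrite(bp);
 *
 * while one that has just filled a block being written sequentially
 * might use bawrite(bp), reserving bwrite(bp) for data that must reach
 * stable storage before the caller continues.  The calling contexts
 * described here are assumed, not taken from this file.
 */
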
/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;

	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE|B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada)
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return a block if it is in memory.
 */
baddr(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{

	if (incore(vp, blkno))
		return (bread(vp, blkno, size, cred, bpp));
	*bpp = 0;
	return (0);
}

/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * To prevent overflow of 32-bit ints when converting block
	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
	 * to the maximum number that can be converted to a byte offset
	 * without overflow. This is historic code; what bug it fixed,
	 * or whether it is still a reasonable thing to do is open to
	 * dispute. mkm 9/85
	 *
	 * Make it a panic to see if it ever really happens. mkm 11/89
	 */
	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT)) {
		panic("getblk: blkno too big");
		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
	}
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    bp->b_flags&B_INVAL)
			continue;
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	brealloc(bp, size);
	return (bp);
}

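/*
 * Illustrative usage (not part of the original source): a filesystem
 * that is about to overwrite an entire block need not read it first;
 * it can claim a buffer directly,
 *
 *	bp = getblk(vp, lbn, bsize);
 *	... fill bp->b_un.b_addr completely ...
 *	bdwrite(bp);
 *
 * whereas a partial-block update goes through bread() so the existing
 * contents are preserved.  lbn and bsize are assumed placeholders.
 */
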
/*
 * Get an empty block,
 * not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	brealloc(bp, size);
	return (bp);
}

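/*
 * Illustrative usage (not part of the original source): geteblk() is
 * for transient buffers that belong to no file, for example scratch
 * space while reading a disk label or similar metadata:
 *
 *	bp = geteblk(bsize);
 *	... use bp->b_un.b_addr as temporary storage ...
 *	bp->b_flags |= B_INVAL;
 *	brelse(bp);
 *
 * Marking the buffer B_INVAL before brelse() keeps the stale contents
 * from being found in the cache later.  bsize is an assumed
 * placeholder.
 */
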
/*
 * Allocate space associated with a buffer.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{

	if (size == bp->b_bcount)
		return;
	allocbuf(bp, size);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	/*
	 * Pick up the device's error number and pass it to the user;
	 * if there is an error but the number is 0 set a generalized code.
	 */
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 * If someone should be called, e.g. the pageout
 * daemon, do so.  Otherwise, wake up anyone
 * waiting for it.
 */
biodone(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				if (vp->v_numoutput < 0)
					panic("biodone: neg numoutput");
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
	}
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

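/*
 * Illustrative usage (not part of the original source): a caller that
 * wants a completion callback instead of sleeping in biowait() sets
 * B_CALL and b_iodone before starting the transfer, along the lines of
 *
 *	bp->b_flags |= B_READ | B_ASYNC | B_CALL;
 *	bp->b_iodone = mydone;		(mydone is a hypothetical routine)
 *	VOP_STRATEGY(bp);
 *
 * biodone() then invokes the routine, typically at interrupt level,
 * rather than releasing the buffer or waking a sleeper itself, so the
 * routine is responsible for eventually releasing the buffer.
 */
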
/*
 * Make sure all write-behind blocks associated
 * with mount point are flushed out (from sync).
 */
mntflushbuf(mountp, flags)
	struct mount *mountp;
	int flags;
{
	register struct vnode *vp;
	struct vnode *nvp;

loop:
	for (vp = mountp->m_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		if (vget(vp))
			goto loop;
		vflushbuf(vp, flags);
		vput(vp);
	}
}

/*
 * Flush all dirty buffers associated with a vnode.
 */
vflushbuf(vp, flags)
	register struct vnode *vp;
	int flags;
{
	register struct buf *bp;
	struct buf *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
		nbp = bp->b_blockf;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 * NB - This is really specific to ufs, but is done here
		 * as it is easier and quicker.
		 */
		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
			(void) bawrite(bp);
			s = splbio();
		} else {
			(void) bwrite(bp);
			goto loop;
		}
	}
	splx(s);
	if ((flags & B_SYNC) == 0)
		return;
	s = splbio();
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		sleep((caddr_t)&vp->v_numoutput, PRIBIO+1);
	}
	splx(s);
	if (vp->v_dirtyblkhd) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

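/*
 * Illustrative usage (not part of the original source): a filesystem
 * fsync routine would typically push a vnode's dirty buffers and wait
 * for them with
 *
 *	vflushbuf(vp, B_SYNC);
 *
 * while a periodic sync path would call mntflushbuf() on each mounted
 * filesystem without B_SYNC so the writes go out asynchronously.  The
 * calling contexts described here are assumed, not taken from this
 * file.
 */
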
/*
 * Invalidate in core blocks belonging to closed or unmounted filesystem
 *
 * Go through the list of vnodes associated with the file system;
 * for each vnode invalidate any buffers that it holds. Normally
 * this routine is preceded by a bflush call, so that on a quiescent
 * filesystem there will be no dirty buffers when we are done. This
 * routine returns the count of dirty buffers when it is finished.
 */
mntinvalbuf(mountp)
	struct mount *mountp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int dirty = 0;

loop:
	for (vp = mountp->m_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		if (vget(vp))
			goto loop;
		dirty += vinvalbuf(vp, 1);
		vput(vp);
	}
	return (dirty);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
vinvalbuf(vp, save)
	register struct vnode *vp;
	int save;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (save && (bp->b_flags & B_DELWRI)) {
				dirty++;
				(void) bwrite(bp);
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (dirty);
}

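/*
 * Illustrative usage (not part of the original source): vinvalbuf() is
 * the sort of call a filesystem makes when a vnode is being reclaimed
 * or its backing object is going away, e.g.
 *
 *	(void) vinvalbuf(vp, 1);
 *
 * passing a nonzero save argument so that any delayed-write data is
 * pushed out before the buffers are invalidated; mntinvalbuf() above
 * applies the same treatment to every vnode on a mount point.
 */
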
/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (vp->v_cleanblkhd) {
		bp->b_blockf = vp->v_cleanblkhd;
		bp->b_blockb = &vp->v_cleanblkhd;
		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
		vp->v_cleanblkhd = bp;
	} else {
		vp->v_cleanblkhd = bp;
		bp->b_blockb = &vp->v_cleanblkhd;
		bp->b_blockf = NULL;
	}
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL)
		panic("reassignbuf: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (*listheadp) {
		bp->b_blockf = *listheadp;
		bp->b_blockb = listheadp;
		bp->b_blockf->b_blockb = &bp->b_blockf;
		*listheadp = bp;
	} else {
		*listheadp = bp;
		bp->b_blockb = listheadp;
		bp->b_blockf = NULL;
	}
}
778