/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)vfs_cluster.c	7.23 (Berkeley) 03/06/90
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "mount.h"
#include "trace.h"
#include "ucred.h"

/*
 * Read in (if necessary) the block and return a buffer pointer.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size);
	if (bp->b_flags&(B_DONE|B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	u.u_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

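/*
 * Illustrative sketch (not from the original source) of a typical bread()
 * caller, e.g. a filesystem read path; "lbn" and "bsize" stand for the
 * caller's logical block number and block size.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... copy data out of the buffer ...
 *	brelse(bp);
 */
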
/*
 * Read in the block, like bread, but also start I/O on the
 * read-ahead block (which is not allocated to the caller).
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp, *rabp;

	bp = NULL;
	/*
	 * If the block isn't in core, then allocate
	 * a buffer and initiate i/o (getblk checks
	 * for a cache hit).
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size);
		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breada");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			u.u_ru.ru_inblock++;		/* pay for read */
		} else
			trace(TR_BREADHIT, pack(vp, size), blkno);
	}

	/*
	 * If there's a read-ahead block, start i/o
	 * on it also (as above).
	 */
	if (!incore(vp, rablkno)) {
		rabp = getblk(vp, rablkno, rabsize);
		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
		} else {
			rabp->b_flags |= B_READ|B_ASYNC;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
			u.u_ru.ru_inblock++;		/* pay in advance */
		}
	}

	/*
	 * If block was in core, let bread get it.
	 * If block wasn't in core, then the read was started
	 * above, and just wait for it.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}

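/*
 * Illustrative sketch (not from the original source): a sequential read
 * loop would typically ask breada() to prefetch the next logical block.
 * "lbn", "bsize", and "readahead_wanted" are placeholders for the
 * caller's values, not identifiers from this file.
 *
 *	if (readahead_wanted)
 *		error = breada(vp, lbn, bsize, lbn + 1, bsize, NOCRED, &bp);
 *	else
 *		error = bread(vp, lbn, bsize, NOCRED, &bp);
 */
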
/*
 * Write the buffer, waiting for completion.
 * Then release the buffer.
 */
bwrite(bp)
	register struct buf *bp;
{
	register int flag;
	int s, error = 0;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if ((flag&B_DELWRI) == 0)
		u.u_ru.ru_oublock++;		/* no one paid yet */
	else
		reassignbuf(bp, bp->b_vp);
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await i/o completion.
	 * If the write was "delayed", then we put the buffer on
	 * the q of blocks awaiting i/o completion status.
	 * For an asynchronous write that was not delayed there is
	 * nothing to wait for, and no error is reported.
	 */
	if ((flag&B_ASYNC) == 0) {
		error = biowait(bp);
		brelse(bp);
	} else if (flag & B_DELWRI) {
		bp->b_flags |= B_AGE;
		error = 0;
	}
	return (error);
}

/*
 * Release the buffer, marking it so that if it is grabbed
 * for another purpose it will be written out before being
 * given up (e.g. when writing a partial block where it is
 * assumed that another write for the same block will soon follow).
 * This can't be done for magtape, since writes must be done
 * in the same order as requested.
 */
bdwrite(bp)
	register struct buf *bp;
{

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		u.u_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= B_DELWRI | B_DONE;
		brelse(bp);
	}
}

/*
 * Release the buffer, start I/O on it, but don't wait for completion.
 */
bawrite(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

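/*
 * In summary, the three write interfaces above differ only in when the
 * I/O actually happens and who waits for it:
 *
 *	bwrite(bp)	synchronous: start the I/O, sleep in biowait(),
 *			then brelse() the buffer;
 *	bawrite(bp)	asynchronous: start the I/O and return at once;
 *			biodone() releases the buffer when it completes;
 *	bdwrite(bp)	delayed: just mark the buffer B_DELWRI and release
 *			it; the data goes to disk later, when the buffer
 *			is reclaimed or flushed (e.g. by vflushbuf()).
 */
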
/*
 * Release the buffer, with no I/O implied.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct buf *flist;
	register s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags&B_WANTED)
		wakeup((caddr_t)bp);
	if (bfreelist[0].b_flags&B_WANTED) {
		bfreelist[0].b_flags &= ~B_WANTED;
		wakeup((caddr_t)bfreelist);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;

	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE|B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	s = splbio();
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bfreelist[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bfreelist[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bfreelist[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bfreelist[BQ_AGE];
		else
			flist = &bfreelist[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
	splx(s);
}

/*
 * See if the block is associated with some buffer
 * (mainly to avoid getting hung up on a wait in breada)
 */
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;
	register struct buf *dp;

	dp = BUFHASH(vp, blkno);
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (1);
	return (0);
}

/*
 * Return a block if it is in memory.
 */
baddr(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{

	if (incore(vp, blkno))
		return (bread(vp, blkno, size, cred, bpp));
	*bpp = 0;
	return (0);
}

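/*
 * Illustrative sketch (not from the original source) of how a caller uses
 * baddr() when it wants a block only if no physical read is required;
 * "lbn" and "bsize" are placeholders.
 *
 *	if (error = baddr(vp, lbn, bsize, NOCRED, &bp))
 *		return (error);
 *	if (bp == NULL)
 *		... block not cached; take the slow path ...
 *	else
 *		... bp is busy and valid; use it, then brelse(bp) ...
 */
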
/*
 * Assign a buffer for the given block.  If the appropriate
 * block is already associated, return it; otherwise search
 * for the oldest non-busy buffer and reassign it.
 *
 * We use splx here because this routine may be called
 * on the interrupt stack during a dump, and we don't
 * want to lower the ipl back to 0.
 */
struct buf *
getblk(vp, blkno, size)
	register struct vnode *vp;
	daddr_t blkno;
	int size;
{
	register struct buf *bp, *dp;
	int s;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * To prevent overflow of 32-bit ints when converting block
	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
	 * to the maximum number that can be converted to a byte offset
	 * without overflow. This is historic code; what bug it fixed,
	 * or whether it is still a reasonable thing to do is open to
	 * dispute. mkm 9/85
	 *
	 * Make it a panic to see if it ever really happens. mkm 11/89
	 */
	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT)) {
		panic("getblk: blkno too big");
		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
	}
	/*
	 * Search the cache for the block.  If we hit, but
	 * the buffer is in use for i/o, then we wait until
	 * the i/o has completed.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
		    bp->b_flags&B_INVAL)
			continue;
		s = splbio();
		if (bp->b_flags&B_BUSY) {
			bp->b_flags |= B_WANTED;
			sleep((caddr_t)bp, PRIBIO+1);
			splx(s);
			goto loop;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	bp = getnewbuf();
	bfree(bp);
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	brealloc(bp, size);
	return (bp);
}

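/*
 * Illustrative sketch (not from the original source): a filesystem that is
 * allocating a brand new block has no interest in its old contents, so it
 * takes the buffer with getblk(), fills it, and schedules the write.
 * "lbn" and "bsize" are placeholders; clrbuf() is assumed to be available
 * to zero a freshly assigned buffer.
 *
 *	bp = getblk(vp, lbn, bsize);
 *	clrbuf(bp);
 *	... copy the new data into the buffer ...
 *	bdwrite(bp);
 */
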
/*
 * Get an empty block, not assigned to any particular device.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp, *flist;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	bp = getnewbuf();
	bp->b_flags |= B_INVAL;
	bfree(bp);
	bremhash(bp);
	flist = &bfreelist[BQ_AGE];
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, flist);
	brealloc(bp, size);
	return (bp);
}

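/*
 * Illustrative sketch (not from the original source): geteblk() hands back
 * a buffer with no block identity, useful as temporary scratch space.
 * Since the buffer is marked B_INVAL, releasing it simply returns the
 * memory to the pool.
 *
 *	bp = geteblk(bsize);
 *	... use the buffer's data area as scratch ...
 *	brelse(bp);
 */
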
/*
 * Allocate space associated with a buffer.
 */
brealloc(bp, size)
	register struct buf *bp;
	int size;
{

	if (size == bp->b_bcount)
		return;
	allocbuf(bp, size);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf()
{
	register struct buf *bp, *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
		if (dp->av_forw != dp)
			break;
	if (dp == bfreelist) {		/* no free blocks */
		dp->b_flags |= B_WANTED;
		sleep((caddr_t)dp, PRIBIO+1);
		splx(s);
		goto loop;
	}
	bp = dp->av_forw;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	return (bp);
}

/*
 * Wait for I/O completion on the buffer; return errors
 * to the user.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	/*
	 * Pick up the device's error number and pass it to the user;
	 * if there is an error but the number is 0 set a generalized code.
	 */
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 * If a completion routine was requested (B_CALL), e.g. by the
 * pageout daemon, call it.  Otherwise, wake up anyone waiting
 * for the buffer.
 */
biodone(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		if (vp = bp->b_vp) {
			vp->v_numoutput--;
			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
				if (vp->v_numoutput < 0)
					panic("biodone: neg numoutput");
				vp->v_flag &= ~VBWAIT;
				wakeup((caddr_t)&vp->v_numoutput);
			}
		}
	}
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags&B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

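/*
 * Illustrative sketch (not from the original source) of the completion
 * side: a disk driver's interrupt routine finishes a transfer and hands
 * the buffer back through biodone().  "transfer_failed" is a placeholder;
 * the error handling shown is schematic, not taken from any driver.
 *
 *	if (transfer_failed) {
 *		bp->b_flags |= B_ERROR;
 *		bp->b_error = EIO;
 *	}
 *	bp->b_resid = 0;
 *	biodone(bp);
 */
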
/*
 * Make sure all write-behind blocks associated
 * with mount point are flushed out (from sync).
 */
mntflushbuf(mountp, flags)
	struct mount *mountp;
	int flags;
{
	register struct vnode *vp;
	struct vnode *nvp;

loop:
	for (vp = mountp->m_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		if (vget(vp))
			goto loop;
		vflushbuf(vp, flags);
		vput(vp);
	}
}

/*
 * Flush all dirty buffers associated with a vnode.
 */
vflushbuf(vp, flags)
	register struct vnode *vp;
	int flags;
{
	register struct buf *bp;
	struct buf *nbp;
	int s;

loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
		nbp = bp->b_blockf;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("vflushbuf: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Wait for I/O associated with indirect blocks to complete,
		 * since there is no way to quickly wait for them below.
		 * NB - This is really specific to ufs, but is done here
		 * as it is easier and quicker.
		 */
		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
			(void) bawrite(bp);
		} else {
			(void) bwrite(bp);
			goto loop;
		}
	}
	splx(s);
	if ((flags & B_SYNC) == 0)
		return;
	s = splbio();
	while (vp->v_numoutput) {
		vp->v_flag |= VBWAIT;
		sleep((caddr_t)&vp->v_numoutput, PRIBIO+1);
	}
	splx(s);
	if (vp->v_dirtyblkhd) {
		vprint("vflushbuf: dirty", vp);
		goto loop;
	}
}

/*
 * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
 *
 * Go through the list of vnodes associated with the file system;
 * for each vnode invalidate any buffers that it holds.  Normally
 * this routine is preceded by a call to mntflushbuf(), so that on a
 * quiescent filesystem there will be no dirty buffers when we are
 * done.  The return value is the count of dirty buffers encountered.
 */
mntinvalbuf(mountp)
	struct mount *mountp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int dirty = 0;

loop:
	for (vp = mountp->m_mounth; vp; vp = nvp) {
		nvp = vp->v_mountf;
		if (vget(vp))
			goto loop;
		dirty += vinvalbuf(vp, 1);
		vput(vp);
	}
	return (dirty);
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
vinvalbuf(vp, save)
	register struct vnode *vp;
	int save;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, dirty = 0;

	for (;;) {
		if (blist = vp->v_dirtyblkhd)
			/* void */;
		else if (blist = vp->v_cleanblkhd)
			/* void */;
		else
			break;
		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_blockf;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
				splx(s);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			if (save && (bp->b_flags & B_DELWRI)) {
				dirty++;
				(void) bwrite(bp);
				break;
			}
			if (bp->b_vp != vp)
				reassignbuf(bp, bp->b_vp);
			else
				bp->b_flags |= B_INVAL;
			brelse(bp);
		}
	}
	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
		panic("vinvalbuf: flush failed");
	return (dirty);
}

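/*
 * Illustrative sketch (not from the original source) of how the flush and
 * invalidate routines combine, e.g. when a filesystem is being unmounted:
 * write out everything dirty, then discard whatever is left.  A non-zero
 * return from mntinvalbuf() means dirty buffers turned up while we were
 * working, i.e. the filesystem was not quiescent.
 *
 *	mntflushbuf(mountp, flags);
 *	if (mntinvalbuf(mountp))
 *		... filesystem was still active ...
 */
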
/*
 * Associate a buffer with a vnode.
 */
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	if (vp->v_cleanblkhd) {
		bp->b_blockf = vp->v_cleanblkhd;
		bp->b_blockb = &vp->v_cleanblkhd;
		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
		vp->v_cleanblkhd = bp;
	} else {
		vp->v_cleanblkhd = bp;
		bp->b_blockb = &vp->v_cleanblkhd;
		bp->b_blockf = NULL;
	}
}

/*
 * Disassociate a buffer from a vnode.
 */
brelvp(bp)
	register struct buf *bp;
{
	struct buf *bq;
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
		bp->b_blockf = NULL;
		bp->b_blockb = NULL;
	}
	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buf *bq, **listheadp;

	if (newvp == NULL)
		panic("reassignbuf: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_blockb) {
		if (bq = bp->b_blockf)
			bq->b_blockb = bp->b_blockb;
		*bp->b_blockb = bq;
	}
	/*
	 * If dirty, put on list of dirty buffers;
	 * otherwise insert onto list of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI)
		listheadp = &newvp->v_dirtyblkhd;
	else
		listheadp = &newvp->v_cleanblkhd;
	if (*listheadp) {
		bp->b_blockf = *listheadp;
		bp->b_blockb = listheadp;
		bp->b_blockf->b_blockb = &bp->b_blockf;
		*listheadp = bp;
	} else {
		*listheadp = bp;
		bp->b_blockb = listheadp;
		bp->b_blockf = NULL;
	}
}
776