xref: /csrg-svn/sys/kern/vfs_cluster.c (revision 40639)
1 /*
2  * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms are permitted
6  * provided that the above copyright notice and this paragraph are
7  * duplicated in all such forms and that any documentation,
8  * advertising materials, and other materials related to such
9  * distribution and use acknowledge that the software was developed
10  * by the University of California, Berkeley.  The name of the
11  * University may not be used to endorse or promote products derived
12  * from this software without specific prior written permission.
13  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
16  *
17  *	@(#)vfs_cluster.c	7.24 (Berkeley) 03/27/90
18  */
19 
20 #include "param.h"
21 #include "user.h"
22 #include "buf.h"
23 #include "vnode.h"
24 #include "mount.h"
25 #include "trace.h"
26 #include "ucred.h"
27 
28 /*
29  * Read in (if necessary) the block and return a buffer pointer.
30  */
31 bread(vp, blkno, size, cred, bpp)
32 	struct vnode *vp;
33 	daddr_t blkno;
34 	int size;
35 	struct ucred *cred;
36 	struct buf **bpp;
37 {
38 	register struct buf *bp;
39 
40 	if (size == 0)
41 		panic("bread: size 0");
42 	*bpp = bp = getblk(vp, blkno, size);
43 	if (bp->b_flags&(B_DONE|B_DELWRI)) {
44 		trace(TR_BREADHIT, pack(vp, size), blkno);
45 		return (0);
46 	}
47 	bp->b_flags |= B_READ;
48 	if (bp->b_bcount > bp->b_bufsize)
49 		panic("bread");
50 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
51 		crhold(cred);
52 		bp->b_rcred = cred;
53 	}
54 	VOP_STRATEGY(bp);
55 	trace(TR_BREADMISS, pack(vp, size), blkno);
56 	u.u_ru.ru_inblock++;		/* pay for read */
57 	return (biowait(bp));
58 }
59 
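/*
 * Usage sketch (illustrative only; "vp", "bn", and "bsize" are
 * hypothetical caller variables, not part of this file).  A typical
 * caller checks the error return, uses the data, and releases the
 * buffer with brelse() when done:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, bn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use bsize bytes at bp->b_un.b_addr ...
 *	brelse(bp);
 */
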
60 /*
61  * Read in the block, like bread, but also start I/O on the
62  * read-ahead block (which is not allocated to the caller).
63  */
64 breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
65 	struct vnode *vp;
66 	daddr_t blkno; int size;
67 	daddr_t rablkno; int rabsize;
68 	struct ucred *cred;
69 	struct buf **bpp;
70 {
71 	register struct buf *bp, *rabp;
72 
73 	bp = NULL;
74 	/*
75 	 * If the block isn't in core, then allocate
76 	 * a buffer and initiate i/o (getblk checks
77 	 * for a cache hit).
78 	 */
79 	if (!incore(vp, blkno)) {
80 		*bpp = bp = getblk(vp, blkno, size);
81 		if ((bp->b_flags&(B_DONE|B_DELWRI)) == 0) {
82 			bp->b_flags |= B_READ;
83 			if (bp->b_bcount > bp->b_bufsize)
84 				panic("breada");
85 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
86 				crhold(cred);
87 				bp->b_rcred = cred;
88 			}
89 			VOP_STRATEGY(bp);
90 			trace(TR_BREADMISS, pack(vp, size), blkno);
91 			u.u_ru.ru_inblock++;		/* pay for read */
92 		} else
93 			trace(TR_BREADHIT, pack(vp, size), blkno);
94 	}
95 
96 	/*
97 	 * If there's a read-ahead block, start i/o
98 	 * on it also (as above).
99 	 */
100 	if (!incore(vp, rablkno)) {
101 		rabp = getblk(vp, rablkno, rabsize);
102 		if (rabp->b_flags & (B_DONE|B_DELWRI)) {
103 			brelse(rabp);
104 			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
105 		} else {
106 			rabp->b_flags |= B_READ|B_ASYNC;
107 			if (rabp->b_bcount > rabp->b_bufsize)
108 				panic("breadrabp");
109 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
110 				crhold(cred);
111 				rabp->b_rcred = cred;
112 			}
113 			VOP_STRATEGY(rabp);
114 			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
115 			u.u_ru.ru_inblock++;		/* pay in advance */
116 		}
117 	}
118 
119 	/*
120 	 * If block was in core, let bread get it.
121 	 * If block wasn't in core, then the read was started
122 	 * above, and just wait for it.
123 	 */
124 	if (bp == NULL)
125 		return (bread(vp, blkno, size, cred, bpp));
126 	return (biowait(bp));
127 }
128 
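/*
 * Usage sketch (illustrative only; "lbn", "lastbn", "bsize", and
 * "sequential" are hypothetical caller variables).  A file system read
 * routine that detects sequential access would ask for the next
 * logical block as read-ahead, and otherwise fall back on plain bread():
 *
 *	if (sequential && lbn + 1 <= lastbn)
 *		error = breada(vp, lbn, bsize, lbn + 1, bsize, cred, &bp);
 *	else
 *		error = bread(vp, lbn, bsize, cred, &bp);
 */
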
129 /*
130  * Write the buffer, waiting for completion.
131  * Then release the buffer.
132  */
133 bwrite(bp)
134 	register struct buf *bp;
135 {
136 	register int flag;
137 	int s, error = 0;
138 
139 	flag = bp->b_flags;
140 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
141 	if ((flag&B_DELWRI) == 0)
142 		u.u_ru.ru_oublock++;		/* no one paid yet */
143 	else
144 		reassignbuf(bp, bp->b_vp);
145 	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
146 	if (bp->b_bcount > bp->b_bufsize)
147 		panic("bwrite");
148 	s = splbio();
149 	bp->b_vp->v_numoutput++;
150 	splx(s);
151 	VOP_STRATEGY(bp);
152 
153 	/*
154 	 * If the write was synchronous, then await i/o completion.
155 	 * If the write was "delayed", then we put the buffer on
156 	 * the q of blocks awaiting i/o completion status.
157 	 */
158 	if ((flag&B_ASYNC) == 0) {
159 		error = biowait(bp);
160 		brelse(bp);
161 	} else if (flag & B_DELWRI) {
162 		bp->b_flags |= B_AGE;
163 		error = 0;
164 	}
165 	return (error);
166 }
167 
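/*
 * Usage sketch (illustrative only; "vp", "bn", and "bsize" are
 * hypothetical).  A synchronous read-modify-write of a block combines
 * bread() with bwrite(); the buffer is consumed by bwrite(), which
 * releases it after the transfer completes:
 *
 *	if (error = bread(vp, bn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... modify the data at bp->b_un.b_addr ...
 *	return (bwrite(bp));
 */
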
168 /*
169  * Release the buffer, marking it so that if it is grabbed
170  * for another purpose it will be written out before being
171  * given up (e.g. when writing a partial block where it is
172  * assumed that another write for the same block will soon follow).
173  * This can't be done for magtape, since writes must be done
174  * in the same order as requested.
175  */
176 bdwrite(bp)
177 	register struct buf *bp;
178 {
179 
180 	if ((bp->b_flags & B_DELWRI) == 0) {
181 		bp->b_flags |= B_DELWRI;
182 		reassignbuf(bp, bp->b_vp);
183 		u.u_ru.ru_oublock++;		/* no one paid yet */
184 	}
185 	/*
186 	 * If this is a tape drive, the write must be initiated.
187 	 */
188 	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
189 		bawrite(bp);
190 	} else {
191 		bp->b_flags |= B_DELWRI | B_DONE;
192 		brelse(bp);
193 	}
194 }
195 
196 /*
197  * Release the buffer, start I/O on it, but don't wait for completion.
198  */
199 bawrite(bp)
200 	register struct buf *bp;
201 {
202 
203 	bp->b_flags |= B_ASYNC;
204 	(void) bwrite(bp);
205 }
206 
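/*
 * The three write interfaces above differ only in when the transfer
 * starts and whether the caller waits (a summary of the code above;
 * "bp" is a hypothetical busy buffer owned by the caller):
 *
 *	bwrite(bp)	starts the write now and, unless B_ASYNC was
 *			already set, waits for completion and returns
 *			any error;
 *	bdwrite(bp)	only marks the buffer B_DELWRI, deferring the
 *			write until the buffer is reclaimed or flushed
 *			(useful for a partial block that will soon be
 *			written again);
 *	bawrite(bp)	starts the write now but does not wait.
 *
 * In every case the caller gives up the buffer.
 */
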
207 /*
208  * Release the buffer, with no I/O implied.
209  */
210 brelse(bp)
211 	register struct buf *bp;
212 {
213 	register struct buf *flist;
214 	register s;
215 
216 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
217 	/*
218 	 * If a process is waiting for the buffer, or
219 	 * is waiting for a free buffer, awaken it.
220 	 */
221 	if (bp->b_flags&B_WANTED)
222 		wakeup((caddr_t)bp);
223 	if (bfreelist[0].b_flags&B_WANTED) {
224 		bfreelist[0].b_flags &= ~B_WANTED;
225 		wakeup((caddr_t)bfreelist);
226 	}
227 	/*
228 	 * Retry I/O for locked buffers rather than invalidating them.
229 	 */
230 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
231 		bp->b_flags &= ~B_ERROR;
232 
233 	/*
234 	 * Disassociate buffers that are no longer valid.
235 	 */
236 	if (bp->b_flags & (B_NOCACHE|B_ERROR))
237 		bp->b_flags |= B_INVAL;
238 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR|B_INVAL))) {
239 		if (bp->b_vp)
240 			brelvp(bp);
241 		bp->b_flags &= ~B_DELWRI;
242 	}
243 	/*
244 	 * Stick the buffer back on a free list.
245 	 */
246 	s = splbio();
247 	if (bp->b_bufsize <= 0) {
248 		/* block has no buffer ... put at front of unused buffer list */
249 		flist = &bfreelist[BQ_EMPTY];
250 		binsheadfree(bp, flist);
251 	} else if (bp->b_flags & (B_ERROR|B_INVAL)) {
252 		/* block has no info ... put at front of most free list */
253 		flist = &bfreelist[BQ_AGE];
254 		binsheadfree(bp, flist);
255 	} else {
256 		if (bp->b_flags & B_LOCKED)
257 			flist = &bfreelist[BQ_LOCKED];
258 		else if (bp->b_flags & B_AGE)
259 			flist = &bfreelist[BQ_AGE];
260 		else
261 			flist = &bfreelist[BQ_LRU];
262 		binstailfree(bp, flist);
263 	}
264 	bp->b_flags &= ~(B_WANTED|B_BUSY|B_ASYNC|B_AGE|B_NOCACHE);
265 	splx(s);
266 }
267 
268 /*
269  * See if the block is associated with some buffer
270  * (mainly to avoid getting hung up on a wait in breada)
271  */
272 incore(vp, blkno)
273 	struct vnode *vp;
274 	daddr_t blkno;
275 {
276 	register struct buf *bp;
277 	register struct buf *dp;
278 
279 	dp = BUFHASH(vp, blkno);
280 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
281 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
282 		    (bp->b_flags & B_INVAL) == 0)
283 			return (1);
284 	return (0);
285 }
286 
287 /*
288  * Return a block if it is in memory.
289  */
290 baddr(vp, blkno, size, cred, bpp)
291 	struct vnode *vp;
292 	daddr_t blkno;
293 	int size;
294 	struct ucred *cred;
295 	struct buf **bpp;
296 {
297 
298 	if (incore(vp, blkno))
299 		return (bread(vp, blkno, size, cred, bpp));
300 	*bpp = 0;
301 	return (0);
302 }
303 
304 /*
305  * Assign a buffer for the given block.  If the appropriate
306  * block is already associated, return it; otherwise search
307  * for the oldest non-busy buffer and reassign it.
308  *
309  * We use splx here because this routine may be called
310  * on the interrupt stack during a dump, and we don't
311  * want to lower the ipl back to 0.
312  */
313 struct buf *
314 getblk(vp, blkno, size)
315 	register struct vnode *vp;
316 	daddr_t blkno;
317 	int size;
318 {
319 	register struct buf *bp, *dp;
320 	int s;
321 
322 	if (size > MAXBSIZE)
323 		panic("getblk: size too big");
324 	/*
325 	 * To prevent overflow of 32-bit ints when converting block
326 	 * numbers to byte offsets, blknos > 2^32 / DEV_BSIZE are set
327 	 * to the maximum number that can be converted to a byte offset
328 	 * without overflow. This is historic code; what bug it fixed,
329 	 * or whether it is still a reasonable thing to do is open to
330 	 * dispute. mkm 9/85
331 	 *
332 	 * Make it a panic to see if it ever really happens. mkm 11/89
333 	 */
334 	if ((unsigned)blkno >= 1 << (sizeof(int)*NBBY-DEV_BSHIFT)) {
335 		panic("getblk: blkno too big");
336 		blkno = 1 << ((sizeof(int)*NBBY-DEV_BSHIFT) + 1);
337 	}
338 	/*
339 	 * Search the cache for the block.  If we hit, but
340 	 * the buffer is in use for i/o, then we wait until
341 	 * the i/o has completed.
342 	 */
343 	dp = BUFHASH(vp, blkno);
344 loop:
345 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
346 		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
347 		    bp->b_flags&B_INVAL)
348 			continue;
349 		s = splbio();
350 		if (bp->b_flags&B_BUSY) {
351 			bp->b_flags |= B_WANTED;
352 			sleep((caddr_t)bp, PRIBIO+1);
353 			splx(s);
354 			goto loop;
355 		}
356 		bremfree(bp);
357 		bp->b_flags |= B_BUSY;
358 		splx(s);
359 		if (bp->b_bcount != size) {
360 			printf("getblk: stray size\n");
361 			bp->b_flags |= B_INVAL;
362 			bwrite(bp);
363 			goto loop;
364 		}
365 		bp->b_flags |= B_CACHE;
366 		return (bp);
367 	}
368 	bp = getnewbuf();
369 	bfree(bp);
370 	bremhash(bp);
371 	bgetvp(vp, bp);
372 	bp->b_lblkno = blkno;
373 	bp->b_blkno = blkno;
374 	bp->b_error = 0;
375 	bp->b_resid = 0;
376 	binshash(bp, dp);
377 	brealloc(bp, size);
378 	return (bp);
379 }
380 
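/*
 * Usage sketch (illustrative only; "lbn", "bsize", and "wantsync" are
 * hypothetical caller variables).  A file system allocating a fresh
 * block gets a buffer with getblk(), fills it, and then uses one of
 * the write routines above to dispose of it:
 *
 *	bp = getblk(vp, lbn, bsize);
 *	bzero(bp->b_un.b_addr, (unsigned)bsize);
 *	... fill in the new contents ...
 *	if (wantsync)
 *		error = bwrite(bp);
 *	else
 *		bdwrite(bp);
 */
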
381 /*
382  * get an empty block,
383  * not assigned to any particular device
384  */
385 struct buf *
386 geteblk(size)
387 	int size;
388 {
389 	register struct buf *bp, *flist;
390 
391 	if (size > MAXBSIZE)
392 		panic("geteblk: size too big");
393 	bp = getnewbuf();
394 	bp->b_flags |= B_INVAL;
395 	bfree(bp);
396 	bremhash(bp);
397 	flist = &bfreelist[BQ_AGE];
398 	bp->b_error = 0;
399 	bp->b_resid = 0;
400 	binshash(bp, flist);
401 	brealloc(bp, size);
402 	return (bp);
403 }
404 
405 /*
406  * Allocate space associated with a buffer.
407  */
408 brealloc(bp, size)
409 	register struct buf *bp;
410 	int size;
411 {
416 
417 	if (size == bp->b_bcount)
418 		return;
419 	allocbuf(bp, size);
420 }
421 
422 /*
423  * Find a buffer which is available for use.
424  * Select something from a free list.
425  * Preference is to AGE list, then LRU list.
426  */
427 struct buf *
428 getnewbuf()
429 {
430 	register struct buf *bp, *dp;
431 	register struct ucred *cred;
432 	int s;
433 
434 loop:
435 	s = splbio();
436 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
437 		if (dp->av_forw != dp)
438 			break;
439 	if (dp == bfreelist) {		/* no free blocks */
440 		dp->b_flags |= B_WANTED;
441 		sleep((caddr_t)dp, PRIBIO+1);
442 		splx(s);
443 		goto loop;
444 	}
445 	bp = dp->av_forw;
446 	bremfree(bp);
447 	bp->b_flags |= B_BUSY;
448 	splx(s);
449 	if (bp->b_flags & B_DELWRI) {
450 		(void) bawrite(bp);
451 		goto loop;
452 	}
453 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
454 	if (bp->b_vp)
455 		brelvp(bp);
456 	if (bp->b_rcred != NOCRED) {
457 		cred = bp->b_rcred;
458 		bp->b_rcred = NOCRED;
459 		crfree(cred);
460 	}
461 	if (bp->b_wcred != NOCRED) {
462 		cred = bp->b_wcred;
463 		bp->b_wcred = NOCRED;
464 		crfree(cred);
465 	}
466 	bp->b_flags = B_BUSY;
467 	return (bp);
468 }
469 
470 /*
471  * Wait for I/O completion on the buffer; return errors
472  * to the user.
473  */
474 biowait(bp)
475 	register struct buf *bp;
476 {
477 	int s;
478 
479 	s = splbio();
480 	while ((bp->b_flags & B_DONE) == 0)
481 		sleep((caddr_t)bp, PRIBIO);
482 	splx(s);
483 	/*
484 	 * Pick up the device's error number and pass it to the user;
485 	 * if there is an error but the number is 0 set a generalized code.
486 	 */
487 	if ((bp->b_flags & B_ERROR) == 0)
488 		return (0);
489 	if (bp->b_error)
490 		return (bp->b_error);
491 	return (EIO);
492 }
493 
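/*
 * Sketch of the completion side (hypothetical driver fragment, not
 * from this file): a device's strategy routine queues the buffer and
 * returns, and its interrupt handler later records the outcome and
 * calls biodone() (below) to end the transfer and wake any sleeper in
 * biowait().  "hard_error" and "residual_count" are hypothetical names
 * for the error indication and the count of bytes not transferred:
 *
 *	if (hard_error) {
 *		bp->b_flags |= B_ERROR;
 *		bp->b_error = EIO;
 *	}
 *	bp->b_resid = residual_count;
 *	biodone(bp);
 */
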
494 /*
495  * Mark I/O complete on a buffer.
496  * If a completion routine has been requested, e.g. by the
497  * pageout daemon, call it.  Otherwise, wake up anyone
498  * waiting for the buffer.
499  */
500 biodone(bp)
501 	register struct buf *bp;
502 {
503 	register struct vnode *vp;
504 
505 	if (bp->b_flags & B_DONE)
506 		panic("dup biodone");
507 	bp->b_flags |= B_DONE;
508 	if ((bp->b_flags & B_READ) == 0) {
509 		bp->b_dirtyoff = bp->b_dirtyend = 0;
510 		if (vp = bp->b_vp) {
511 			vp->v_numoutput--;
512 			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
513 				if (vp->v_numoutput < 0)
514 					panic("biodone: neg numoutput");
515 				vp->v_flag &= ~VBWAIT;
516 				wakeup((caddr_t)&vp->v_numoutput);
517 			}
518 		}
519 	}
520 	if (bp->b_flags & B_CALL) {
521 		bp->b_flags &= ~B_CALL;
522 		(*bp->b_iodone)(bp);
523 		return;
524 	}
525 	if (bp->b_flags&B_ASYNC)
526 		brelse(bp);
527 	else {
528 		bp->b_flags &= ~B_WANTED;
529 		wakeup((caddr_t)bp);
530 	}
531 }
532 
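/*
 * A caller that cannot sleep may request a callback instead of using
 * biowait().  Sketch only ("mydone" is a hypothetical routine): set
 * B_CALL and b_iodone before starting the I/O; when the transfer
 * completes, biodone() above clears B_CALL, calls (*b_iodone)(bp), and
 * returns immediately, so the callback is responsible for disposing of
 * the buffer itself (e.g. with brelse()):
 *
 *	bp->b_flags |= B_ASYNC | B_CALL;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 */
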
533 /*
534  * Make sure all write-behind blocks associated
535  * with the mount point are flushed out (called from sync).
536  */
537 mntflushbuf(mountp, flags)
538 	struct mount *mountp;
539 	int flags;
540 {
541 	register struct vnode *vp;
542 	struct vnode *nvp;
543 
544 loop:
545 	for (vp = mountp->m_mounth; vp; vp = nvp) {
546 		nvp = vp->v_mountf;
547 		if (vget(vp))
548 			goto loop;
549 		vflushbuf(vp, flags);
550 		vput(vp);
551 	}
552 }
553 
554 /*
555  * Flush all dirty buffers associated with a vnode.
556  */
557 vflushbuf(vp, flags)
558 	register struct vnode *vp;
559 	int flags;
560 {
561 	register struct buf *bp;
562 	struct buf *nbp;
563 	int s;
564 
565 loop:
566 	s = splbio();
567 	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
568 		nbp = bp->b_blockf;
569 		if ((bp->b_flags & B_BUSY))
570 			continue;
571 		if ((bp->b_flags & B_DELWRI) == 0)
572 			panic("vflushbuf: not dirty");
573 		bremfree(bp);
574 		bp->b_flags |= B_BUSY;
575 		splx(s);
576 		/*
577 		 * Wait for I/O associated with indirect blocks to complete,
578 		 * since there is no way to quickly wait for them below.
579 		 * NB - This is really specific to ufs, but is done here
580 		 * as it is easier and quicker.
581 		 */
582 		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
583 			(void) bawrite(bp);
584 			s = splbio();
585 		} else {
586 			(void) bwrite(bp);
587 			goto loop;
588 		}
589 	}
590 	splx(s);
591 	if ((flags & B_SYNC) == 0)
592 		return;
593 	s = splbio();
594 	while (vp->v_numoutput) {
595 		vp->v_flag |= VBWAIT;
596 		sleep((caddr_t)&vp->v_numoutput, PRIBIO+1);
597 	}
598 	splx(s);
599 	if (vp->v_dirtyblkhd) {
600 		vprint("vflushbuf: dirty", vp);
601 		goto loop;
602 	}
603 }
604 
605 /*
606  * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
607  *
608  * Go through the list of vnodes associated with the file system;
609  * for each vnode invalidate any buffers that it holds. Normally
610  * this routine is preceded by a flush call, so that on a quiescent
611  * filesystem there will be no dirty buffers when we are done.  The
612  * count of dirty buffers encountered is returned.
613  */
614 mntinvalbuf(mountp)
615 	struct mount *mountp;
616 {
617 	register struct vnode *vp;
618 	struct vnode *nvp;
619 	int dirty = 0;
620 
621 loop:
622 	for (vp = mountp->m_mounth; vp; vp = nvp) {
623 		nvp = vp->v_mountf;
624 		if (vget(vp))
625 			goto loop;
626 		dirty += vinvalbuf(vp, 1);
627 		vput(vp);
628 	}
629 	return (dirty);
630 }
631 
632 /*
633  * Flush out and invalidate all buffers associated with a vnode; if "save"
634  * is set, dirty buffers are first written out and the count is returned.
 * Called with the underlying object locked.
635  */
636 vinvalbuf(vp, save)
637 	register struct vnode *vp;
638 	int save;
639 {
640 	register struct buf *bp;
641 	struct buf *nbp, *blist;
642 	int s, dirty = 0;
643 
644 	for (;;) {
645 		if (blist = vp->v_dirtyblkhd)
646 			/* void */;
647 		else if (blist = vp->v_cleanblkhd)
648 			/* void */;
649 		else
650 			break;
651 		for (bp = blist; bp; bp = nbp) {
652 			nbp = bp->b_blockf;
653 			s = splbio();
654 			if (bp->b_flags & B_BUSY) {
655 				bp->b_flags |= B_WANTED;
656 				sleep((caddr_t)bp, PRIBIO+1);
657 				splx(s);
658 				break;
659 			}
660 			bremfree(bp);
661 			bp->b_flags |= B_BUSY;
662 			splx(s);
663 			if (save && (bp->b_flags & B_DELWRI)) {
664 				dirty++;
665 				(void) bwrite(bp);
666 				break;
667 			}
668 			if (bp->b_vp != vp)
669 				reassignbuf(bp, bp->b_vp);
670 			else
671 				bp->b_flags |= B_INVAL;
672 			brelse(bp);
673 		}
674 	}
675 	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
676 		panic("vinvalbuf: flush failed");
677 	return (dirty);
678 }
679 
680 /*
681  * Associate a buffer with a vnode.
682  */
683 bgetvp(vp, bp)
684 	register struct vnode *vp;
685 	register struct buf *bp;
686 {
687 
688 	if (bp->b_vp)
689 		panic("bgetvp: not free");
690 	VHOLD(vp);
691 	bp->b_vp = vp;
692 	if (vp->v_type == VBLK || vp->v_type == VCHR)
693 		bp->b_dev = vp->v_rdev;
694 	else
695 		bp->b_dev = NODEV;
696 	/*
697 	 * Insert onto list for new vnode.
698 	 */
699 	if (vp->v_cleanblkhd) {
700 		bp->b_blockf = vp->v_cleanblkhd;
701 		bp->b_blockb = &vp->v_cleanblkhd;
702 		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
703 		vp->v_cleanblkhd = bp;
704 	} else {
705 		vp->v_cleanblkhd = bp;
706 		bp->b_blockb = &vp->v_cleanblkhd;
707 		bp->b_blockf = NULL;
708 	}
709 }
710 
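/*
 * Note on the list mechanics used here and in brelvp() and
 * reassignbuf() below: each per-vnode buffer chain is singly linked
 * forward through b_blockf, while b_blockb points back at whichever
 * pointer currently references this buffer (the head field in the
 * vnode, or the b_blockf field of the previous buffer).  Removal
 * therefore needs no special case for the first buffer on a list:
 * the code stores the successor through *bp->b_blockb and, if there
 * is a successor, repoints its b_blockb at the same place.
 */
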
711 /*
712  * Disassociate a buffer from a vnode.
713  */
714 brelvp(bp)
715 	register struct buf *bp;
716 {
717 	struct buf *bq;
718 	struct vnode *vp;
719 
720 	if (bp->b_vp == (struct vnode *) 0)
721 		panic("brelvp: NULL");
722 	/*
723 	 * Delete from old vnode list, if on one.
724 	 */
725 	if (bp->b_blockb) {
726 		if (bq = bp->b_blockf)
727 			bq->b_blockb = bp->b_blockb;
728 		*bp->b_blockb = bq;
729 		bp->b_blockf = NULL;
730 		bp->b_blockb = NULL;
731 	}
732 	vp = bp->b_vp;
733 	bp->b_vp = (struct vnode *) 0;
734 	HOLDRELE(vp);
735 }
736 
737 /*
738  * Reassign a buffer from one vnode to another.
739  * Used to assign file specific control information
740  * (indirect blocks) to the vnode to which they belong.
741  */
742 reassignbuf(bp, newvp)
743 	register struct buf *bp;
744 	register struct vnode *newvp;
745 {
746 	register struct buf *bq, **listheadp;
747 
748 	if (newvp == NULL)
749 		panic("reassignbuf: NULL");
750 	/*
751 	 * Delete from old vnode list, if on one.
752 	 */
753 	if (bp->b_blockb) {
754 		if (bq = bp->b_blockf)
755 			bq->b_blockb = bp->b_blockb;
756 		*bp->b_blockb = bq;
757 	}
758 	/*
759 	 * If dirty, put on list of dirty buffers;
760 	 * otherwise insert onto list of clean buffers.
761 	 */
762 	if (bp->b_flags & B_DELWRI)
763 		listheadp = &newvp->v_dirtyblkhd;
764 	else
765 		listheadp = &newvp->v_cleanblkhd;
766 	if (*listheadp) {
767 		bp->b_blockf = *listheadp;
768 		bp->b_blockb = listheadp;
769 		bp->b_blockf->b_blockb = &bp->b_blockf;
770 		*listheadp = bp;
771 	} else {
772 		*listheadp = bp;
773 		bp->b_blockb = listheadp;
774 		bp->b_blockf = NULL;
775 	}
776 }
777