1 /*
2  * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)vfs_cluster.c	7.34 (Berkeley) 03/17/91
8  */
9 
10 #include "param.h"
11 #include "user.h"
12 #include "proc.h"
13 #include "buf.h"
14 #include "vnode.h"
15 #include "specdev.h"
16 #include "mount.h"
17 #include "trace.h"
18 #include "ucred.h"
19 
20 /*
21  * Find the block in the buffer pool.
22  * If the buffer is not present, allocate a new buffer and load
23  * its contents according to the filesystem fill routine.
24  */
25 bread(vp, blkno, size, cred, bpp)
26 	struct vnode *vp;
27 	daddr_t blkno;
28 	int size;
29 	struct ucred *cred;
30 	struct buf **bpp;
31 {
32 	struct proc *p = curproc;		/* XXX */
33 	register struct buf *bp;
34 
35 	if (size == 0)
36 		panic("bread: size 0");
37 	*bpp = bp = getblk(vp, blkno, size);
38 	if (bp->b_flags & (B_DONE | B_DELWRI)) {
39 		trace(TR_BREADHIT, pack(vp, size), blkno);
40 		return (0);
41 	}
42 	bp->b_flags |= B_READ;
43 	if (bp->b_bcount > bp->b_bufsize)
44 		panic("bread");
45 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
46 		crhold(cred);
47 		bp->b_rcred = cred;
48 	}
49 	VOP_STRATEGY(bp);
50 	trace(TR_BREADMISS, pack(vp, size), blkno);
51 	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
52 	return (biowait(bp));
53 }
54 
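/*
 * Usage sketch (added commentary, not part of the original source): a
 * typical filesystem read path calls bread() and releases the buffer
 * with brelse() once the data has been consumed.  The function and
 * argument names below are hypothetical and for illustration only.
 */
#ifdef notdef
int
example_read_block(vp, lbn, bsize, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	if (error = bread(vp, lbn, bsize, cred, &bp)) {
		brelse(bp);
		return (error);
	}
	/* bsize bytes of valid data are now at bp->b_un.b_addr */
	brelse(bp);
	return (0);
}
#endif /* notdef */
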
55 /*
56  * Operates like bread, but also starts I/O on the specified
57  * read-ahead block.
58  */
59 breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
60 	struct vnode *vp;
61 	daddr_t blkno; int size;
62 	daddr_t rablkno; int rabsize;
63 	struct ucred *cred;
64 	struct buf **bpp;
65 {
66 	struct proc *p = curproc;		/* XXX */
67 	register struct buf *bp, *rabp;
68 
69 	bp = NULL;
70 	/*
71 	 * If the block is not memory resident,
72 	 * allocate a buffer and start I/O.
73 	 */
74 	if (!incore(vp, blkno)) {
75 		*bpp = bp = getblk(vp, blkno, size);
76 		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
77 			bp->b_flags |= B_READ;
78 			if (bp->b_bcount > bp->b_bufsize)
79 				panic("breada");
80 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
81 				crhold(cred);
82 				bp->b_rcred = cred;
83 			}
84 			VOP_STRATEGY(bp);
85 			trace(TR_BREADMISS, pack(vp, size), blkno);
86 			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
87 		} else
88 			trace(TR_BREADHIT, pack(vp, size), blkno);
89 	}
90 
91 	/*
92 	 * If there is a read-ahead block, start I/O on it too.
93 	 */
94 	if (!incore(vp, rablkno)) {
95 		rabp = getblk(vp, rablkno, rabsize);
96 		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
97 			brelse(rabp);
98 			trace(TR_BREADHITRA, pack(vp, rabsize), rablkno);
99 		} else {
100 			rabp->b_flags |= B_ASYNC | B_READ;
101 			if (rabp->b_bcount > rabp->b_bufsize)
102 				panic("breadrabp");
103 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
104 				crhold(cred);
105 				rabp->b_rcred = cred;
106 			}
107 			VOP_STRATEGY(rabp);
108 			trace(TR_BREADMISSRA, pack(vp, rabsize), rablkno);
109 			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
110 		}
111 	}
112 
113 	/*
114 	 * If block was memory resident, let bread get it.
115 	 * If block was not memory resident, the read was
116 	 * started above, so just wait for the read to complete.
117 	 */
118 	if (bp == NULL)
119 		return (bread(vp, blkno, size, cred, bpp));
120 	return (biowait(bp));
121 }
122 
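/*
 * Added commentary: callers normally pass the next logical block as the
 * read-ahead block so that sequential reads overlap I/O with copying;
 * e.g. a hypothetical caller might use
 *
 *	error = breada(vp, lbn, bsize, lbn + 1, bsize, cred, &bp);
 *
 * Only the block named by blkno is waited for; the read-ahead buffer is
 * started with B_ASYNC set and is picked up by a later bread() or
 * getblk() once it is resident.
 */
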
123 /*
124  * Synchronous write.
125  * Release buffer on completion.
126  */
127 bwrite(bp)
128 	register struct buf *bp;
129 {
130 	struct proc *p = curproc;		/* XXX */
131 	register int flag;
132 	int s, error = 0;
133 
134 	flag = bp->b_flags;
135 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
136 	if ((flag & B_DELWRI) == 0)
137 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
138 	else
139 		reassignbuf(bp, bp->b_vp);
140 	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
141 	if (bp->b_bcount > bp->b_bufsize)
142 		panic("bwrite");
143 	s = splbio();
144 	bp->b_vp->v_numoutput++;
145 	splx(s);
146 	VOP_STRATEGY(bp);
147 
148 	/*
149 	 * If the write was synchronous, then await I/O completion.
150 	 * If the write was "delayed", then we put the buffer on
151 	 * the queue of blocks awaiting I/O completion status.
152 	 */
153 	if ((flag & B_ASYNC) == 0) {
154 		error = biowait(bp);
155 		brelse(bp);
156 	} else if (flag & B_DELWRI) {
157 		bp->b_flags |= B_AGE;
158 		error = 0;
159 	}
160 	return (error);
161 }
162 
163 /*
164  * Delayed write.
165  *
166  * The buffer is marked dirty, but is not queued for I/O.
167  * This routine should be used when the buffer is expected
168  * to be modified again soon, typically a small write that
169  * partially fills a buffer.
170  *
171  * NB: magnetic tapes cannot be delayed; they must be
172  * written in the order that the writes are requested.
173  */
174 bdwrite(bp)
175 	register struct buf *bp;
176 {
177 	struct proc *p = curproc;		/* XXX */
178 
179 	if ((bp->b_flags & B_DELWRI) == 0) {
180 		bp->b_flags |= B_DELWRI;
181 		reassignbuf(bp, bp->b_vp);
182 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
183 	}
184 	/*
185 	 * If this is a tape drive, the write must be initiated.
186 	 */
187 	if (VOP_IOCTL(bp->b_vp, 0, B_TAPE, 0, NOCRED) == 0) {
188 		bawrite(bp);
189 	} else {
190 		bp->b_flags |= (B_DONE | B_DELWRI);
191 		brelse(bp);
192 	}
193 }
194 
195 /*
196  * Asynchronous write.
197  * Start I/O on a buffer, but do not wait for it to complete.
198  * The buffer is released when the I/O completes.
199  */
200 bawrite(bp)
201 	register struct buf *bp;
202 {
203 
204 	/*
205 	 * Setting the ASYNC flag causes bwrite to return
206 	 * after starting the I/O.
207 	 */
208 	bp->b_flags |= B_ASYNC;
209 	(void) bwrite(bp);
210 }
211 
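/*
 * Added commentary: bwrite(), bdwrite() and bawrite() differ only in
 * when the caller gets the buffer and the completion status back.  The
 * sketch below is hypothetical (the function and parameter names are
 * not part of this file) and shows the usual choice a filesystem makes
 * when it finishes filling a buffer.
 */
#ifdef notdef
int
example_finish_write(bp, sync, wholeblock)
	register struct buf *bp;
	int sync, wholeblock;
{

	if (sync)
		return (bwrite(bp));	/* start the write and wait for it */
	if (wholeblock)
		bawrite(bp);		/* start the write, do not wait */
	else
		bdwrite(bp);		/* just mark dirty; write it later */
	return (0);
}
#endif /* notdef */
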
212 /*
213  * Release a buffer.
214  * Even if the buffer is dirty, no I/O is started.
215  */
216 brelse(bp)
217 	register struct buf *bp;
218 {
219 	register struct buf *flist;
220 	int s;
221 
222 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
223 	/*
224 	 * If a process is waiting for the buffer, or
225 	 * is waiting for a free buffer, awaken it.
226 	 */
227 	if (bp->b_flags & B_WANTED)
228 		wakeup((caddr_t)bp);
229 	if (bfreelist[0].b_flags & B_WANTED) {
230 		bfreelist[0].b_flags &= ~B_WANTED;
231 		wakeup((caddr_t)bfreelist);
232 	}
233 	/*
234 	 * Retry I/O for locked buffers rather than invalidating them.
235 	 */
236 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
237 		bp->b_flags &= ~B_ERROR;
238 	/*
239 	 * Disassociate buffers that are no longer valid.
240 	 */
241 	if (bp->b_flags & (B_NOCACHE | B_ERROR))
242 		bp->b_flags |= B_INVAL;
243 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
244 		if (bp->b_vp)
245 			brelvp(bp);
246 		bp->b_flags &= ~B_DELWRI;
247 	}
248 	/*
249 	 * Stick the buffer back on a free list.
250 	 */
251 	s = splbio();
252 	if (bp->b_bufsize <= 0) {
253 		/* block has no buffer ... put at front of unused buffer list */
254 		flist = &bfreelist[BQ_EMPTY];
255 		binsheadfree(bp, flist);
256 	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
257 		/* block has no info ... put at front of most free list */
258 		flist = &bfreelist[BQ_AGE];
259 		binsheadfree(bp, flist);
260 	} else {
261 		if (bp->b_flags & B_LOCKED)
262 			flist = &bfreelist[BQ_LOCKED];
263 		else if (bp->b_flags & B_AGE)
264 			flist = &bfreelist[BQ_AGE];
265 		else
266 			flist = &bfreelist[BQ_LRU];
267 		binstailfree(bp, flist);
268 	}
269 	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
270 	splx(s);
271 }
272 
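/*
 * Added commentary: the four free lists give a rough replacement policy.
 * BQ_LOCKED holds buffers that must not be reused, BQ_LRU holds valid
 * cached data in least-recently-used order, BQ_AGE holds buffers whose
 * contents are unlikely to be wanted again (aged, errored or invalid),
 * and BQ_EMPTY holds headers with no memory attached.  getnewbuf()
 * below consumes from BQ_AGE first, then BQ_LRU.
 */
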
273 /*
274  * Check to see if a block is currently memory resident.
275  */
276 incore(vp, blkno)
277 	struct vnode *vp;
278 	daddr_t blkno;
279 {
280 	register struct buf *bp;
281 	register struct buf *dp;
282 
283 	dp = BUFHASH(vp, blkno);
284 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw)
285 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
286 		    (bp->b_flags & B_INVAL) == 0)
287 			return (1);
288 	return (0);
289 }
290 
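/*
 * Added commentary: incore() only inspects the BUFHASH chain for the
 * (vnode, logical block) pair; it neither locks the buffer it finds nor
 * guarantees the buffer is still resident by the time the caller acts,
 * so it serves purely as a hint (see breada() above).
 */
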
291 /*
292  * Check to see if a block is currently memory resident.
293  * If it is resident, return it. If it is not resident,
294  * allocate a new buffer and assign it to the block.
295  */
296 struct buf *
297 getblk(vp, blkno, size)
298 	register struct vnode *vp;
299 	daddr_t blkno;
300 	int size;
301 {
302 	register struct buf *bp, *dp;
303 	int s;
304 
305 	if (size > MAXBSIZE)
306 		panic("getblk: size too big");
307 	/*
308 	 * Search the cache for the block. If the buffer is found,
309 	 * but it is currently locked, then we must wait for it to
310 	 * become available.
311 	 */
312 	dp = BUFHASH(vp, blkno);
313 loop:
314 	for (bp = dp->b_forw; bp != dp; bp = bp->b_forw) {
315 		if (bp->b_lblkno != blkno || bp->b_vp != vp ||
316 		    (bp->b_flags & B_INVAL))
317 			continue;
318 		s = splbio();
319 		if (bp->b_flags & B_BUSY) {
320 			bp->b_flags |= B_WANTED;
321 			sleep((caddr_t)bp, PRIBIO + 1);
322 			splx(s);
323 			goto loop;
324 		}
325 		bremfree(bp);
326 		bp->b_flags |= B_BUSY;
327 		splx(s);
328 		if (bp->b_bcount != size) {
329 		printf("getblk: stray size\n");
330 			bp->b_flags |= B_INVAL;
331 			bwrite(bp);
332 			goto loop;
333 		}
334 		bp->b_flags |= B_CACHE;
335 		return (bp);
336 	}
337 	bp = getnewbuf();
338 	bremhash(bp);
339 	bgetvp(vp, bp);
340 	bp->b_bcount = 0;
341 	bp->b_lblkno = blkno;
342 	bp->b_blkno = blkno;
343 	bp->b_error = 0;
344 	bp->b_resid = 0;
345 	binshash(bp, dp);
346 	allocbuf(bp, size);
347 	return (bp);
348 }
349 
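/*
 * Usage sketch (added commentary, hypothetical names): when a block is
 * about to be completely overwritten there is no need to read the old
 * contents, so callers use getblk() instead of bread() and then push
 * the buffer out with one of the write routines above.
 */
#ifdef notdef
void
example_overwrite_block(vp, lbn, bsize)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
{
	register struct buf *bp;

	bp = getblk(vp, lbn, bsize);
	bzero(bp->b_un.b_addr, (unsigned)bsize);
	/* ... fill in the new contents here ... */
	bdwrite(bp);
}
#endif /* notdef */
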
350 /*
351  * Allocate a buffer.
352  * The caller will assign it to a block.
353  */
354 struct buf *
355 geteblk(size)
356 	int size;
357 {
358 	register struct buf *bp, *flist;
359 
360 	if (size > MAXBSIZE)
361 		panic("geteblk: size too big");
362 	bp = getnewbuf();
363 	bp->b_flags |= B_INVAL;
364 	bremhash(bp);
365 	flist = &bfreelist[BQ_AGE];
366 	bp->b_bcount = 0;
367 	bp->b_error = 0;
368 	bp->b_resid = 0;
369 	binshash(bp, flist);
370 	allocbuf(bp, size);
371 	return (bp);
372 }
373 
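/*
 * Added commentary: buffers from geteblk() are marked B_INVAL and are
 * hashed onto a free-list header rather than a device chain, so they
 * can never satisfy a getblk()/incore() lookup; they act as private
 * scratch buffers until released with brelse().
 */
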
374 /*
375  * Expand or contract the actual memory allocated to a buffer.
376  * If no memory is available, release buffer and take error exit.
377  */
378 allocbuf(tp, size)
379 	register struct buf *tp;
380 	int size;
381 {
382 	register struct buf *bp, *ep;
383 	int sizealloc, take, s;
384 
385 	sizealloc = roundup(size, CLBYTES);
386 	/*
387 	 * Buffer size does not change
388 	 */
389 	if (sizealloc == tp->b_bufsize)
390 		goto out;
391 	/*
392 	 * Buffer size is shrinking.
393 	 * Move the excess space into a buffer header taken from the
394 	 * BQ_EMPTY buffer list and place that header on the "most free" list.
395 	 * If no extra buffer headers are available, leave the
396 	 * extra space in the present buffer.
397 	 */
398 	if (sizealloc < tp->b_bufsize) {
399 		ep = bfreelist[BQ_EMPTY].av_forw;
400 		if (ep == &bfreelist[BQ_EMPTY])
401 			goto out;
402 		s = splbio();
403 		bremfree(ep);
404 		ep->b_flags |= B_BUSY;
405 		splx(s);
406 		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
407 		    (int)tp->b_bufsize - sizealloc);
408 		ep->b_bufsize = tp->b_bufsize - sizealloc;
409 		tp->b_bufsize = sizealloc;
410 		ep->b_flags |= B_INVAL;
411 		ep->b_bcount = 0;
412 		brelse(ep);
413 		goto out;
414 	}
415 	/*
416 	 * More buffer space is needed. Get it out of buffers on
417 	 * the "most free" list, placing the empty headers on the
418 	 * BQ_EMPTY buffer header list.
419 	 */
420 	while (tp->b_bufsize < sizealloc) {
421 		take = sizealloc - tp->b_bufsize;
422 		bp = getnewbuf();
423 		if (take >= bp->b_bufsize)
424 			take = bp->b_bufsize;
425 		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
426 		    &tp->b_un.b_addr[tp->b_bufsize], take);
427 		tp->b_bufsize += take;
428 		bp->b_bufsize = bp->b_bufsize - take;
429 		if (bp->b_bcount > bp->b_bufsize)
430 			bp->b_bcount = bp->b_bufsize;
431 		if (bp->b_bufsize <= 0) {
432 			bremhash(bp);
433 			binshash(bp, &bfreelist[BQ_EMPTY]);
434 			bp->b_dev = NODEV;
435 			bp->b_error = 0;
436 			bp->b_flags |= B_INVAL;
437 		}
438 		brelse(bp);
439 	}
440 out:
441 	tp->b_bcount = size;
442 	return (1);
443 }
444 
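/*
 * Added commentary: a worked example, assuming a machine where CLBYTES
 * is 4096.  Growing a buffer from b_bufsize 4096 to a requested size of
 * 6000 first rounds the request up to 8192, then repeatedly takes pages
 * (via pagemove()) from buffers returned by getnewbuf() until 8192
 * bytes are attached; a donor buffer that loses all of its pages becomes
 * an empty header on the BQ_EMPTY list.  Shrinking works the other way:
 * the surplus pages are handed to an empty header, which is then
 * released as an invalid buffer.
 */
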
445 /*
446  * Find a buffer which is available for use.
447  * Select something from a free list.
448  * Preference is given to the AGE list, then the LRU list.
449  */
450 struct buf *
451 getnewbuf()
452 {
453 	register struct buf *bp, *dp;
454 	register struct ucred *cred;
455 	int s;
456 
457 loop:
458 	s = splbio();
459 	for (dp = &bfreelist[BQ_AGE]; dp > bfreelist; dp--)
460 		if (dp->av_forw != dp)
461 			break;
462 	if (dp == bfreelist) {		/* no free blocks */
463 		dp->b_flags |= B_WANTED;
464 		sleep((caddr_t)dp, PRIBIO + 1);
465 		splx(s);
466 		goto loop;
467 	}
468 	bp = dp->av_forw;
469 	bremfree(bp);
470 	bp->b_flags |= B_BUSY;
471 	splx(s);
472 	if (bp->b_flags & B_DELWRI) {
473 		(void) bawrite(bp);
474 		goto loop;
475 	}
476 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
477 	if (bp->b_vp)
478 		brelvp(bp);
479 	if (bp->b_rcred != NOCRED) {
480 		cred = bp->b_rcred;
481 		bp->b_rcred = NOCRED;
482 		crfree(cred);
483 	}
484 	if (bp->b_wcred != NOCRED) {
485 		cred = bp->b_wcred;
486 		bp->b_wcred = NOCRED;
487 		crfree(cred);
488 	}
489 	bp->b_flags = B_BUSY;
490 	bp->b_dirtyoff = bp->b_dirtyend = 0;
491 	return (bp);
492 }
493 
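/*
 * Added commentary: a buffer taken from a free list may still hold
 * delayed-write data; rather than lose it, getnewbuf() pushes such a
 * buffer out asynchronously (bawrite()) and retries the search, so the
 * caller always receives a buffer whose old contents are dispensable.
 */
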
494 /*
495  * Wait for I/O to complete.
496  *
497  * Extract and return any errors associated with the I/O.
498  * If the error flag is set, but no specific error is
499  * given, return EIO.
500  */
501 biowait(bp)
502 	register struct buf *bp;
503 {
504 	int s;
505 
506 	s = splbio();
507 	while ((bp->b_flags & B_DONE) == 0)
508 		sleep((caddr_t)bp, PRIBIO);
509 	splx(s);
510 	if ((bp->b_flags & B_ERROR) == 0)
511 		return (0);
512 	if (bp->b_error)
513 		return (bp->b_error);
514 	return (EIO);
515 }
516 
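/*
 * Added commentary: the splbio() around the B_DONE test closes the race
 * in which the interrupt-level biodone() runs between the test and the
 * sleep(); without it the wakeup could be lost and the process would
 * sleep forever.
 */
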
517 /*
518  * Mark I/O complete on a buffer.
519  *
520  * If a callback has been requested, e.g. the pageout
521  * daemon, do so. Otherwise, awaken waiting processes.
522  */
523 biodone(bp)
524 	register struct buf *bp;
525 {
526 	register struct vnode *vp;
527 
528 	if (bp->b_flags & B_DONE)
529 		panic("dup biodone");
530 	bp->b_flags |= B_DONE;
531 	if ((bp->b_flags & B_READ) == 0) {
532 		bp->b_dirtyoff = bp->b_dirtyend = 0;
533 		if (vp = bp->b_vp) {
534 			vp->v_numoutput--;
535 			if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) {
536 				if (vp->v_numoutput < 0)
537 					panic("biodone: neg numoutput");
538 				vp->v_flag &= ~VBWAIT;
539 				wakeup((caddr_t)&vp->v_numoutput);
540 			}
541 		}
542 	}
543 	if (bp->b_flags & B_CALL) {
544 		bp->b_flags &= ~B_CALL;
545 		(*bp->b_iodone)(bp);
546 		return;
547 	}
548 	if (bp->b_flags & B_ASYNC)
549 		brelse(bp);
550 	else {
551 		bp->b_flags &= ~B_WANTED;
552 		wakeup((caddr_t)bp);
553 	}
554 }
555 
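/*
 * Usage sketch (added commentary, hypothetical names): a caller that
 * cannot sleep, such as the pageout daemon, asks for a callback instead
 * of waiting in biowait().  The buffer is assumed to be already set up
 * for a write exactly as bwrite() does above; only the callback
 * plumbing is shown.
 */
#ifdef notdef
int example_iodone();

void
example_start_write(bp)
	register struct buf *bp;
{
	int s;

	bp->b_flags |= B_ASYNC | B_CALL;
	bp->b_iodone = example_iodone;
	s = splbio();
	bp->b_vp->v_numoutput++;	/* same accounting as bwrite() */
	splx(s);
	VOP_STRATEGY(bp);
}

int
example_iodone(bp)
	register struct buf *bp;
{

	/* called from biodone() at interrupt level; B_CALL already cleared */
	brelse(bp);
	return (0);
}
#endif /* notdef */
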
556 /*
557  * Make sure all write-behind blocks associated
558  * with the mount point are flushed out (from sync).
559  */
560 mntflushbuf(mountp, flags)
561 	struct mount *mountp;
562 	int flags;
563 {
564 	register struct vnode *vp;
565 
566 	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
567 		panic("mntflushbuf: not busy");
568 loop:
569 	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
570 		if (vget(vp))
571 			goto loop;
572 		vflushbuf(vp, flags);
573 		vput(vp);
574 		if (vp->v_mount != mountp)
575 			goto loop;
576 	}
577 }
578 
579 /*
580  * Flush all dirty buffers associated with a vnode.
581  */
582 vflushbuf(vp, flags)
583 	register struct vnode *vp;
584 	int flags;
585 {
586 	register struct buf *bp;
587 	struct buf *nbp;
588 	int s;
589 
590 loop:
591 	s = splbio();
592 	for (bp = vp->v_dirtyblkhd; bp; bp = nbp) {
593 		nbp = bp->b_blockf;
594 		if ((bp->b_flags & B_BUSY))
595 			continue;
596 		if ((bp->b_flags & B_DELWRI) == 0)
597 			panic("vflushbuf: not dirty");
598 		bremfree(bp);
599 		bp->b_flags |= B_BUSY;
600 		splx(s);
601 		/*
602 		 * Wait for I/O associated with indirect blocks to complete,
603 		 * since there is no way to quickly wait for them below.
604 		 * NB: This is really specific to ufs, but is done here
605 		 * as it is easier and quicker.
606 		 */
607 		if (bp->b_vp == vp || (flags & B_SYNC) == 0) {
608 			(void) bawrite(bp);
609 			s = splbio();
610 		} else {
611 			(void) bwrite(bp);
612 			goto loop;
613 		}
614 	}
615 	splx(s);
616 	if ((flags & B_SYNC) == 0)
617 		return;
618 	s = splbio();
619 	while (vp->v_numoutput) {
620 		vp->v_flag |= VBWAIT;
621 		sleep((caddr_t)&vp->v_numoutput, PRIBIO + 1);
622 	}
623 	splx(s);
624 	if (vp->v_dirtyblkhd) {
625 		vprint("vflushbuf: dirty", vp);
626 		goto loop;
627 	}
628 }
629 
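/*
 * Added commentary: the B_SYNC case relies on the handshake with
 * biodone() above.  Every write in progress is counted in v_numoutput,
 * and setting VBWAIT asks biodone() to issue a wakeup when the count
 * drains to zero, so vflushbuf() can sleep until every write it started
 * has completed.
 */
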
630 /*
631  * Invalidate in-core blocks belonging to a closed or unmounted filesystem.
632  *
633  * Go through the list of vnodes associated with the file system;
634  * for each vnode invalidate any buffers that it holds. Normally
635  * this routine is preceded by a bflush call, so that on a quiescent
636  * filesystem there will be no dirty buffers when we are done. This
637  * routine returns the count of dirty buffers when it is finished.
638  */
639 mntinvalbuf(mountp)
640 	struct mount *mountp;
641 {
642 	register struct vnode *vp;
643 	int dirty = 0;
644 
645 	if ((mountp->mnt_flag & MNT_MPBUSY) == 0)
646 		panic("mntinvalbuf: not busy");
647 loop:
648 	for (vp = mountp->mnt_mounth; vp; vp = vp->v_mountf) {
649 		if (vget(vp))
650 			goto loop;
651 		dirty += vinvalbuf(vp, 1);
652 		vput(vp);
653 		if (vp->v_mount != mountp)
654 			goto loop;
655 	}
656 	return (dirty);
657 }
658 
659 /*
660  * Flush out and invalidate all buffers associated with a vnode.
661  * Called with the underlying object locked.
662  */
663 vinvalbuf(vp, save)
664 	register struct vnode *vp;
665 	int save;
666 {
667 	register struct buf *bp;
668 	struct buf *nbp, *blist;
669 	int s, dirty = 0;
670 
671 	for (;;) {
672 		if (blist = vp->v_dirtyblkhd)
673 			/* void */;
674 		else if (blist = vp->v_cleanblkhd)
675 			/* void */;
676 		else
677 			break;
678 		for (bp = blist; bp; bp = nbp) {
679 			nbp = bp->b_blockf;
680 			s = splbio();
681 			if (bp->b_flags & B_BUSY) {
682 				bp->b_flags |= B_WANTED;
683 				sleep((caddr_t)bp, PRIBIO + 1);
684 				splx(s);
685 				break;
686 			}
687 			bremfree(bp);
688 			bp->b_flags |= B_BUSY;
689 			splx(s);
690 			if (save && (bp->b_flags & B_DELWRI)) {
691 				dirty++;
692 				(void) bwrite(bp);
693 				break;
694 			}
695 			if (bp->b_vp != vp)
696 				reassignbuf(bp, bp->b_vp);
697 			else
698 				bp->b_flags |= B_INVAL;
699 			brelse(bp);
700 		}
701 	}
702 	if (vp->v_dirtyblkhd || vp->v_cleanblkhd)
703 		panic("vinvalbuf: flush failed");
704 	return (dirty);
705 }
706 
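/*
 * Added commentary: the scan restarts from the list head after every
 * sleep or synchronous write because either operation can change the
 * vnode's buffer lists underneath us; only when both lists are seen
 * empty in a single pass is the flush considered complete.
 */
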
707 /*
708  * Associate a buffer with a vnode.
709  */
710 bgetvp(vp, bp)
711 	register struct vnode *vp;
712 	register struct buf *bp;
713 {
714 
715 	if (bp->b_vp)
716 		panic("bgetvp: not free");
717 	VHOLD(vp);
718 	bp->b_vp = vp;
719 	if (vp->v_type == VBLK || vp->v_type == VCHR)
720 		bp->b_dev = vp->v_rdev;
721 	else
722 		bp->b_dev = NODEV;
723 	/*
724 	 * Insert onto list for new vnode.
725 	 */
726 	if (vp->v_cleanblkhd) {
727 		bp->b_blockf = vp->v_cleanblkhd;
728 		bp->b_blockb = &vp->v_cleanblkhd;
729 		vp->v_cleanblkhd->b_blockb = &bp->b_blockf;
730 		vp->v_cleanblkhd = bp;
731 	} else {
732 		vp->v_cleanblkhd = bp;
733 		bp->b_blockb = &vp->v_cleanblkhd;
734 		bp->b_blockf = NULL;
735 	}
736 }
737 
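/*
 * Added commentary on the list representation used here and in brelvp()
 * and reassignbuf() below: b_blockf points at the next buffer on the
 * vnode's clean or dirty list, while b_blockb points back at the
 * predecessor's b_blockf field (or at the list head pointer itself), so
 * a buffer can be unlinked in constant time without knowing which list
 * it is on.
 */
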
738 /*
739  * Disassociate a buffer from a vnode.
740  */
741 brelvp(bp)
742 	register struct buf *bp;
743 {
744 	struct buf *bq;
745 	struct vnode *vp;
746 
747 	if (bp->b_vp == (struct vnode *) 0)
748 		panic("brelvp: NULL");
749 	/*
750 	 * Delete from old vnode list, if on one.
751 	 */
752 	if (bp->b_blockb) {
753 		if (bq = bp->b_blockf)
754 			bq->b_blockb = bp->b_blockb;
755 		*bp->b_blockb = bq;
756 		bp->b_blockf = NULL;
757 		bp->b_blockb = NULL;
758 	}
759 	vp = bp->b_vp;
760 	bp->b_vp = (struct vnode *) 0;
761 	HOLDRELE(vp);
762 }
763 
764 /*
765  * Reassign a buffer from one vnode to another.
766  * Used to assign file specific control information
767  * (indirect blocks) to the vnode to which they belong.
768  */
769 reassignbuf(bp, newvp)
770 	register struct buf *bp;
771 	register struct vnode *newvp;
772 {
773 	register struct buf *bq, **listheadp;
774 
775 	if (newvp == NULL)
776 		panic("reassignbuf: NULL");
777 	/*
778 	 * Delete from old vnode list, if on one.
779 	 */
780 	if (bp->b_blockb) {
781 		if (bq = bp->b_blockf)
782 			bq->b_blockb = bp->b_blockb;
783 		*bp->b_blockb = bq;
784 	}
785 	/*
786 	 * If dirty, put on list of dirty buffers;
787 	 * otherwise insert onto list of clean buffers.
788 	 */
789 	if (bp->b_flags & B_DELWRI)
790 		listheadp = &newvp->v_dirtyblkhd;
791 	else
792 		listheadp = &newvp->v_cleanblkhd;
793 	if (*listheadp) {
794 		bp->b_blockf = *listheadp;
795 		bp->b_blockb = listheadp;
796 		bp->b_blockf->b_blockb = &bp->b_blockf;
797 		*listheadp = bp;
798 	} else {
799 		*listheadp = bp;
800 		bp->b_blockb = listheadp;
801 		bp->b_blockf = NULL;
802 	}
803 }
804