xref: /csrg-svn/sys/ufs/lfs/lfs_alloc.c (revision 4607)
1 /* Copyright (c) 1981 Regents of the University of California */
2 
3 static char vers[] = "@(#)lfs_alloc.c 1.6 10/25/81";
4 
5 /*	alloc.c	4.8	81/03/08	*/
6 
7 #include "../h/param.h"
8 #include "../h/systm.h"
9 #include "../h/mount.h"
10 #include "../h/fs.h"
11 #include "../h/conf.h"
12 #include "../h/buf.h"
13 #include "../h/inode.h"
14 #include "../h/dir.h"
15 #include "../h/user.h"
16 
17 extern long		hashalloc();
18 extern long		alloccg();
19 extern long		ialloccg();
20 extern int		inside[], around[];
21 extern unsigned char	fragtbl[];
22 
/*
 * Allocate a block or fragment of up to BSIZE bytes for inode ip.
 * If bpref is nonzero it is used as a placement hint; otherwise the
 * cylinder group holding the inode itself is preferred.  Returns a
 * cleared buffer for the new storage, or NULL with u.u_error set to
 * ENOSPC if the file system is out of space.
 */
struct buf *
alloc(dev, ip, bpref, size)
	dev_t dev;
	register struct inode *ip;
	daddr_t bpref;	/* preferred disk address, 0 for no preference */
	int size;	/* bytes to allocate; multiple of FSIZE, <= BSIZE */
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	if ((unsigned)size > BSIZE || size % FSIZE != 0)
		panic("alloc: bad size");
	fs = getfs(dev);
	/* a full block cannot be pieced together from fragments,
	 * so a full-block request fails fast when no blocks are free */
	if (fs->fs_nbfree == 0 && size == BSIZE)
		goto nospace;
	if (bpref == 0)
		cg = itog(ip->i_number, fs);
	else
		cg = dtog(bpref, fs);
	/* preferred group, then quadratic rehash, then brute force
	 * (see hashalloc) */
	bno = hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
	if (bno == 0)
		goto nospace;
	bp = getblk(dev, bno, size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
56 
/*
 * Reallocate a fragment to a bigger size.  First try to extend the
 * fragment in place (fragextend); failing that, allocate a new piece
 * of nsize bytes, move the old contents over, and free the old
 * fragment.  Returns a buffer for the (possibly relocated) data with
 * the grown region zeroed, or NULL with u.u_error = ENOSPC if no
 * space is available.
 */
struct buf *
realloccg(dev, ip, bprev, osize, nsize)
	dev_t dev;
	register struct inode *ip;
	daddr_t bprev;		/* current disk address of the fragment */
	int osize, nsize;	/* old and new sizes, in bytes */
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	if ((unsigned)osize > BSIZE || osize % FSIZE != 0 ||
	    (unsigned)nsize > BSIZE || nsize % FSIZE != 0)
		panic("realloccg: bad size");
	fs = getfs(dev);
	if (bprev == 0)
		panic("realloccg: bad bprev");
	else
		cg = dtog(bprev, fs);
	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		/* extended in place; zero only the newly added portion */
		bp = bread(dev, bno, osize);
		/* NOTE(review): grows b_bcount without reallocating the
		 * buffer's memory -- presumably buffers always hold BSIZE
		 * bytes; confirm against the buffer cache implementation */
		bp->b_bcount = nsize;
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	bno = hashalloc(dev, fs, cg, (long)bprev, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy: swap the data areas of the old and
		 * new buffers rather than copying the bytes, invalidate
		 * the old buffer, and release the old fragment
		 */
		obp = bread(dev, bprev, osize);
		bp = getblk(dev, bno, nsize);
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(dev, bprev, osize);
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return(bp);
	}
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
109 
110 struct inode *
111 ialloc(dev, ipref, mode)
112 	dev_t dev;
113 	ino_t ipref;
114 	int mode;
115 {
116 	daddr_t ino;
117 	register struct fs *fs;
118 	register struct inode *ip;
119 	int cg;
120 
121 	fs = getfs(dev);
122 	if (fs->fs_nifree == 0)
123 		goto noinodes;
124 	cg = itog(ipref, fs);
125 	ino = hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
126 	if (ino == 0)
127 		goto noinodes;
128 	ip = iget(dev, ino);
129 	if (ip == NULL) {
130 		ifree(dev, ino);
131 		return (NULL);
132 	}
133 	if (ip->i_mode)
134 		panic("ialloc: dup alloc");
135 	return (ip);
136 noinodes:
137 	fserr(fs, "out of inodes");
138 	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
139 	u.u_error = ENOSPC;
140 	return (NULL);
141 }
142 
143 dipref(dev)
144 	dev_t dev;
145 {
146 	register struct fs *fs;
147 	int cg, minndir, mincg;
148 
149 	fs = getfs(dev);
150 	minndir = fs->fs_cs[0].cs_ndir;
151 	mincg = 0;
152 	for (cg = 1; cg < fs->fs_ncg; cg++)
153 		if (fs->fs_cs[cg].cs_ndir < minndir) {
154 			mincg = cg;
155 			minndir = fs->fs_cs[cg].cs_ndir;
156 			if (minndir == 0)
157 				break;
158 		}
159 	return (fs->fs_ipg * mincg);
160 }
161 
/*
 * Cylinder group overflow strategy, shared by block and inode
 * allocation: try the caller's preferred cylinder group first, then
 * probe other groups by quadratic rehash, and finally sweep every
 * group linearly.  allocator is alloccg (data) or ialloccg (inodes);
 * its first nonzero result is returned, 0 if every group failed.
 */
long
hashalloc(dev, fs, cg, pref, size, allocator)
	dev_t dev;
	register struct fs *fs;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	long (*allocator)();
{
	long result;
	int i, icg = cg;

	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(dev, fs, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		/* pref is dropped after phase 1; any free spot will do */
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 */
	cg = icg;	/* restart from the original group */
	for (i = 0; i < fs->fs_ncg; i++) {
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
205 
206 daddr_t
207 fragextend(dev, fs, cg, bprev, osize, nsize)
208 	dev_t dev;
209 	register struct fs *fs;
210 	int cg;
211 	long bprev;
212 	int osize, nsize;
213 {
214 	register struct buf *bp;
215 	register struct cg *cgp;
216 	long bno;
217 	int frags, bbase;
218 	int i;
219 
220 	frags = nsize / FSIZE;
221 	bbase = bprev % FRAG;
222 	if (bbase > (bprev + frags - 1) % FRAG) {
223 		/* cannot extend across a block boundry */
224 		return (0);
225 	}
226 	bp = bread(dev, cgtod(cg, fs), BSIZE);
227 	if (bp->b_flags & B_ERROR)
228 		return (0);
229 	cgp = bp->b_un.b_cg;
230 	bno = bprev % fs->fs_fpg;
231 	for (i = osize / FSIZE; i < frags; i++) {
232 		if (isclr(cgp->cg_free, bno + i))
233 			break;
234 	}
235 	if (i == frags) {
236 		/*
237 		 * the current fragment can be extended
238 		 * deduct the count on fragment being extended into
239 		 * increase the count on the remaining fragment (if any)
240 		 * allocate the extended piece
241 		 */
242 		for (i = frags; i < FRAG - bbase; i++)
243 			if (isclr(cgp->cg_free, bno + i))
244 				break;
245 		cgp->cg_frsum[i - osize / FSIZE]--;
246 		if (i != frags)
247 			cgp->cg_frsum[i - frags]++;
248 		for (i = osize / FSIZE; i < frags; i++) {
249 			clrbit(cgp->cg_free, bno + i);
250 			cgp->cg_nffree--;
251 			fs->fs_nffree--;
252 		}
253 		fs->fs_fmod++;
254 		bdwrite(bp);
255 		return (bprev);
256 	}
257 	brelse(bp);
258 	return (0);
259 }
260 
/*
 * Allocate size bytes in cylinder group cg, near bpref if possible.
 * A full-block request (size == BSIZE) takes a whole block; a
 * fragment request first looks for an existing run of free frags of
 * sufficient size, and only breaks up a full block when none exists.
 * Returns the file-system-relative address of the allocated piece,
 * or 0 if this group cannot satisfy the request.
 */
daddr_t
alloccg(dev, fs, cg, bpref, size)
	dev_t dev;
	register struct fs *fs;
	int cg;
	daddr_t bpref;	/* preferred address, 0 for none */
	int size;	/* bytes; multiple of FSIZE, <= BSIZE */
{
	register struct buf *bp;
	register struct cg *cgp;
	int bno, frags;
	int allocsiz;
	int start, len, loc;
	int blk, field, subfield, pos;
	register int i;

	bp = bread(dev, cgtod(cg, fs), BSIZE);
	/* NOTE(review): bp is not released when B_ERROR is set -- bread
	 * still returns a buffer on error, so this path leaks it; the
	 * same pattern recurs elsewhere in this file.  Verify. */
	if (bp->b_flags & B_ERROR)
		return (0);
	cgp = bp->b_un.b_cg;
	if (size == BSIZE) {
		if (cgp->cg_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = alloccgblk(dev, fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = size / FSIZE;
	/* find the smallest run size >= frags with a nonzero count */
	for (allocsiz = frags; allocsiz < FRAG; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == FRAG) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = alloccgblk(dev, fs, cgp, bpref);
		bpref = bno % fs->fs_fpg;
		/* release the unused tail of the block as free frags */
		for (i = frags; i < FRAG; i++)
			setbit(cgp->cg_free, bpref + i);
		i = FRAG - frags;
		cgp->cg_nffree += i;
		fs->fs_nffree += i;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = bpref % fs->fs_fpg / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = roundup(fs->fs_fpg - 1, NBBY) / NBBY - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl, 1 << (allocsiz - 1));
	if (loc == 0) {
		/* nothing past start; wrap and search the front of the
		 * data area up to the original starting byte */
		len = start - 1;
		start = (cgdmin(cg, fs) - cgbase(cg, fs)) / NBBY;
		loc = scanc(len, &cgp->cg_free[start], fragtbl,
			1 << (allocsiz - 1));
		if (loc == 0)
			panic("alloccg: can't find frag");
	}
	/* scanc apparently returns the count remaining at the match;
	 * start + len - loc is then the matching byte -- confirm against
	 * scanc's definition */
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += FRAG) {
		blk = (cgp->cg_free[bno / NBBY] >> i) & (0xff >> NBBY - FRAG);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= FRAG - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				bno += i + pos;
				goto gotit;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: frag not in block");
gotit:
	for (i = 0; i < frags; i++)
		clrbit(cgp->cg_free, bno + i);
	cgp->cg_nffree -= frags;
	fs->fs_nffree -= frags;
	/* one run of allocsiz is consumed; any leftover piece becomes
	 * a smaller run */
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
369 
/*
 * Allocate a full block within cylinder group cgp.  If bpref names a
 * free block it is taken directly; otherwise the search starts at
 * the group's rotor and sweeps forward, wrapping at the end of the
 * group.  Updates the free-block counts and the per-cylinder block
 * table cg_b.  Panics if the group's counts promise a free block
 * that cannot be found.  Returns the fs-relative block address.
 */
daddr_t
alloccgblk(dev, fs, cgp, bpref)
	dev_t dev;
	struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	register int i;

	if (bpref) {
		bpref &= ~(FRAG - 1);	/* round down to a block boundary */
		bpref %= fs->fs_fpg;	/* make cg-relative */
		if (isblock(cgp->cg_free, bpref/FRAG))
			goto gotit;
	} else
		bpref = cgp->cg_rotor;
	/* sweep the whole group, one block (FRAG frags) at a time,
	 * starting just past bpref */
	for (i = 0; i < cgp->cg_ndblk; i += FRAG) {
		bpref += FRAG;
		if (bpref >= cgp->cg_ndblk)
			bpref = 0;
		if (isblock(cgp->cg_free, bpref/FRAG)) {
			cgp->cg_rotor = bpref;
			goto gotit;
		}
	}
	panic("alloccgblk: can't find a blk");
	return (0);
gotit:
	clrblock(cgp->cg_free, bpref/FRAG);
	cgp->cg_nbfree--;
	fs->fs_nbfree--;
	fs->fs_cs[cgp->cg_cgx].cs_nbfree--;
	/* charge the block against its cylinder/rotational position */
	i = bpref * NSPF;
	cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bpref);
}
407 
408 long
409 ialloccg(dev, fs, cg, ipref, mode)
410 	dev_t dev;
411 	register struct fs *fs;
412 	int cg;
413 	daddr_t ipref;
414 	int mode;
415 {
416 	register struct buf *bp;
417 	register struct cg *cgp;
418 	int i;
419 
420 	bp = bread(dev, cgtod(cg, fs), BSIZE);
421 	if (bp->b_flags & B_ERROR)
422 		return (0);
423 	cgp = bp->b_un.b_cg;
424 	if (cgp->cg_nifree == 0) {
425 		brelse(bp);
426 		return (0);
427 	}
428 	if (ipref) {
429 		ipref %= fs->fs_ipg;
430 		if (isclr(cgp->cg_iused, ipref))
431 			goto gotit;
432 	} else
433 		ipref = cgp->cg_irotor;
434 	for (i = 0; i < fs->fs_ipg; i++) {
435 		ipref++;
436 		if (ipref >= fs->fs_ipg)
437 			ipref = 0;
438 		if (isclr(cgp->cg_iused, ipref)) {
439 			cgp->cg_irotor = ipref;
440 			goto gotit;
441 		}
442 	}
443 	brelse(bp);
444 	return (0);
445 gotit:
446 	setbit(cgp->cg_iused, ipref);
447 	cgp->cg_nifree--;
448 	fs->fs_nifree--;
449 	fs->fs_cs[cg].cs_nifree--;
450 	fs->fs_fmod++;
451 	if ((mode & IFMT) == IFDIR) {
452 		cgp->cg_ndir++;
453 		fs->fs_cs[cg].cs_ndir++;
454 	}
455 	bdwrite(bp);
456 	return (cg * fs->fs_ipg + ipref);
457 }
458 
459 fre(dev, bno, size)
460 	dev_t dev;
461 	daddr_t bno;
462 	int size;
463 {
464 	register struct fs *fs;
465 	register struct cg *cgp;
466 	register struct buf *bp;
467 	int cg, blk, frags, bbase;
468 	register int i;
469 
470 	if ((unsigned)size > BSIZE || size % FSIZE != 0)
471 		panic("free: bad size");
472 	fs = getfs(dev);
473 	cg = dtog(bno, fs);
474 	if (badblock(fs, bno))
475 		return;
476 	bp = bread(dev, cgtod(cg, fs), BSIZE);
477 	if (bp->b_flags & B_ERROR)
478 		return;
479 	cgp = bp->b_un.b_cg;
480 	bno %= fs->fs_fpg;
481 	if (size == BSIZE) {
482 		if (isblock(cgp->cg_free, bno/FRAG))
483 			panic("free: freeing free block");
484 		setblock(cgp->cg_free, bno/FRAG);
485 		cgp->cg_nbfree++;
486 		fs->fs_nbfree++;
487 		fs->fs_cs[cg].cs_nbfree++;
488 		i = bno * NSPF;
489 		cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]++;
490 	} else {
491 		bbase = bno - (bno % FRAG);
492 		/*
493 		 * decrement the counts associated with the old frags
494 		 */
495 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
496 		       (0xff >> (NBBY - FRAG)));
497 		fragacct(blk, cgp->cg_frsum, -1);
498 		/*
499 		 * deallocate the fragment
500 		 */
501 		frags = size / FSIZE;
502 		for (i = 0; i < frags; i++) {
503 			if (isset(cgp->cg_free, bno + i))
504 				panic("free: freeing free frag");
505 			setbit(cgp->cg_free, bno + i);
506 			cgp->cg_nffree++;
507 			fs->fs_nffree++;
508 		}
509 		/*
510 		 * add back in counts associated with the new frags
511 		 */
512 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
513 		       (0xff >> (NBBY - FRAG)));
514 		fragacct(blk, cgp->cg_frsum, 1);
515 		/*
516 		 * if a complete block has been reassembled, account for it
517 		 */
518 		if (isblock(cgp->cg_free, bbase / FRAG)) {
519 			cgp->cg_nffree -= FRAG;
520 			fs->fs_nffree -= FRAG;
521 			cgp->cg_nbfree++;
522 			fs->fs_nbfree++;
523 			fs->fs_cs[cg].cs_nbfree++;
524 			i = bbase * NSPF;
525 			cgp->cg_b[i / fs->fs_spc]
526 				 [i % fs->fs_nsect * NRPOS / fs->fs_nsect]++;
527 		}
528 	}
529 	fs->fs_fmod++;
530 	bdwrite(bp);
531 }
532 
533 ifree(dev, ino, mode)
534 	dev_t dev;
535 	ino_t ino;
536 	int mode;
537 {
538 	register struct fs *fs;
539 	register struct cg *cgp;
540 	register struct buf *bp;
541 	int i;
542 	int cg;
543 
544 	fs = getfs(dev);
545 	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
546 		panic("ifree: range");
547 	cg = itog(ino, fs);
548 	bp = bread(dev, cgtod(cg, fs), BSIZE);
549 	if (bp->b_flags & B_ERROR)
550 		return;
551 	cgp = bp->b_un.b_cg;
552 	ino %= fs->fs_ipg;
553 	if (isclr(cgp->cg_iused, ino))
554 		panic("ifree: freeing free inode");
555 	clrbit(cgp->cg_iused, ino);
556 	cgp->cg_nifree++;
557 	fs->fs_nifree++;
558 	fs->fs_cs[cg].cs_nifree++;
559 	if ((mode & IFMT) == IFDIR) {
560 		cgp->cg_ndir--;
561 		fs->fs_cs[cg].cs_ndir--;
562 	}
563 	fs->fs_fmod++;
564 	bdwrite(bp);
565 }
566 
/*
 * update the frsum fields to reflect addition or deletion
 * of some frags
 *
 * fragmap holds one block's worth of the free map (FRAG bits, set
 * bit == free frag); cnt is +1 to add or -1 to delete.  For each
 * maximal run of free frags of size siz (1 <= siz < FRAG) present
 * in fragmap, fraglist[siz] is adjusted by cnt.
 */
fragacct(fragmap, fraglist, cnt)
	int fragmap;
	short fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	/* fragtbl[fragmap] presumably has bit siz set iff a free run of
	 * exactly siz frags occurs in fragmap -- confirm against the
	 * table's construction; it lets the siz loop skip absent sizes */
	inblk = (int)(fragtbl[fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < FRAG; siz++) {
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];	/* mask covering run + borders */
		subfield = inside[siz];	/* pattern for a free run of siz */
		for (pos = siz; pos <= FRAG; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				/* skip past the run just counted */
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}
599 
600 badblock(fs, bn)
601 	register struct fs *fs;
602 	daddr_t bn;
603 {
604 
605 	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(dtog(bn, fs), fs)) {
606 		fserr(fs, "bad block");
607 		return (1);
608 	}
609 	return (0);
610 }
611 
612 /*
613  * getfs maps a device number into
614  * a pointer to the incore super
615  * block.  The algorithm is a linear
616  * search through the mount table.
617  * A consistency check of the
618  * in core free-block and i-node
619  * counts is performed.
620  *
621  * panic: no fs -- the device is not mounted.
622  *	this "cannot happen"
623  */
624 struct fs *
625 getfs(dev)
626 	dev_t dev;
627 {
628 	register struct mount *mp;
629 	register struct fs *fs;
630 
631 	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
632 		if (mp->m_bufp != NULL && mp->m_dev == dev) {
633 			fs = mp->m_bufp->b_un.b_fs;
634 			if (fs->fs_magic != FS_MAGIC)
635 				panic("getfs: bad magic");
636 			return (fs);
637 		}
638 	panic("getfs: no fs");
639 	return (NULL);
640 }
641 
642 /*
643  * Fserr prints the name of a file system
644  * with an error diagnostic, in the form
645  *	fs: error message
646  */
647 fserr(fs, cp)
648 	struct fs *fs;
649 	char *cp;
650 {
651 
652 	printf("%s: %s\n", fs->fs_fsmnt, cp);
653 }
654 
655 /*
656  * Getfsx returns the index in the file system
657  * table of the specified device.  The swap device
658  * is also assigned a pseudo-index.  The index may
659  * be used as a compressed indication of the location
660  * of a block, recording
661  *	<getfsx(dev),blkno>
662  * rather than
663  *	<dev, blkno>
664  * provided the information need remain valid only
665  * as long as the file system is mounted.
666  */
667 getfsx(dev)
668 	dev_t dev;
669 {
670 	register struct mount *mp;
671 
672 	if (dev == swapdev)
673 		return (MSWAPX);
674 	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
675 		if (mp->m_dev == dev)
676 			return (mp - &mount[0]);
677 	return (-1);
678 }
679 
/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate modified
 * super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i;

	/* single-thread sync: silently return if one is in progress */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, BSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			/* write back the cylinder-group summary area,
			 * one BSIZE buffer at a time */
			for (i = 0; i < cssize(fs); i += BSIZE) {
				bp = getblk(mp->m_dev, csaddr(fs) + i / FSIZE,
					BSIZE);
				/* NOTE(review): fs_cs + i is pointer
				 * arithmetic in element units while i counts
				 * bytes -- verify against fs_cs's declared
				 * type in fs.h */
				bcopy(fs->fs_cs + i, bp->b_un.b_addr, BSIZE);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 * inodeNINODE is declared outside this file; presumably it
	 * marks the end of the in-core inode table.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;	/* hold inode across iupdat/iput */
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
741