/* Copyright (c) 1981 Regents of the University of California */

static char vers[] = "@(#)lfs_alloc.c 1.5 10/07/81";

/*	alloc.c	4.8	81/03/08	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/mount.h"
#include "../h/fs.h"
#include "../h/conf.h"
#include "../h/buf.h"
#include "../h/inode.h"
#include "../h/dir.h"
#include "../h/user.h"

long	hashalloc();
long	alloccg();
long	ialloccg();

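/*
 * allocate a block or fragment of `size' bytes for inode ip.
 * bpref, if nonzero, selects the preferred cylinder group;
 * otherwise the group containing the inode is tried first.
 * the new block is obtained through hashalloc()/alloccg() and
 * returned as a cleared buffer; NULL is returned (and ENOSPC
 * set) if the file system is out of space.
 */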
struct buf *
alloc(dev, ip, bpref, size)
	dev_t dev;
	register struct inode *ip;
	daddr_t bpref;
	int size;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	if ((unsigned)size > BSIZE || size % FSIZE != 0)
		panic("alloc: bad size");
	fs = getfs(dev);
	if (fs->fs_nbfree == 0 && size == BSIZE)
		goto nospace;
	if (bpref == 0)
		cg = itog(ip->i_number, fs);
	else
		cg = dtog(bpref, fs);
	bno = hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
	if (bno == 0)
		goto nospace;
	bp = getblk(dev, bno, size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

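/*
 * reallocate the fragment at bprev from osize to nsize bytes.
 * the fragment is extended in place via fragextend() when the
 * adjacent frags are free; otherwise a new area of nsize bytes
 * is allocated, the old contents are moved to it, and the old
 * frags are freed.  the grown portion of the returned buffer is
 * cleared.  NULL is returned (and ENOSPC set) if no space is
 * available.
 */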
struct buf *
realloccg(dev, ip, bprev, osize, nsize)
	dev_t dev;
	register struct inode *ip;
	daddr_t bprev;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	if ((unsigned)osize > BSIZE || osize % FSIZE != 0 ||
	    (unsigned)nsize > BSIZE || nsize % FSIZE != 0)
		panic("realloccg: bad size");
	fs = getfs(dev);
	if (bprev == 0)
		panic("realloccg: bad bprev");
	else
		cg = dtog(bprev, fs);
	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		bp = bread(dev, bno, osize);
		bp->b_bcount = nsize;
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	bno = hashalloc(dev, fs, cg, (long)bprev, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy
		 */
		obp = bread(dev, bprev, osize);
		bp = getblk(dev, bno, nsize);
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(dev, bprev, osize);
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

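/*
 * allocate an inode on device dev, preferring the cylinder
 * group that holds inode ipref.  the search is done through
 * hashalloc()/ialloccg(); the chosen inode is returned via
 * iget().  NULL is returned (and ENOSPC set) if no inodes
 * are free.
 */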
struct inode *
ialloc(dev, ipref, mode)
	dev_t dev;
	ino_t ipref;
	int mode;
{
	daddr_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg;

	fs = getfs(dev);
	if (fs->fs_nifree == 0)
		goto noinodes;
	cg = itog(ipref, fs);
	ino = hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	ip = iget(dev, ino);
	if (ip == NULL) {
		ifree(dev, ino);
		return (NULL);
	}
	if (ip->i_mode)
		panic("ialloc: dup alloc");
	return (ip);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

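/*
 * select a preferred cylinder group for a new directory:
 * the group with the fewest existing directories is chosen,
 * and the number of its first inode is returned for use as
 * an allocation preference.
 */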
dipref(dev)
	dev_t dev;
{
	register struct fs *fs;
	int cg, minndir, mincg;

	fs = getfs(dev);
	minndir = fs->fs_cs[0].cs_ndir;
	mincg = 0;
	for (cg = 1; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs[cg].cs_ndir < minndir) {
			mincg = cg;
			minndir = fs->fs_cs[cg].cs_ndir;
			if (minndir == 0)
				break;
		}
	return (fs->fs_ipg * mincg);
}

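/*
 * general allocation search, used for both blocks and inodes.
 * the given allocator (alloccg or ialloccg) is applied to:
 *   1) the preferred cylinder group, with the preferred address
 *   2) successive cylinder groups found by quadratic rehash
 *   3) all cylinder groups in turn, as a brute force search
 * the first nonzero result is returned; 0 means no space.
 */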
long
hashalloc(dev, fs, cg, pref, size, allocator)
	dev_t dev;
	register struct fs *fs;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	long (*allocator)();
{
	long result;
	int i, icg = cg;

	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(dev, fs, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 */
	cg = icg;
	for (i = 0; i < fs->fs_ncg; i++) {
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

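/*
 * attempt to extend the fragment at bprev in cylinder group cg
 * from osize to nsize bytes in place.  this succeeds only if the
 * extra frags immediately following it are free and lie in the
 * same block; the free map and fragment summaries are updated
 * and bprev is returned, otherwise 0.
 */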
daddr_t
fragextend(dev, fs, cg, bprev, osize, nsize)
	dev_t dev;
	register struct fs *fs;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct buf *bp;
	register struct cg *cgp;
	long bno;
	int frags, bbase;
	int i;

	frags = nsize / FSIZE;
	bbase = bprev % FRAG;
	if (bbase > (bprev + frags - 1) % FRAG) {
		/* cannot extend across a block boundary */
		return (0);
	}
	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR)
		return (0);
	cgp = bp->b_un.b_cg;
	bno = bprev % fs->fs_fpg;
	for (i = osize / FSIZE; i < frags; i++) {
		if (isclr(cgp->cg_free, bno + i))
			break;
	}
	if (i == frags) {
		/*
		 * the current fragment can be extended
		 * deduct the count on fragment being extended into
		 * increase the count on the remaining fragment (if any)
		 * allocate the extended piece
		 */
		for (i = frags; i < FRAG - bbase; i++)
			if (isclr(cgp->cg_free, bno + i))
				break;
		cgp->cg_frsum[i - osize / FSIZE]--;
		if (i != frags)
			cgp->cg_frsum[i - frags]++;
		for (i = osize / FSIZE; i < frags; i++) {
			clrbit(cgp->cg_free, bno + i);
			cgp->cg_nffree--;
			fs->fs_nffree--;
		}
		fs->fs_fmod++;
		bdwrite(bp);
		return (bprev);
	}
	brelse(bp);
	return (0);
}

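/*
 * allocate a block or fragment of `size' bytes in cylinder group cg.
 * a full block is taken through alloccgblk().  for a fragment, an
 * existing free fragment of sufficient size is searched for in the
 * free map; if none exists, a full block is allocated and broken up.
 * the frag address within the file system is returned, or 0 on failure.
 */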
daddr_t
alloccg(dev, fs, cg, bpref, size)
	dev_t dev;
	register struct fs *fs;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct buf *bp;
	register struct cg *cgp;
	int bno, frags;
	int allocsiz;
	int start, len, loc;
	int blk, field, subfield, pos;
	register int i;

	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR)
		return (0);
	cgp = bp->b_un.b_cg;
	if (size == BSIZE) {
		if (cgp->cg_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = alloccgblk(dev, fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = size / FSIZE;
	for (allocsiz = frags; allocsiz < FRAG; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == FRAG) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = alloccgblk(dev, fs, cgp, bpref);
		bpref = bno % fs->fs_fpg;
		for (i = frags; i < FRAG; i++)
			setbit(cgp->cg_free, bpref + i);
		i = FRAG - frags;
		cgp->cg_nffree += i;
		fs->fs_nffree += i;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = bpref % fs->fs_fpg / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = roundup(fs->fs_fpg - 1, NBBY) / NBBY - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl, 1 << (allocsiz - 1));
	if (loc == 0) {
		len = start - 1;
		start = (cgdmin(cg, fs) - cgbase(cg, fs)) / NBBY;
		loc = scanc(len, &cgp->cg_free[start], fragtbl,
			1 << (allocsiz - 1));
		if (loc == 0)
			panic("alloccg: can't find frag");
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += FRAG) {
		blk = (cgp->cg_free[bno / NBBY] >> i) & (0xff >> (NBBY - FRAG));
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= FRAG - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				bno += i + pos;
				goto gotit;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: frag not in block");
gotit:
	for (i = 0; i < frags; i++)
		clrbit(cgp->cg_free, bno + i);
	cgp->cg_nffree -= frags;
	fs->fs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}

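/*
 * allocate a full block in the indicated cylinder group.
 * the block containing bpref is used if it is free; otherwise
 * the free map is searched forward from bpref (or from the
 * rotor when no preference is given) for the next free block,
 * which becomes the new rotor.  the free counts and block
 * position summary are updated and the frag address of the
 * block is returned.
 */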
daddr_t
alloccgblk(dev, fs, cgp, bpref)
	dev_t dev;
	struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	register int i;

	if (bpref) {
		bpref &= ~(FRAG - 1);
		bpref %= fs->fs_fpg;
		if (isblock(cgp->cg_free, bpref/FRAG))
			goto gotit;
	} else
		bpref = cgp->cg_rotor;
	for (i = 0; i < cgp->cg_ndblk; i += FRAG) {
		bpref += FRAG;
		if (bpref >= cgp->cg_ndblk)
			bpref = 0;
		if (isblock(cgp->cg_free, bpref/FRAG)) {
			cgp->cg_rotor = bpref;
			goto gotit;
		}
	}
	panic("alloccgblk: can't find a blk");
	return (0);
gotit:
	clrblock(cgp->cg_free, bpref/FRAG);
	cgp->cg_nbfree--;
	fs->fs_nbfree--;
	fs->fs_cs[cgp->cg_cgx].cs_nbfree--;
	i = bpref * NSPF;
	cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bpref);
}

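/*
 * allocate an inode in cylinder group cg.  inode ipref is used
 * if it is free; otherwise the used-inode map is searched forward
 * from there (or from the rotor) for a free inode.  the inode
 * counts (and, for directories, the directory counts) are updated
 * and the inode number is returned, or 0 if the group has no
 * free inodes.
 */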
long
ialloccg(dev, fs, cg, ipref, mode)
	dev_t dev;
	register struct fs *fs;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct buf *bp;
	register struct cg *cgp;
	int i;

	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR)
		return (0);
	cgp = bp->b_un.b_cg;
	if (cgp->cg_nifree == 0) {
		brelse(bp);
		return (0);
	}
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cgp->cg_iused, ipref))
			goto gotit;
	} else
		ipref = cgp->cg_irotor;
	for (i = 0; i < fs->fs_ipg; i++) {
		ipref++;
		if (ipref >= fs->fs_ipg)
			ipref = 0;
		if (isclr(cgp->cg_iused, ipref)) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	brelse(bp);
	return (0);
gotit:
	setbit(cgp->cg_iused, ipref);
	cgp->cg_nifree--;
	fs->fs_nifree--;
	fs->fs_cs[cg].cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_ndir++;
		fs->fs_cs[cg].cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

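/*
 * free a block or fragment of `size' bytes at address bno.
 * the corresponding bits are set in the cylinder group free map
 * and the block and fragment counts and summaries are adjusted;
 * frags that reassemble a complete block are credited as a
 * free block.
 */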
fre(dev, bno, size)
	dev_t dev;
	daddr_t bno;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg, blk, frags, bbase;
	register int i;

	if ((unsigned)size > BSIZE || size % FSIZE != 0)
		panic("free: bad size");
	fs = getfs(dev);
	cg = dtog(bno, fs);
	if (badblock(fs, bno))
		return;
	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR)
		return;
	cgp = bp->b_un.b_cg;
	bno %= fs->fs_fpg;
	if (size == BSIZE) {
		if (isblock(cgp->cg_free, bno/FRAG))
			panic("free: freeing free block");
		setblock(cgp->cg_free, bno/FRAG);
		cgp->cg_nbfree++;
		fs->fs_nbfree++;
		fs->fs_cs[cg].cs_nbfree++;
		i = bno * NSPF;
		cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]++;
	} else {
		bbase = bno - (bno % FRAG);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
		       (0xff >> (NBBY - FRAG)));
		fragacct(blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = size / FSIZE;
		for (i = 0; i < frags; i++) {
			if (isset(cgp->cg_free, bno + i))
				panic("free: freeing free frag");
			setbit(cgp->cg_free, bno + i);
			cgp->cg_nffree++;
			fs->fs_nffree++;
		}
		/*
		 * add back in counts associated with the new frags
		 */
		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
		       (0xff >> (NBBY - FRAG)));
		fragacct(blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(cgp->cg_free, bbase / FRAG)) {
			cgp->cg_nffree -= FRAG;
			fs->fs_nffree -= FRAG;
			cgp->cg_nbfree++;
			fs->fs_nbfree++;
			fs->fs_cs[cg].cs_nbfree++;
			i = bbase * NSPF;
			cgp->cg_b[i / fs->fs_spc]
				 [i % fs->fs_nsect * NRPOS / fs->fs_nsect]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

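/*
 * free inode ino of the given mode: clear its bit in the
 * cylinder group used-inode map and adjust the inode (and,
 * for directories, the directory) counts.
 */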
ifree(dev, ino, mode)
	dev_t dev;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int i;
	int cg;

	fs = getfs(dev);
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
		panic("ifree: range");
	cg = itog(ino, fs);
	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR)
		return;
	cgp = bp->b_un.b_cg;
	ino %= fs->fs_ipg;
	if (isclr(cgp->cg_iused, ino))
		panic("ifree: freeing free inode");
	clrbit(cgp->cg_iused, ino);
	cgp->cg_nifree++;
	fs->fs_nifree++;
	fs->fs_cs[cg].cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_ndir--;
		fs->fs_cs[cg].cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * update the frsum fields to reflect addition or deletion
 * of some frags
 */
fragacct(fragmap, fraglist, cnt)
	int fragmap;
	short fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	inblk = (int)(fragtbl[fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < FRAG; siz++) {
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		for (pos = siz; pos <= FRAG; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}

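/*
 * check that a disk address lies within the file system and
 * within the data area of its cylinder group; if not, report
 * "bad block" and return 1.
 */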
badblock(fs, bn)
	register struct fs *fs;
	daddr_t bn;
{

	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(dtog(bn, fs), fs)) {
		fserr(fs, "bad block");
		return (1);
	}
	return (0);
}

/*
 * getfs maps a device number into
 * a pointer to the incore super
 * block.  The algorithm is a linear
 * search through the mount table.
 * A consistency check of the
 * super-block magic number is
 * performed.
 *
 * panic: no fs -- the device is not mounted.
 *	this "cannot happen"
 */
struct fs *
getfs(dev)
	dev_t dev;
{
	register struct mount *mp;
	register struct fs *fs;

	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL && mp->m_dev == dev) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_magic != FS_MAGIC)
				panic("getfs: bad magic");
			return (fs);
		}
	panic("getfs: no fs");
	return (NULL);
}

/*
 * Fserr prints the name of a file system
 * with an error diagnostic, in the form
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}

/*
 * Getfsx returns the index in the file system
 * table of the specified device.  The swap device
 * is also assigned a pseudo-index.  The index may
 * be used as a compressed indication of the location
 * of a block, recording
 *	<getfsx(dev),blkno>
 * rather than
 *	<dev, blkno>
 * provided the information need remain valid only
 * as long as the file system is mounted.
 */
getfsx(dev)
	dev_t dev;
{
	register struct mount *mp;

	if (dev == swapdev)
		return (MSWAPX);
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_dev == dev)
			return (mp - &mount[0]);
	return (-1);
}

/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * back modified inodes; and goes through the mount table to write
 * back modified super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i;

	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, BSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			for (i = 0; i < cssize(fs); i += BSIZE) {
				bp = getblk(mp->m_dev, csaddr(fs) + i / FSIZE,
					BSIZE);
				bcopy(fs->fs_cs + i, bp->b_un.b_addr, BSIZE);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if ((ip->i_flag&ILOCK) == 0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
739