/* xref: /csrg-svn/sys/ufs/lfs/lfs_alloc.c (revision 4792) */
1 /* Copyright (c) 1981 Regents of the University of California */
2 
3 static char vers[] = "@(#)lfs_alloc.c 1.8 11/07/81";
4 
5 /*	alloc.c	4.8	81/03/08	*/
6 
7 #include "../h/param.h"
8 #include "../h/systm.h"
9 #include "../h/mount.h"
10 #include "../h/fs.h"
11 #include "../h/conf.h"
12 #include "../h/buf.h"
13 #include "../h/inode.h"
14 #include "../h/dir.h"
15 #include "../h/user.h"
16 
17 extern long		hashalloc();
18 extern long		ialloccg();
19 extern daddr_t		alloccg();
20 extern daddr_t		alloccgblk();
21 extern daddr_t		fragextend();
22 extern daddr_t		blkpref();
23 extern daddr_t		mapsearch();
24 extern int		inside[], around[];
25 extern unsigned char	fragtbl[];
26 
27 struct buf *
28 alloc(dev, ip, bpref, size)
29 	dev_t dev;
30 	register struct inode *ip;
31 	daddr_t bpref;
32 	int size;
33 {
34 	daddr_t bno;
35 	register struct fs *fs;
36 	register struct buf *bp;
37 	int cg;
38 
39 	if ((unsigned)size > BSIZE || size % FSIZE != 0)
40 		panic("alloc: bad size");
41 	fs = getfs(dev);
42 	if (size == BSIZE && fs->fs_cstotal.cs_nbfree == 0)
43 		goto nospace;
44 	if (u.u_uid != 0 &&
45 	    fs->fs_cstotal.cs_nbfree * FRAG + fs->fs_cstotal.cs_nffree <
46 	      fs->fs_dsize * minfree / 100)
47 		goto nospace;
48 	if (bpref == 0)
49 		cg = itog(ip->i_number, fs);
50 	else
51 		cg = dtog(bpref, fs);
52 	bno = hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
53 	if (bno == 0)
54 		goto nospace;
55 	bp = getblk(dev, bno, size);
56 	clrbuf(bp);
57 	return (bp);
58 nospace:
59 	fserr(fs, "file system full");
60 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
61 	u.u_error = ENOSPC;
62 	return (NULL);
63 }
64 
/*
 * Reallocate a fragment to a larger size.
 * bprev is the current address of the fragment, osize/nsize the
 * old and new sizes.  First an in-place extension is attempted
 * via fragextend; failing that, a new fragment is allocated and
 * the old contents are moved over.  On failure u.u_error is set
 * to ENOSPC and NULL is returned.
 */
struct buf *
realloccg(dev, ip, bprev, bpref, osize, nsize)
	dev_t dev;
	register struct inode *ip;
	daddr_t bprev, bpref;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	if ((unsigned)osize > BSIZE || osize % FSIZE != 0 ||
	    (unsigned)nsize > BSIZE || nsize % FSIZE != 0)
		panic("realloccg: bad size");
	fs = getfs(dev);
	/* only the super-user may dip into the minfree reserve */
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * FRAG + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * minfree / 100)
		goto nospace;
	if (bprev == 0)
		panic("realloccg: bad bprev");
	else
		cg = dtog(bprev, fs);
	/* first try to grow the existing fragment in place */
	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		bp = bread(dev, bno, osize);
		bp->b_bcount = nsize;
		/* zero the newly acquired tail of the buffer */
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	bno = hashalloc(dev, fs, cg, (long)bpref, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy
		 * The data is moved by swapping the b_addr pointers
		 * of the old and new buffers rather than copying;
		 * the old buffer (now holding the new, uninitialized
		 * memory) is invalidated and released, and the old
		 * fragment is returned to the free list.
		 */
		obp = bread(dev, bprev, osize);
		bp = getblk(dev, bno, nsize);
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(dev, bprev, osize);
		/* zero the portion beyond the old contents */
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return(bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
122 
123 struct inode *
124 ialloc(dev, ipref, mode)
125 	dev_t dev;
126 	ino_t ipref;
127 	int mode;
128 {
129 	daddr_t ino;
130 	register struct fs *fs;
131 	register struct inode *ip;
132 	int cg;
133 
134 	fs = getfs(dev);
135 	if (fs->fs_cstotal.cs_nifree == 0)
136 		goto noinodes;
137 	cg = itog(ipref, fs);
138 	ino = hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
139 	if (ino == 0)
140 		goto noinodes;
141 	ip = iget(dev, ino);
142 	if (ip == NULL) {
143 		ifree(dev, ino);
144 		return (NULL);
145 	}
146 	if (ip->i_mode)
147 		panic("ialloc: dup alloc");
148 	return (ip);
149 noinodes:
150 	fserr(fs, "out of inodes");
151 	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
152 	u.u_error = ENOSPC;
153 	return (NULL);
154 }
155 
156 /*
157  * find a cylinder to place a directory
158  */
159 dirpref(dev)
160 	dev_t dev;
161 {
162 	register struct fs *fs;
163 	int cg, minndir, mincg, avgifree;
164 
165 	fs = getfs(dev);
166 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
167 	minndir = fs->fs_ipg;
168 	mincg = 0;
169 	for (cg = 0; cg < fs->fs_ncg; cg++)
170 		if (fs->fs_cs(cg).cs_ndir < minndir &&
171 		    fs->fs_cs(cg).cs_nifree >= avgifree) {
172 			mincg = cg;
173 			minndir = fs->fs_cs(cg).cs_ndir;
174 		}
175 	return (fs->fs_ipg * mincg);
176 }
177 
178 /*
179  * select a cylinder to place a large block of data
180  */
181 blkpref(dev)
182 	dev_t dev;
183 {
184 	register struct fs *fs;
185 	int cg, avgbfree;
186 
187 	fs = getfs(dev);
188 	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
189 	for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
190 		if (fs->fs_cs(cg).cs_nbfree >= avgbfree) {
191 			fs->fs_cgrotor = cg;
192 			return (fs->fs_fpg * cg + FRAG);
193 		}
194 	for (cg = 0; cg <= fs->fs_cgrotor; cg++)
195 		if (fs->fs_cs(cg).cs_nbfree >= avgbfree) {
196 			fs->fs_cgrotor = cg;
197 			return (fs->fs_fpg * cg + FRAG);
198 		}
199 	return (0);
200 }
201 
/*
 * Implement the cylinder overflow algorithm.
 * Three strategies are tried in order:
 *   1) allocate in the preferred cylinder group
 *   2) quadratically rehash into other cylinder groups
 *   3) brute force scan of all cylinder groups
 * allocator is alloccg for data blocks or ialloccg for inodes;
 * size carries the request size for blocks and the mode for
 * inodes.  Returns the allocator's result, or 0 if every
 * cylinder group fails.
 */
long
hashalloc(dev, fs, cg, pref, size, allocator)
	dev_t dev;
	register struct fs *fs;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	long (*allocator)();
{
	long result;
	int i, icg = cg;

	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(dev, fs, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 * (the position preference applies only to the original
	 * group, so pref is passed as 0 from here on)
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * (restarts from the original group; groups already probed
	 * by the rehash are retried, which is harmless)
	 */
	cg = icg;
	for (i = 0; i < fs->fs_ncg; i++) {
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
245 
246 daddr_t
247 fragextend(dev, fs, cg, bprev, osize, nsize)
248 	dev_t dev;
249 	register struct fs *fs;
250 	int cg;
251 	long bprev;
252 	int osize, nsize;
253 {
254 	register struct buf *bp;
255 	register struct cg *cgp;
256 	long bno;
257 	int frags, bbase;
258 	int i;
259 
260 	frags = nsize / FSIZE;
261 	bbase = bprev % FRAG;
262 	if (bbase > (bprev + frags - 1) % FRAG) {
263 		/* cannot extend across a block boundry */
264 		return (0);
265 	}
266 	bp = bread(dev, cgtod(cg, fs), BSIZE);
267 	if (bp->b_flags & B_ERROR)
268 		return (0);
269 	cgp = bp->b_un.b_cg;
270 	bno = bprev % fs->fs_fpg;
271 	for (i = osize / FSIZE; i < frags; i++) {
272 		if (isclr(cgp->cg_free, bno + i))
273 			break;
274 	}
275 	if (i == frags) {
276 		/*
277 		 * the current fragment can be extended
278 		 * deduct the count on fragment being extended into
279 		 * increase the count on the remaining fragment (if any)
280 		 * allocate the extended piece
281 		 */
282 		for (i = frags; i < FRAG - bbase; i++)
283 			if (isclr(cgp->cg_free, bno + i))
284 				break;
285 		cgp->cg_frsum[i - osize / FSIZE]--;
286 		if (i != frags)
287 			cgp->cg_frsum[i - frags]++;
288 		for (i = osize / FSIZE; i < frags; i++) {
289 			clrbit(cgp->cg_free, bno + i);
290 			cgp->cg_cs.cs_nffree--;
291 			fs->fs_cstotal.cs_nffree--;
292 			fs->fs_cs(cg).cs_nffree--;
293 		}
294 		fs->fs_fmod++;
295 		bdwrite(bp);
296 		return (bprev);
297 	}
298 	brelse(bp);
299 	return (0);
300 }
301 
302 daddr_t
303 alloccg(dev, fs, cg, bpref, size)
304 	dev_t dev;
305 	register struct fs *fs;
306 	int cg;
307 	daddr_t bpref;
308 	int size;
309 {
310 	register struct buf *bp;
311 	register struct cg *cgp;
312 	int bno, frags;
313 	int allocsiz;
314 	register int i;
315 
316 	if (fs->fs_cs(cg).cs_nbfree == 0 && size == BSIZE)
317 		return (0);
318 	bp = bread(dev, cgtod(cg, fs), BSIZE);
319 	if (bp->b_flags & B_ERROR)
320 		return (0);
321 	cgp = bp->b_un.b_cg;
322 	if (size == BSIZE) {
323 		bno = alloccgblk(dev, fs, cgp, bpref);
324 		bdwrite(bp);
325 		return (bno);
326 	}
327 	/*
328 	 * check to see if any fragments are already available
329 	 * allocsiz is the size which will be allocated, hacking
330 	 * it down to a smaller size if necessary
331 	 */
332 	frags = size / FSIZE;
333 	for (allocsiz = frags; allocsiz < FRAG; allocsiz++)
334 		if (cgp->cg_frsum[allocsiz] != 0)
335 			break;
336 	if (allocsiz == FRAG) {
337 		/*
338 		 * no fragments were available, so a block will be
339 		 * allocated, and hacked up
340 		 */
341 		if (cgp->cg_cs.cs_nbfree == 0) {
342 			brelse(bp);
343 			return (0);
344 		}
345 		bno = alloccgblk(dev, fs, cgp, bpref);
346 		bpref = bno % fs->fs_fpg;
347 		for (i = frags; i < FRAG; i++)
348 			setbit(cgp->cg_free, bpref + i);
349 		i = FRAG - frags;
350 		cgp->cg_cs.cs_nffree += i;
351 		fs->fs_cstotal.cs_nffree += i;
352 		fs->fs_cs(cg).cs_nffree += i;
353 		cgp->cg_frsum[i]++;
354 		bdwrite(bp);
355 		return (bno);
356 	}
357 	bno = mapsearch(fs, cgp, bpref, allocsiz);
358 	if (bno == 0)
359 		return (0);
360 	for (i = 0; i < frags; i++)
361 		clrbit(cgp->cg_free, bno + i);
362 	cgp->cg_cs.cs_nffree -= frags;
363 	fs->fs_cstotal.cs_nffree -= frags;
364 	fs->fs_cs(cg).cs_nffree -= frags;
365 	cgp->cg_frsum[allocsiz]--;
366 	if (frags != allocsiz)
367 		cgp->cg_frsum[allocsiz - frags]++;
368 	bdwrite(bp);
369 	return (cg * fs->fs_fpg + bno);
370 }
371 
372 daddr_t
373 alloccgblk(dev, fs, cgp, bpref)
374 	dev_t dev;
375 	struct fs *fs;
376 	register struct cg *cgp;
377 	daddr_t bpref;
378 {
379 	daddr_t bno;
380 	int cylno, pos;
381 	short *cylbp;
382 	int i, j;
383 
384 	if (bpref == 0) {
385 		bpref = cgp->cg_rotor;
386 	} else {
387 		bpref &= ~(FRAG - 1);
388 		bpref %= fs->fs_fpg;
389 		/*
390 		 * if the requested block is available, use it
391 		 */
392 		if (isblock(cgp->cg_free, bpref/FRAG)) {
393 			bno = bpref;
394 			goto gotit;
395 		}
396 		/*
397 		 * check for a block available on the same cylinder
398 		 * beginning with one which is rotationally optimal
399 		 */
400 		i = bpref * NSPF;
401 		cylno = i / fs->fs_spc;
402 		cylbp = cgp->cg_b[cylno];
403 		pos = (i + (ROTDELAY == 0) ?
404 			0 : 1 + ROTDELAY * HZ * fs->fs_nsect / (NSPF * 1000)) %
405 			fs->fs_nsect * NRPOS / fs->fs_nsect;
406 		for (i = pos; i < NRPOS; i++)
407 			if (cylbp[i] > 0)
408 				break;
409 		if (i == NRPOS)
410 			for (i = 0; i < pos; i++)
411 				if (cylbp[i] > 0)
412 					break;
413 		if (cylbp[i] > 0) {
414 			bpref = cylno * fs->fs_spc / (NSPF * FRAG);
415 			for (j = fs->fs_postbl[i]; j > -1; j = fs->fs_rotbl[j]) {
416 				if (isblock(cgp->cg_free, bpref + j)) {
417 					bno = (bpref + j) * FRAG;
418 					goto gotit;
419 				}
420 			}
421 			panic("alloccgblk: can't find blk in cyl");
422 		}
423 	}
424 	bno = mapsearch(fs, cgp, bpref, FRAG);
425 	if (bno == 0)
426 		return (0);
427 	cgp->cg_rotor = bno;
428 gotit:
429 	clrblock(cgp->cg_free, bno/FRAG);
430 	cgp->cg_cs.cs_nbfree--;
431 	fs->fs_cstotal.cs_nbfree--;
432 	fs->fs_cs(cgp->cg_cgx).cs_nbfree--;
433 	i = bno * NSPF;
434 	cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]--;
435 	fs->fs_fmod++;
436 	return (cgp->cg_cgx * fs->fs_fpg + bno);
437 }
438 
439 long
440 ialloccg(dev, fs, cg, ipref, mode)
441 	dev_t dev;
442 	register struct fs *fs;
443 	int cg;
444 	daddr_t ipref;
445 	int mode;
446 {
447 	register struct buf *bp;
448 	register struct cg *cgp;
449 	int i;
450 
451 	if (fs->fs_cs(cg).cs_nifree == 0)
452 		return (0);
453 	bp = bread(dev, cgtod(cg, fs), BSIZE);
454 	if (bp->b_flags & B_ERROR)
455 		return (0);
456 	cgp = bp->b_un.b_cg;
457 	if (ipref) {
458 		ipref %= fs->fs_ipg;
459 		if (isclr(cgp->cg_iused, ipref))
460 			goto gotit;
461 	} else
462 		ipref = cgp->cg_irotor;
463 	for (i = 0; i < fs->fs_ipg; i++) {
464 		ipref++;
465 		if (ipref >= fs->fs_ipg)
466 			ipref = 0;
467 		if (isclr(cgp->cg_iused, ipref)) {
468 			cgp->cg_irotor = ipref;
469 			goto gotit;
470 		}
471 	}
472 	brelse(bp);
473 	return (0);
474 gotit:
475 	setbit(cgp->cg_iused, ipref);
476 	cgp->cg_cs.cs_nifree--;
477 	fs->fs_cstotal.cs_nifree--;
478 	fs->fs_cs(cg).cs_nifree--;
479 	fs->fs_fmod++;
480 	if ((mode & IFMT) == IFDIR) {
481 		cgp->cg_cs.cs_ndir++;
482 		fs->fs_cstotal.cs_ndir++;
483 		fs->fs_cs(cg).cs_ndir++;
484 	}
485 	bdwrite(bp);
486 	return (cg * fs->fs_ipg + ipref);
487 }
488 
489 fre(dev, bno, size)
490 	dev_t dev;
491 	daddr_t bno;
492 	int size;
493 {
494 	register struct fs *fs;
495 	register struct cg *cgp;
496 	register struct buf *bp;
497 	int cg, blk, frags, bbase;
498 	register int i;
499 
500 	if ((unsigned)size > BSIZE || size % FSIZE != 0)
501 		panic("free: bad size");
502 	fs = getfs(dev);
503 	cg = dtog(bno, fs);
504 	if (badblock(fs, bno))
505 		return;
506 	bp = bread(dev, cgtod(cg, fs), BSIZE);
507 	if (bp->b_flags & B_ERROR)
508 		return;
509 	cgp = bp->b_un.b_cg;
510 	bno %= fs->fs_fpg;
511 	if (size == BSIZE) {
512 		if (isblock(cgp->cg_free, bno/FRAG))
513 			panic("free: freeing free block");
514 		setblock(cgp->cg_free, bno/FRAG);
515 		cgp->cg_cs.cs_nbfree++;
516 		fs->fs_cstotal.cs_nbfree++;
517 		fs->fs_cs(cg).cs_nbfree++;
518 		i = bno * NSPF;
519 		cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]++;
520 	} else {
521 		bbase = bno - (bno % FRAG);
522 		/*
523 		 * decrement the counts associated with the old frags
524 		 */
525 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
526 		       (0xff >> (NBBY - FRAG)));
527 		fragacct(blk, cgp->cg_frsum, -1);
528 		/*
529 		 * deallocate the fragment
530 		 */
531 		frags = size / FSIZE;
532 		for (i = 0; i < frags; i++) {
533 			if (isset(cgp->cg_free, bno + i))
534 				panic("free: freeing free frag");
535 			setbit(cgp->cg_free, bno + i);
536 			cgp->cg_cs.cs_nffree++;
537 			fs->fs_cstotal.cs_nffree++;
538 			fs->fs_cs(cg).cs_nffree++;
539 		}
540 		/*
541 		 * add back in counts associated with the new frags
542 		 */
543 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
544 		       (0xff >> (NBBY - FRAG)));
545 		fragacct(blk, cgp->cg_frsum, 1);
546 		/*
547 		 * if a complete block has been reassembled, account for it
548 		 */
549 		if (isblock(cgp->cg_free, bbase / FRAG)) {
550 			cgp->cg_cs.cs_nffree -= FRAG;
551 			fs->fs_cstotal.cs_nffree -= FRAG;
552 			fs->fs_cs(cg).cs_nffree -= FRAG;
553 			cgp->cg_cs.cs_nbfree++;
554 			fs->fs_cstotal.cs_nbfree++;
555 			fs->fs_cs(cg).cs_nbfree++;
556 			i = bbase * NSPF;
557 			cgp->cg_b[i / fs->fs_spc]
558 				 [i % fs->fs_nsect * NRPOS / fs->fs_nsect]++;
559 		}
560 	}
561 	fs->fs_fmod++;
562 	bdwrite(bp);
563 }
564 
565 ifree(dev, ino, mode)
566 	dev_t dev;
567 	ino_t ino;
568 	int mode;
569 {
570 	register struct fs *fs;
571 	register struct cg *cgp;
572 	register struct buf *bp;
573 	int i;
574 	int cg;
575 
576 	fs = getfs(dev);
577 	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
578 		panic("ifree: range");
579 	cg = itog(ino, fs);
580 	bp = bread(dev, cgtod(cg, fs), BSIZE);
581 	if (bp->b_flags & B_ERROR)
582 		return;
583 	cgp = bp->b_un.b_cg;
584 	ino %= fs->fs_ipg;
585 	if (isclr(cgp->cg_iused, ino))
586 		panic("ifree: freeing free inode");
587 	clrbit(cgp->cg_iused, ino);
588 	cgp->cg_cs.cs_nifree++;
589 	fs->fs_cstotal.cs_nifree++;
590 	fs->fs_cs(cg).cs_nifree++;
591 	if ((mode & IFMT) == IFDIR) {
592 		cgp->cg_cs.cs_ndir--;
593 		fs->fs_cstotal.cs_ndir--;
594 		fs->fs_cs(cg).cs_ndir--;
595 	}
596 	fs->fs_fmod++;
597 	bdwrite(bp);
598 }
599 
/*
 * find a block of the specified size in the specified cylinder group
 * It is a panic if a request is made to find a block if none are
 * available.
 * Returns the frag number within the group of the start of a free
 * run of allocsiz frags, or 0 after panicking if the map is bad.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 * (scanc presumably returns the count of bytes left
	 * unscanned, with 0 meaning no matching byte -- confirm
	 * against its definition)
	 */
	if (bpref)
		start = bpref % fs->fs_fpg / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = roundup(fs->fs_fpg - 1, NBBY) / NBBY - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl, 1 << (allocsiz - 1));
	if (loc == 0) {
		/*
		 * nothing between start and the end of the map;
		 * rescan from the first data block of the group
		 * up toward the original starting byte
		 */
		len = start - 1;
		start = (cgdmin(cgp->cg_cgx, fs) -
			 cgbase(cgp->cg_cgx, fs)) / NBBY;
		loc = scanc(len, &cgp->cg_free[start], fragtbl,
			1 << (allocsiz - 1));
		if (loc == 0) {
			panic("alloccg: map corrupted");
			return (0);
		}
	}
	/* convert the remaining-byte count back to a frag number */
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += FRAG) {
		blk = (cgp->cg_free[bno / NBBY] >> i) & (0xff >> NBBY - FRAG);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		/* slide the allocsiz-bit window across the block's map */
		for (pos = 0; pos <= FRAG - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				return (bno + i + pos);
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (0);
}
659 
/*
 * update the frsum fields to reflect addition or deletion
 * of some frags
 *
 * fragmap holds the FRAG free bits of one block from cg_free;
 * fraglist is a cg_frsum-style table of counts of free runs of
 * each length; cnt is +1 to add the runs in fragmap to the
 * table or -1 to remove them.
 */
fragacct(fragmap, fraglist, cnt)
	int fragmap;
	long fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	/*
	 * inblk appears to have a bit set for each run length
	 * present in fragmap, per the fragtbl lookup table --
	 * confirm against fragtbl's construction.  Both are
	 * shifted left once to line up with the around/inside
	 * masks, which include a boundary bit on each side.
	 */
	inblk = (int)(fragtbl[fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < FRAG; siz++) {
		/* skip run lengths not present in this map */
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		/*
		 * slide a siz-wide window across the map; on a match
		 * (a run of exactly siz free frags) bump the count
		 * and skip the window past the matched run
		 */
		for (pos = siz; pos <= FRAG; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}
692 
693 badblock(fs, bn)
694 	register struct fs *fs;
695 	daddr_t bn;
696 {
697 
698 	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(dtog(bn, fs), fs)) {
699 		fserr(fs, "bad block");
700 		return (1);
701 	}
702 	return (0);
703 }
704 
705 /*
706  * getfs maps a device number into
707  * a pointer to the incore super
708  * block.  The algorithm is a linear
709  * search through the mount table.
710  * A consistency check of the
711  * in core free-block and i-node
712  * counts is performed.
713  *
714  * panic: no fs -- the device is not mounted.
715  *	this "cannot happen"
716  */
717 struct fs *
718 getfs(dev)
719 	dev_t dev;
720 {
721 	register struct mount *mp;
722 	register struct fs *fs;
723 
724 	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
725 		if (mp->m_bufp != NULL && mp->m_dev == dev) {
726 			fs = mp->m_bufp->b_un.b_fs;
727 			if (fs->fs_magic != FS_MAGIC)
728 				panic("getfs: bad magic");
729 			return (fs);
730 		}
731 	panic("getfs: no fs");
732 	return (NULL);
733 }
734 
735 /*
736  * Fserr prints the name of a file system
737  * with an error diagnostic, in the form
738  *	fs: error message
739  */
740 fserr(fs, cp)
741 	struct fs *fs;
742 	char *cp;
743 {
744 
745 	printf("%s: %s\n", fs->fs_fsmnt, cp);
746 }
747 
748 /*
749  * Getfsx returns the index in the file system
750  * table of the specified device.  The swap device
751  * is also assigned a pseudo-index.  The index may
752  * be used as a compressed indication of the location
753  * of a block, recording
754  *	<getfsx(dev),blkno>
755  * rather than
756  *	<dev, blkno>
757  * provided the information need remain valid only
758  * as long as the file system is mounted.
759  */
760 getfsx(dev)
761 	dev_t dev;
762 {
763 	register struct mount *mp;
764 
765 	if (dev == swapdev)
766 		return (MSWAPX);
767 	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
768 		if (mp->m_dev == dev)
769 			return (mp - &mount[0]);
770 	return (-1);
771 }
772 
/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate modified
 * super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	/* single-thread the sync; a concurrent caller just returns */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, BSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			/* write back the cylinder group summary area */
			blks = howmany(cssize(fs), BSIZE);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev, csaddr(fs) + (i * FRAG),
					BSIZE);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 * (inodeNINODE is presumably the end of the in-core inode
	 * table -- confirm against the table's declaration.)
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			/* take an extra reference; iput releases it */
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
834