xref: /csrg-svn/sys/ufs/ffs/ffs_alloc.c (revision 4948)
/* Copyright (c) 1981 Regents of the University of California */

static char vers[] = "@(#)ffs_alloc.c 1.9 11/19/81";

/*	alloc.c	4.8	81/03/08	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/mount.h"
#include "../h/fs.h"
#include "../h/conf.h"
#include "../h/buf.h"
#include "../h/inode.h"
#include "../h/dir.h"
#include "../h/user.h"

extern long		hashalloc();
extern long		ialloccg();
extern daddr_t		alloccg();
extern daddr_t		alloccgblk();
extern daddr_t		fragextend();
extern daddr_t		blkpref();
extern daddr_t		mapsearch();
extern int		inside[], around[];
extern unsigned char	fragtbl[];

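/*
 * Allocate a block in the file system.
 * The size of the requested block must be a multiple of FSIZE and
 * no larger than BSIZE.  If no preference (bpref) is given, the
 * cylinder group holding the file's inode is tried first; hashalloc()
 * then fans out to the other cylinder groups.  Non-superuser requests
 * are refused once free space falls below the minfree percentage of
 * the data blocks in the file system.
 */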
struct buf *
alloc(dev, ip, bpref, size)
	dev_t dev;
	register struct inode *ip;
	daddr_t bpref;
	int size;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	if ((unsigned)size > BSIZE || size % FSIZE != 0)
		panic("alloc: bad size");
	fs = getfs(dev);
	if (size == BSIZE && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * FRAG + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * minfree / 100)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(ip->i_number, fs);
	else
		cg = dtog(bpref, fs);
	bno = hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
	if (bno == 0)
		goto nospace;
	bp = getblk(dev, bno, size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

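/*
 * Reallocate a fragment to a bigger size.
 * The fragment at bprev (osize bytes) is extended in place via
 * fragextend() when the adjoining frags are free; otherwise a new
 * fragment of nsize bytes is allocated, the old contents are carried
 * over into its buffer, and the old fragment is released.
 */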
struct buf *
realloccg(dev, ip, bprev, bpref, osize, nsize)
	dev_t dev;
	register struct inode *ip;
	daddr_t bprev, bpref;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	if ((unsigned)osize > BSIZE || osize % FSIZE != 0 ||
	    (unsigned)nsize > BSIZE || nsize % FSIZE != 0)
		panic("realloccg: bad size");
	fs = getfs(dev);
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * FRAG + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * minfree / 100)
		goto nospace;
	if (bprev == 0)
		panic("realloccg: bad bprev");
	else
		cg = dtog(bprev, fs);
	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		bp = bread(dev, bno, osize);
		bp->b_bcount = nsize;
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	if (bpref >= fs->fs_size)
		bpref = 0;
	bno = hashalloc(dev, fs, cg, (long)bpref, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy
		 */
		obp = bread(dev, bprev, osize);
		bp = getblk(dev, bno, nsize);
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(dev, bprev, osize);
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

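/*
 * Allocate an inode in the file system.
 * The preferred inode ipref selects the cylinder group in which the
 * search starts; hashalloc() applies ialloccg() across groups.  An
 * inode handed back with a nonzero mode indicates a duplicate
 * allocation and is a panic.
 */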
struct inode *
ialloc(dev, ipref, mode)
	dev_t dev;
	ino_t ipref;
	int mode;
{
	daddr_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg;

	fs = getfs(dev);
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(ipref, fs);
	ino = hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	ip = iget(dev, ino);
	if (ip == NULL) {
		ifree(dev, ino, 0);
		return (NULL);
	}
	if (ip->i_mode)
		panic("ialloc: dup alloc");
	return (ip);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

/*
 * find a cylinder group in which to place a directory.
 * Choose the group with the fewest directories that still holds at
 * least the average number of free inodes, and return the number of
 * its first inode as the allocation preference.
 */
dirpref(dev)
	dev_t dev;
{
	register struct fs *fs;
	int cg, minndir, mincg, avgifree;

	fs = getfs(dev);
	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(cg).cs_ndir < minndir &&
		    fs->fs_cs(cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(cg).cs_ndir;
		}
	return (fs->fs_ipg * mincg);
}

/*
 * select a cylinder group in which to place a large block of data.
 * Starting just past the rotor, take the first group holding at least
 * the average number of free blocks, and return a preferred block
 * address within it.
 */
blkpref(dev)
	dev_t dev;
{
	register struct fs *fs;
	int cg, avgbfree;

	fs = getfs(dev);
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(cg).cs_nbfree >= avgbfree) {
			fs->fs_cgrotor = cg;
			return (fs->fs_fpg * cg + FRAG);
		}
	for (cg = 0; cg <= fs->fs_cgrotor; cg++)
		if (fs->fs_cs(cg).cs_nbfree >= avgbfree) {
			fs->fs_cgrotor = cg;
			return (fs->fs_fpg * cg + FRAG);
		}
	return (0);
}

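/*
 * Implement the cylinder overflow algorithm:
 * if the allocator fails in the preferred cylinder group, quadratically
 * rehash through the other groups (from group cg the probes fall at
 * offsets 1, 3, 7, 15, ... mod fs_ncg), and finally fall back to a
 * brute force scan of every cylinder group.
 */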
long
hashalloc(dev, fs, cg, pref, size, allocator)
	dev_t dev;
	register struct fs *fs;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	long (*allocator)();
{
	long result;
	int i, icg = cg;

	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(dev, fs, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 */
	cg = icg;
	for (i = 0; i < fs->fs_ncg; i++) {
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

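/*
 * Determine whether a fragment can be extended in place.
 * The fragment of osize bytes at bprev grows to nsize bytes only if
 * the frags immediately following it in the same block are free; the
 * free counts and frsum table are then updated to claim them.
 */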
daddr_t
fragextend(dev, fs, cg, bprev, osize, nsize)
	dev_t dev;
	register struct fs *fs;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct buf *bp;
	register struct cg *cgp;
	long bno;
	int frags, bbase;
	int i;

	frags = nsize / FSIZE;
	bbase = bprev % FRAG;
	if (bbase > (bprev + frags - 1) % FRAG) {
		/* cannot extend across a block boundary */
		return (0);
	}
	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (0);
	}
	cgp = bp->b_un.b_cg;
	bno = bprev % fs->fs_fpg;
	for (i = osize / FSIZE; i < frags; i++) {
		if (isclr(cgp->cg_free, bno + i))
			break;
	}
	if (i == frags) {
		/*
		 * the current fragment can be extended:
		 * deduct the count on the fragment being extended into,
		 * increase the count on the remaining fragment (if any),
		 * and allocate the extended piece
		 */
		for (i = frags; i < FRAG - bbase; i++)
			if (isclr(cgp->cg_free, bno + i))
				break;
		cgp->cg_frsum[i - osize / FSIZE]--;
		if (i != frags)
			cgp->cg_frsum[i - frags]++;
		for (i = osize / FSIZE; i < frags; i++) {
			clrbit(cgp->cg_free, bno + i);
			cgp->cg_cs.cs_nffree--;
			fs->fs_cstotal.cs_nffree--;
			fs->fs_cs(cg).cs_nffree--;
		}
		fs->fs_fmod++;
		bdwrite(bp);
		return (bprev);
	}
	brelse(bp);
	return (0);
}

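/*
 * Allocate size bytes from cylinder group cg.
 * A full block request is passed to alloccgblk().  A fragment request
 * is satisfied from an existing free fragment when the frsum table
 * shows one of a usable size; otherwise a whole block is allocated
 * and broken up.
 */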
daddr_t
alloccg(dev, fs, cg, bpref, size)
	dev_t dev;
	register struct fs *fs;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct buf *bp;
	register struct cg *cgp;
	int bno, frags;
	int allocsiz;
	register int i;

	if (fs->fs_cs(cg).cs_nbfree == 0 && size == BSIZE)
		return (0);
	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (0);
	}
	cgp = bp->b_un.b_cg;
	if (size == BSIZE) {
		bno = alloccgblk(dev, fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = size / FSIZE;
	for (allocsiz = frags; allocsiz < FRAG; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == FRAG) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = alloccgblk(dev, fs, cgp, bpref);
		bpref = bno % fs->fs_fpg;
		for (i = frags; i < FRAG; i++)
			setbit(cgp->cg_free, bpref + i);
		i = FRAG - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(cg).cs_nffree += i;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno == 0) {
		brelse(bp);
		return (0);
	}
	for (i = 0; i < frags; i++)
		clrbit(cgp->cg_free, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(cg).cs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}

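/*
 * Allocate a whole block out of the map in cylinder group cgp.
 * The preferred block is taken if it is free; failing that, a
 * rotationally close block on the same cylinder is sought through
 * the position tables; failing that, mapsearch() takes any block
 * in the group.
 */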
daddr_t
alloccgblk(dev, fs, cgp, bpref)
	dev_t dev;
	struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos;
	short *cylbp;
	int i, j;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
	} else {
		bpref &= ~(FRAG - 1);
		bpref %= fs->fs_fpg;
		/*
		 * if the requested block is available, use it
		 */
		if (isblock(cgp->cg_free, bpref/FRAG)) {
			bno = bpref;
			goto gotit;
		}
		/*
		 * check for a block available on the same cylinder
		 * beginning with one which is rotationally optimal
		 */
		i = bpref * NSPF;
		cylno = i / fs->fs_spc;
		cylbp = cgp->cg_b[cylno];
		pos = (i + ((ROTDELAY == 0) ?
			0 : 1 + ROTDELAY * HZ * fs->fs_nsect / (NSPF * 1000))) %
			fs->fs_nsect * NRPOS / fs->fs_nsect;
		for (i = pos; i < NRPOS; i++)
			if (cylbp[i] > 0)
				break;
		if (i == NRPOS)
			for (i = 0; i < pos; i++)
				if (cylbp[i] > 0)
					break;
		if (cylbp[i] > 0) {
			bpref = cylno * fs->fs_spc / (NSPF * FRAG);
			for (j = fs->fs_postbl[i]; j > -1; j = fs->fs_rotbl[j]) {
				if (isblock(cgp->cg_free, bpref + j)) {
					bno = (bpref + j) * FRAG;
					goto gotit;
				}
			}
			panic("alloccgblk: can't find blk in cyl");
		}
	}
	bno = mapsearch(fs, cgp, bpref, FRAG);
	if (bno == 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	clrblock(cgp->cg_free, bno/FRAG);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(cgp->cg_cgx).cs_nbfree--;
	i = bno * NSPF;
	cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

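/*
 * Allocate an inode in cylinder group cg.
 * The hint ipref is tried first; otherwise the inode map is scanned
 * circularly starting at cg_irotor.
 */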
long
ialloccg(dev, fs, cg, ipref, mode)
	dev_t dev;
	register struct fs *fs;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct buf *bp;
	register struct cg *cgp;
	int i;

	if (fs->fs_cs(cg).cs_nifree == 0)
		return (0);
	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (0);
	}
	cgp = bp->b_un.b_cg;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cgp->cg_iused, ipref))
			goto gotit;
	} else
		ipref = cgp->cg_irotor;
	for (i = 0; i < fs->fs_ipg; i++) {
		ipref++;
		if (ipref >= fs->fs_ipg)
			ipref = 0;
		if (isclr(cgp->cg_iused, ipref)) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	brelse(bp);
	return (0);
gotit:
	setbit(cgp->cg_iused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

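/*
 * Free a block or fragment.
 * A full block is returned directly to the free map.  For a fragment,
 * the frsum counts are adjusted, and if the enclosing block becomes
 * entirely free it is promoted back to a full free block.
 */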
fre(dev, bno, size)
	dev_t dev;
	daddr_t bno;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg, blk, frags, bbase;
	register int i;

	if ((unsigned)size > BSIZE || size % FSIZE != 0)
		panic("free: bad size");
	fs = getfs(dev);
	cg = dtog(bno, fs);
	if (badblock(fs, bno))
		return;
	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	bno %= fs->fs_fpg;
	if (size == BSIZE) {
		if (isblock(cgp->cg_free, bno/FRAG))
			panic("free: freeing free block");
		setblock(cgp->cg_free, bno/FRAG);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(cg).cs_nbfree++;
		i = bno * NSPF;
		cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]++;
	} else {
		bbase = bno - (bno % FRAG);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
		       (0xff >> (NBBY - FRAG)));
		fragacct(blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = size / FSIZE;
		for (i = 0; i < frags; i++) {
			if (isset(cgp->cg_free, bno + i))
				panic("free: freeing free frag");
			setbit(cgp->cg_free, bno + i);
			cgp->cg_cs.cs_nffree++;
			fs->fs_cstotal.cs_nffree++;
			fs->fs_cs(cg).cs_nffree++;
		}
		/*
		 * add back in counts associated with the new frags
		 */
		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
		       (0xff >> (NBBY - FRAG)));
		fragacct(blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(cgp->cg_free, bbase / FRAG)) {
			cgp->cg_cs.cs_nffree -= FRAG;
			fs->fs_cstotal.cs_nffree -= FRAG;
			fs->fs_cs(cg).cs_nffree -= FRAG;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(cg).cs_nbfree++;
			i = bbase * NSPF;
			cgp->cg_b[i / fs->fs_spc]
				 [i % fs->fs_nsect * NRPOS / fs->fs_nsect]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

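/*
 * Free the specified inode: clear its bit in the cylinder group inode
 * map and credit the free inode (and, for directories, directory)
 * counts.
 */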
ifree(dev, ino, mode)
	dev_t dev;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg;

	fs = getfs(dev);
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
		panic("ifree: range");
	cg = itog(ino, fs);
	bp = bread(dev, cgtod(cg, fs), BSIZE);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return;
	}
	cgp = bp->b_un.b_cg;
	ino %= fs->fs_ipg;
	if (isclr(cgp->cg_iused, ino))
		panic("ifree: freeing free inode");
	clrbit(cgp->cg_iused, ino);
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * find a block of the specified size in the specified cylinder group.
 * It is a panic if a request is made to find a block when none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = bpref % fs->fs_fpg / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = roundup(fs->fs_fpg - 1, NBBY) / NBBY - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl, 1 << (allocsiz - 1));
	if (loc == 0) {
		len = start - 1;
		start = (cgdmin(cgp->cg_cgx, fs) -
			 cgbase(cgp->cg_cgx, fs)) / NBBY;
		loc = scanc(len, &cgp->cg_free[start], fragtbl,
			1 << (allocsiz - 1));
		if (loc == 0) {
			panic("alloccg: map corrupted");
			return (0);
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += FRAG) {
		blk = (cgp->cg_free[bno / NBBY] >> i) & (0xff >> (NBBY - FRAG));
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= FRAG - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				return (bno + i + pos);
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (0);
}

/*
 * update the frsum fields to reflect addition or deletion
 * of some frags
 */
fragacct(fragmap, fraglist, cnt)
	int fragmap;
	long fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	inblk = (int)(fragtbl[fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < FRAG; siz++) {
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		for (pos = siz; pos <= FRAG; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}

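/*
 * Check that a block number is within the file system and does not
 * fall within the cylinder group metadata preceding the group's
 * first data block.
 */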
badblock(fs, bn)
	register struct fs *fs;
	daddr_t bn;
{

	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(dtog(bn, fs), fs)) {
		fserr(fs, "bad block");
		return (1);
	}
	return (0);
}

/*
 * getfs maps a device number into
 * a pointer to the incore super
 * block.  The algorithm is a linear
 * search through the mount table.
 * A consistency check of the
 * super block magic number is
 * performed.
 *
 * panic: no fs -- the device is not mounted.
 *	this "cannot happen"
 */
struct fs *
getfs(dev)
	dev_t dev;
{
	register struct mount *mp;
	register struct fs *fs;

	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL && mp->m_dev == dev) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_magic != FS_MAGIC)
				panic("getfs: bad magic");
			return (fs);
		}
	panic("getfs: no fs");
	return (NULL);
}

/*
 * Fserr prints the name of a file system
 * with an error diagnostic, in the form
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}

/*
 * Getfsx returns the index in the file system
 * table of the specified device.  The swap device
 * is also assigned a pseudo-index.  The index may
 * be used as a compressed indication of the location
 * of a block, recording
 *	<getfsx(dev),blkno>
 * rather than
 *	<dev, blkno>
 * provided the information need remain valid only
 * as long as the file system is mounted.
 */
getfsx(dev)
	dev_t dev;
{
	register struct mount *mp;

	if (dev == swapdev)
		return (MSWAPX);
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_dev == dev)
			return (mp - &mount[0]);
	return (-1);
}

/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified inodes; and it goes through the mount table to initiate
 * the writing of modified super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, BSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			blks = howmany(cssize(fs), BSIZE);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev, csaddr(fs) + (i * FRAG),
					BSIZE);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if ((ip->i_flag & ILOCK) == 0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
840