xref: /csrg-svn/sys/ufs/ffs/ffs_alloc.c (revision 5212)
1 /* Copyright (c) 1981 Regents of the University of California */
2 
3 static char vers[] = "@(#)ffs_alloc.c 1.10 12/09/81";
4 
5 /*	alloc.c	4.8	81/03/08	*/
6 
7 #include "../h/param.h"
8 #include "../h/systm.h"
9 #include "../h/mount.h"
10 #include "../h/fs.h"
11 #include "../h/conf.h"
12 #include "../h/buf.h"
13 #include "../h/inode.h"
14 #include "../h/dir.h"
15 #include "../h/user.h"
16 
17 extern u_long		hashalloc();
18 extern ino_t		ialloccg();
19 extern daddr_t		alloccg();
20 extern daddr_t		alloccgblk();
21 extern daddr_t		fragextend();
22 extern daddr_t		blkpref();
23 extern daddr_t		mapsearch();
24 extern int		inside[], around[];
25 extern unsigned char	fragtbl[];
26 
27 struct buf *
28 alloc(dev, ip, bpref, size)
29 	dev_t dev;
30 	register struct inode *ip;
31 	daddr_t bpref;
32 	int size;
33 {
34 	daddr_t bno;
35 	register struct fs *fs;
36 	register struct buf *bp;
37 	int cg;
38 
39 	if ((unsigned)size > BSIZE || size % FSIZE != 0)
40 		panic("alloc: bad size");
41 	fs = getfs(dev);
42 	if (size == BSIZE && fs->fs_cstotal.cs_nbfree == 0)
43 		goto nospace;
44 	if (u.u_uid != 0 &&
45 	    fs->fs_cstotal.cs_nbfree * FRAG + fs->fs_cstotal.cs_nffree <
46 	      fs->fs_dsize * minfree / 100)
47 		goto nospace;
48 	if (bpref >= fs->fs_size)
49 		bpref = 0;
50 	if (bpref == 0)
51 		cg = itog(ip->i_number, fs);
52 	else
53 		cg = dtog(bpref, fs);
54 	bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
55 	if (bno == 0)
56 		goto nospace;
57 	bp = getblk(dev, bno, size);
58 	clrbuf(bp);
59 	return (bp);
60 nospace:
61 	fserr(fs, "file system full");
62 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
63 	u.u_error = ENOSPC;
64 	return (NULL);
65 }
66 
/*
 * Reallocate a fragment to a bigger size.
 * First try to extend the existing fragment in place; failing that,
 * allocate a new piece of the desired size elsewhere and copy the old
 * contents into it (by swapping buffer data pointers, which avoids a
 * physical copy).  Returns a buffer for the enlarged piece, with the
 * grown region zeroed, or NULL with u.u_error set to ENOSPC.
 */
struct buf *
realloccg(dev, bprev, bpref, osize, nsize)
	dev_t dev;
	daddr_t bprev, bpref;	/* existing frag address; placement hint */
	int osize, nsize;	/* old and desired sizes, in bytes */
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	/* both sizes must be fragment multiples no larger than a block */
	if ((unsigned)osize > BSIZE || osize % FSIZE != 0 ||
	    (unsigned)nsize > BSIZE || nsize % FSIZE != 0)
		panic("realloccg: bad size");
	fs = getfs(dev);
	/* only the super-user may eat into the minfree reserve */
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * FRAG + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * minfree / 100)
		goto nospace;
	if (bprev == 0)
		panic("realloccg: bad bprev");
	else
		cg = dtog(bprev, fs);
	/* best case: the frags just past the old piece are free */
	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		bp = bread(dev, bno, osize);
		bp->b_bcount = nsize;
		/* zero only the newly grown tail of the buffer */
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	if (bpref >= fs->fs_size)
		bpref = 0;
	bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy
		 * swap the data pointers of the old and new buffers so the
		 * old contents move to the new block without a byte copy,
		 * then invalidate and release the old buffer and free the
		 * old fragment on disk
		 */
		obp = bread(dev, bprev, osize);
		bp = getblk(dev, bno, nsize);
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(dev, bprev, (off_t)osize);
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return(bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
125 
126 struct inode *
127 ialloc(dev, ipref, mode)
128 	dev_t dev;
129 	ino_t ipref;
130 	int mode;
131 {
132 	ino_t ino;
133 	register struct fs *fs;
134 	register struct inode *ip;
135 	int cg;
136 
137 	fs = getfs(dev);
138 	if (fs->fs_cstotal.cs_nifree == 0)
139 		goto noinodes;
140 	if (ipref >= fs->fs_ncg * fs->fs_ipg)
141 		ipref = 0;
142 	cg = itog(ipref, fs);
143 	ino = (ino_t)hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
144 	if (ino == 0)
145 		goto noinodes;
146 	ip = iget(dev, ino);
147 	if (ip == NULL) {
148 		ifree(dev, ino, 0);
149 		return (NULL);
150 	}
151 	if (ip->i_mode)
152 		panic("ialloc: dup alloc");
153 	return (ip);
154 noinodes:
155 	fserr(fs, "out of inodes");
156 	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
157 	u.u_error = ENOSPC;
158 	return (NULL);
159 }
160 
161 /*
162  * find a cylinder to place a directory
163  */
164 dirpref(dev)
165 	dev_t dev;
166 {
167 	register struct fs *fs;
168 	int cg, minndir, mincg, avgifree;
169 
170 	fs = getfs(dev);
171 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
172 	minndir = fs->fs_ipg;
173 	mincg = 0;
174 	for (cg = 0; cg < fs->fs_ncg; cg++)
175 		if (fs->fs_cs(cg).cs_ndir < minndir &&
176 		    fs->fs_cs(cg).cs_nifree >= avgifree) {
177 			mincg = cg;
178 			minndir = fs->fs_cs(cg).cs_ndir;
179 		}
180 	return (fs->fs_ipg * mincg);
181 }
182 
183 /*
184  * select a cylinder to place a large block of data
185  */
186 daddr_t
187 blkpref(dev)
188 	dev_t dev;
189 {
190 	register struct fs *fs;
191 	int cg, avgbfree;
192 
193 	fs = getfs(dev);
194 	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
195 	for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
196 		if (fs->fs_cs(cg).cs_nbfree >= avgbfree) {
197 			fs->fs_cgrotor = cg;
198 			return (fs->fs_fpg * cg + FRAG);
199 		}
200 	for (cg = 0; cg <= fs->fs_cgrotor; cg++)
201 		if (fs->fs_cs(cg).cs_nbfree >= avgbfree) {
202 			fs->fs_cgrotor = cg;
203 			return (fs->fs_fpg * cg + FRAG);
204 		}
205 	return (0);
206 }
207 
208 /*VARARGS5*/
209 u_long
210 hashalloc(dev, fs, cg, pref, size, allocator)
211 	dev_t dev;
212 	register struct fs *fs;
213 	int cg;
214 	long pref;
215 	int size;	/* size for data blocks, mode for inodes */
216 	u_long (*allocator)();
217 {
218 	long result;
219 	int i, icg = cg;
220 
221 	/*
222 	 * 1: preferred cylinder group
223 	 */
224 	result = (*allocator)(dev, fs, cg, pref, size);
225 	if (result)
226 		return (result);
227 	/*
228 	 * 2: quadratic rehash
229 	 */
230 	for (i = 1; i < fs->fs_ncg; i *= 2) {
231 		cg += i;
232 		if (cg >= fs->fs_ncg)
233 			cg -= fs->fs_ncg;
234 		result = (*allocator)(dev, fs, cg, 0, size);
235 		if (result)
236 			return (result);
237 	}
238 	/*
239 	 * 3: brute force search
240 	 */
241 	cg = icg;
242 	for (i = 0; i < fs->fs_ncg; i++) {
243 		result = (*allocator)(dev, fs, cg, 0, size);
244 		if (result)
245 			return (result);
246 		cg++;
247 		if (cg == fs->fs_ncg)
248 			cg = 0;
249 	}
250 	return (0);
251 }
252 
253 daddr_t
254 fragextend(dev, fs, cg, bprev, osize, nsize)
255 	dev_t dev;
256 	register struct fs *fs;
257 	int cg;
258 	long bprev;
259 	int osize, nsize;
260 {
261 	register struct buf *bp;
262 	register struct cg *cgp;
263 	long bno;
264 	int frags, bbase;
265 	int i;
266 
267 	frags = nsize / FSIZE;
268 	bbase = bprev % FRAG;
269 	if (bbase > (bprev + frags - 1) % FRAG) {
270 		/* cannot extend across a block boundry */
271 		return (0);
272 	}
273 	bp = bread(dev, cgtod(cg, fs), BSIZE);
274 	if (bp->b_flags & B_ERROR)
275 		return (0);
276 	cgp = bp->b_un.b_cg;
277 	bno = bprev % fs->fs_fpg;
278 	for (i = osize / FSIZE; i < frags; i++) {
279 		if (isclr(cgp->cg_free, bno + i))
280 			break;
281 	}
282 	if (i == frags) {
283 		/*
284 		 * the current fragment can be extended
285 		 * deduct the count on fragment being extended into
286 		 * increase the count on the remaining fragment (if any)
287 		 * allocate the extended piece
288 		 */
289 		for (i = frags; i < FRAG - bbase; i++)
290 			if (isclr(cgp->cg_free, bno + i))
291 				break;
292 		cgp->cg_frsum[i - osize / FSIZE]--;
293 		if (i != frags)
294 			cgp->cg_frsum[i - frags]++;
295 		for (i = osize / FSIZE; i < frags; i++) {
296 			clrbit(cgp->cg_free, bno + i);
297 			cgp->cg_cs.cs_nffree--;
298 			fs->fs_cstotal.cs_nffree--;
299 			fs->fs_cs(cg).cs_nffree--;
300 		}
301 		fs->fs_fmod++;
302 		bdwrite(bp);
303 		return (bprev);
304 	}
305 	brelse(bp);
306 	return (0);
307 }
308 
309 daddr_t
310 alloccg(dev, fs, cg, bpref, size)
311 	dev_t dev;
312 	register struct fs *fs;
313 	int cg;
314 	daddr_t bpref;
315 	int size;
316 {
317 	register struct buf *bp;
318 	register struct cg *cgp;
319 	int bno, frags;
320 	int allocsiz;
321 	register int i;
322 
323 	if (fs->fs_cs(cg).cs_nbfree == 0 && size == BSIZE)
324 		return (0);
325 	bp = bread(dev, cgtod(cg, fs), BSIZE);
326 	if (bp->b_flags & B_ERROR)
327 		return (0);
328 	cgp = bp->b_un.b_cg;
329 	if (size == BSIZE) {
330 		bno = alloccgblk(fs, cgp, bpref);
331 		bdwrite(bp);
332 		return (bno);
333 	}
334 	/*
335 	 * check to see if any fragments are already available
336 	 * allocsiz is the size which will be allocated, hacking
337 	 * it down to a smaller size if necessary
338 	 */
339 	frags = size / FSIZE;
340 	for (allocsiz = frags; allocsiz < FRAG; allocsiz++)
341 		if (cgp->cg_frsum[allocsiz] != 0)
342 			break;
343 	if (allocsiz == FRAG) {
344 		/*
345 		 * no fragments were available, so a block will be
346 		 * allocated, and hacked up
347 		 */
348 		if (cgp->cg_cs.cs_nbfree == 0) {
349 			brelse(bp);
350 			return (0);
351 		}
352 		bno = alloccgblk(fs, cgp, bpref);
353 		bpref = bno % fs->fs_fpg;
354 		for (i = frags; i < FRAG; i++)
355 			setbit(cgp->cg_free, bpref + i);
356 		i = FRAG - frags;
357 		cgp->cg_cs.cs_nffree += i;
358 		fs->fs_cstotal.cs_nffree += i;
359 		fs->fs_cs(cg).cs_nffree += i;
360 		cgp->cg_frsum[i]++;
361 		bdwrite(bp);
362 		return (bno);
363 	}
364 	bno = mapsearch(fs, cgp, bpref, allocsiz);
365 	if (bno == 0)
366 		return (0);
367 	for (i = 0; i < frags; i++)
368 		clrbit(cgp->cg_free, bno + i);
369 	cgp->cg_cs.cs_nffree -= frags;
370 	fs->fs_cstotal.cs_nffree -= frags;
371 	fs->fs_cs(cg).cs_nffree -= frags;
372 	cgp->cg_frsum[allocsiz]--;
373 	if (frags != allocsiz)
374 		cgp->cg_frsum[allocsiz - frags]++;
375 	bdwrite(bp);
376 	return (cg * fs->fs_fpg + bno);
377 }
378 
379 daddr_t
380 alloccgblk(fs, cgp, bpref)
381 	struct fs *fs;
382 	register struct cg *cgp;
383 	daddr_t bpref;
384 {
385 	daddr_t bno;
386 	int cylno, pos;
387 	short *cylbp;
388 	int i, j;
389 
390 	if (bpref == 0) {
391 		bpref = cgp->cg_rotor;
392 	} else {
393 		bpref &= ~(FRAG - 1);
394 		bpref %= fs->fs_fpg;
395 		/*
396 		 * if the requested block is available, use it
397 		 */
398 		if (isblock(cgp->cg_free, bpref/FRAG)) {
399 			bno = bpref;
400 			goto gotit;
401 		}
402 		/*
403 		 * check for a block available on the same cylinder
404 		 * beginning with one which is rotationally optimal
405 		 */
406 		i = bpref * NSPF;
407 		cylno = i / fs->fs_spc;
408 		cylbp = cgp->cg_b[cylno];
409 		pos = (i + (ROTDELAY == 0) ?
410 			0 : 1 + ROTDELAY * HZ * fs->fs_nsect / (NSPF * 1000)) %
411 			fs->fs_nsect * NRPOS / fs->fs_nsect;
412 		for (i = pos; i < NRPOS; i++)
413 			if (cylbp[i] > 0)
414 				break;
415 		if (i == NRPOS)
416 			for (i = 0; i < pos; i++)
417 				if (cylbp[i] > 0)
418 					break;
419 		if (cylbp[i] > 0) {
420 			bpref = cylno * fs->fs_spc / (NSPF * FRAG);
421 			for (j = fs->fs_postbl[i]; j > -1; j = fs->fs_rotbl[j]) {
422 				if (isblock(cgp->cg_free, bpref + j)) {
423 					bno = (bpref + j) * FRAG;
424 					goto gotit;
425 				}
426 			}
427 			panic("alloccgblk: can't find blk in cyl");
428 		}
429 	}
430 	bno = mapsearch(fs, cgp, bpref, FRAG);
431 	if (bno == 0)
432 		return (0);
433 	cgp->cg_rotor = bno;
434 gotit:
435 	clrblock(cgp->cg_free, bno/FRAG);
436 	cgp->cg_cs.cs_nbfree--;
437 	fs->fs_cstotal.cs_nbfree--;
438 	fs->fs_cs(cgp->cg_cgx).cs_nbfree--;
439 	i = bno * NSPF;
440 	cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]--;
441 	fs->fs_fmod++;
442 	return (cgp->cg_cgx * fs->fs_fpg + bno);
443 }
444 
445 ino_t
446 ialloccg(dev, fs, cg, ipref, mode)
447 	dev_t dev;
448 	register struct fs *fs;
449 	int cg;
450 	daddr_t ipref;
451 	int mode;
452 {
453 	register struct buf *bp;
454 	register struct cg *cgp;
455 	int i;
456 
457 	if (fs->fs_cs(cg).cs_nifree == 0)
458 		return (0);
459 	bp = bread(dev, cgtod(cg, fs), BSIZE);
460 	if (bp->b_flags & B_ERROR)
461 		return (0);
462 	cgp = bp->b_un.b_cg;
463 	if (ipref) {
464 		ipref %= fs->fs_ipg;
465 		if (isclr(cgp->cg_iused, ipref))
466 			goto gotit;
467 	} else
468 		ipref = cgp->cg_irotor;
469 	for (i = 0; i < fs->fs_ipg; i++) {
470 		ipref++;
471 		if (ipref >= fs->fs_ipg)
472 			ipref = 0;
473 		if (isclr(cgp->cg_iused, ipref)) {
474 			cgp->cg_irotor = ipref;
475 			goto gotit;
476 		}
477 	}
478 	brelse(bp);
479 	return (0);
480 gotit:
481 	setbit(cgp->cg_iused, ipref);
482 	cgp->cg_cs.cs_nifree--;
483 	fs->fs_cstotal.cs_nifree--;
484 	fs->fs_cs(cg).cs_nifree--;
485 	fs->fs_fmod++;
486 	if ((mode & IFMT) == IFDIR) {
487 		cgp->cg_cs.cs_ndir++;
488 		fs->fs_cstotal.cs_ndir++;
489 		fs->fs_cs(cg).cs_ndir++;
490 	}
491 	bdwrite(bp);
492 	return (cg * fs->fs_ipg + ipref);
493 }
494 
495 fre(dev, bno, size)
496 	dev_t dev;
497 	daddr_t bno;
498 	off_t size;
499 {
500 	register struct fs *fs;
501 	register struct cg *cgp;
502 	register struct buf *bp;
503 	int cg, blk, frags, bbase;
504 	register int i;
505 
506 	if ((unsigned)size > BSIZE || size % FSIZE != 0)
507 		panic("free: bad size");
508 	fs = getfs(dev);
509 	cg = dtog(bno, fs);
510 	if (badblock(fs, bno))
511 		return;
512 	bp = bread(dev, cgtod(cg, fs), BSIZE);
513 	if (bp->b_flags & B_ERROR)
514 		return;
515 	cgp = bp->b_un.b_cg;
516 	bno %= fs->fs_fpg;
517 	if (size == BSIZE) {
518 		if (isblock(cgp->cg_free, bno/FRAG))
519 			panic("free: freeing free block");
520 		setblock(cgp->cg_free, bno/FRAG);
521 		cgp->cg_cs.cs_nbfree++;
522 		fs->fs_cstotal.cs_nbfree++;
523 		fs->fs_cs(cg).cs_nbfree++;
524 		i = bno * NSPF;
525 		cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]++;
526 	} else {
527 		bbase = bno - (bno % FRAG);
528 		/*
529 		 * decrement the counts associated with the old frags
530 		 */
531 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
532 		       (0xff >> (NBBY - FRAG)));
533 		fragacct(blk, cgp->cg_frsum, -1);
534 		/*
535 		 * deallocate the fragment
536 		 */
537 		frags = size / FSIZE;
538 		for (i = 0; i < frags; i++) {
539 			if (isset(cgp->cg_free, bno + i))
540 				panic("free: freeing free frag");
541 			setbit(cgp->cg_free, bno + i);
542 			cgp->cg_cs.cs_nffree++;
543 			fs->fs_cstotal.cs_nffree++;
544 			fs->fs_cs(cg).cs_nffree++;
545 		}
546 		/*
547 		 * add back in counts associated with the new frags
548 		 */
549 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
550 		       (0xff >> (NBBY - FRAG)));
551 		fragacct(blk, cgp->cg_frsum, 1);
552 		/*
553 		 * if a complete block has been reassembled, account for it
554 		 */
555 		if (isblock(cgp->cg_free, bbase / FRAG)) {
556 			cgp->cg_cs.cs_nffree -= FRAG;
557 			fs->fs_cstotal.cs_nffree -= FRAG;
558 			fs->fs_cs(cg).cs_nffree -= FRAG;
559 			cgp->cg_cs.cs_nbfree++;
560 			fs->fs_cstotal.cs_nbfree++;
561 			fs->fs_cs(cg).cs_nbfree++;
562 			i = bbase * NSPF;
563 			cgp->cg_b[i / fs->fs_spc]
564 				 [i % fs->fs_nsect * NRPOS / fs->fs_nsect]++;
565 		}
566 	}
567 	fs->fs_fmod++;
568 	bdwrite(bp);
569 }
570 
571 ifree(dev, ino, mode)
572 	dev_t dev;
573 	ino_t ino;
574 	int mode;
575 {
576 	register struct fs *fs;
577 	register struct cg *cgp;
578 	register struct buf *bp;
579 	int cg;
580 
581 	fs = getfs(dev);
582 	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
583 		panic("ifree: range");
584 	cg = itog(ino, fs);
585 	bp = bread(dev, cgtod(cg, fs), BSIZE);
586 	if (bp->b_flags & B_ERROR)
587 		return;
588 	cgp = bp->b_un.b_cg;
589 	ino %= fs->fs_ipg;
590 	if (isclr(cgp->cg_iused, ino))
591 		panic("ifree: freeing free inode");
592 	clrbit(cgp->cg_iused, ino);
593 	cgp->cg_cs.cs_nifree++;
594 	fs->fs_cstotal.cs_nifree++;
595 	fs->fs_cs(cg).cs_nifree++;
596 	if ((mode & IFMT) == IFDIR) {
597 		cgp->cg_cs.cs_ndir--;
598 		fs->fs_cstotal.cs_ndir--;
599 		fs->fs_cs(cg).cs_ndir--;
600 	}
601 	fs->fs_fmod++;
602 	bdwrite(bp);
603 }
604 
/*
 * find a block of the specified size in the specified cylinder group
 * It is a panic if a request is made to find a block if none are
 * available.
 * allocsiz is in fragments; the return value is the fragment offset
 * of the run within the group, or 0 on failure.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;	/* preferred location, 0 to start at the frag rotor */
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 * fragtbl[byte] apparently carries a bit per free-run size present
	 * in that byte, so scanc stops at the first byte containing a run
	 * of at least allocsiz frags — TODO confirm against scanc/fragtbl
	 */
	if (bpref)
		start = bpref % fs->fs_fpg / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = roundup(fs->fs_fpg - 1, NBBY) / NBBY - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl, 1 << (allocsiz - 1));
	if (loc == 0) {
		/* nothing past the rotor; rescan from the data area start */
		len = start - 1;
		start = (cgdmin(cgp->cg_cgx, fs) -
			 cgbase(cgp->cg_cgx, fs)) / NBBY;
		loc = scanc(len, &cgp->cg_free[start], fragtbl,
			1 << (allocsiz - 1));
		if (loc == 0) {
			panic("alloccg: map corrupted");
			return (0);	/* not reached: panic does not return */
		}
	}
	/* loc is the count of bytes left unscanned at the match (from usage) */
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 * around[]/inside[] describe the bit mask and pattern for a free
	 * run of the given size at each alignment within the byte
	 */
	for (i = 0; i < NBBY; i += FRAG) {
		blk = (cgp->cg_free[bno / NBBY] >> i) & (0xff >> NBBY - FRAG);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= FRAG - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				return (bno + i + pos);
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (0);
}
664 
/*
 * update the frsum fields to reflect addition or deletion
 * of some frags
 * fragmap is one byte of the free map (one block's worth of frag
 * bits); for every maximal free run in it of size siz < FRAG,
 * fraglist[siz] is adjusted by cnt (+1 to add, -1 to remove).
 */
fragacct(fragmap, fraglist, cnt)
	int fragmap;
	long fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	/* inblk has a bit set for each run size present in the byte,
	 * presumably encoded by fragtbl — TODO confirm its layout */
	inblk = (int)(fragtbl[fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < FRAG; siz++) {
		/* skip sizes that do not occur in this byte at all */
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		for (pos = siz; pos <= FRAG; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				/* jump past the matched run so it is not
				 * counted again at a shifted alignment */
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}
697 
698 badblock(fs, bn)
699 	register struct fs *fs;
700 	daddr_t bn;
701 {
702 
703 	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(dtog(bn, fs), fs)) {
704 		fserr(fs, "bad block");
705 		return (1);
706 	}
707 	return (0);
708 }
709 
710 /*
711  * getfs maps a device number into
712  * a pointer to the incore super
713  * block.  The algorithm is a linear
714  * search through the mount table.
715  * A consistency check of the
716  * in core free-block and i-node
717  * counts is performed.
718  *
719  * panic: no fs -- the device is not mounted.
720  *	this "cannot happen"
721  */
722 struct fs *
723 getfs(dev)
724 	dev_t dev;
725 {
726 	register struct mount *mp;
727 	register struct fs *fs;
728 
729 	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
730 		if (mp->m_bufp != NULL && mp->m_dev == dev) {
731 			fs = mp->m_bufp->b_un.b_fs;
732 			if (fs->fs_magic != FS_MAGIC)
733 				panic("getfs: bad magic");
734 			return (fs);
735 		}
736 	panic("getfs: no fs");
737 	return (NULL);
738 }
739 
/*
 * Fserr prints the name of a file system
 * with an error diagnostic, in the form
 *	fs: error message
 * (output goes to the console via printf)
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;	/* the error message to append */
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}
752 
753 /*
754  * Getfsx returns the index in the file system
755  * table of the specified device.  The swap device
756  * is also assigned a pseudo-index.  The index may
757  * be used as a compressed indication of the location
758  * of a block, recording
759  *	<getfsx(dev),blkno>
760  * rather than
761  *	<dev, blkno>
762  * provided the information need remain valid only
763  * as long as the file system is mounted.
764  */
765 getfsx(dev)
766 	dev_t dev;
767 {
768 	register struct mount *mp;
769 
770 	if (dev == swapdev)
771 		return (MSWAPX);
772 	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
773 		if (mp->m_dev == dev)
774 			return (mp - &mount[0]);
775 	return (-1);
776 }
777 
/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate modified
 * super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	/* updlock serializes concurrent syncs; a sync already in
	 * progress makes this call a no-op */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			/* skip file systems with no unflushed changes */
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, BSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			/* flush the cylinder group summary area, one
			 * block (FRAG frags) at a time */
			blks = howmany(cssize(fs), BSIZE);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev, csaddr(fs) + (i * FRAG),
					BSIZE);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 * inodeNINODE presumably points just past the inode table —
	 * TODO confirm against the table allocation code.
	 * The extra i_count reference keeps the inode from being
	 * reclaimed while iupdat runs; iput drops it and the lock.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
839