xref: /csrg-svn/sys/ufs/lfs/lfs_alloc.c (revision 5322)
1 /* Copyright (c) 1981 Regents of the University of California */
2 
3 static char vers[] = "@(#)lfs_alloc.c 1.11 01/05/82";
4 
5 /*	alloc.c	4.8	81/03/08	*/
6 
7 #include "../h/param.h"
8 #include "../h/systm.h"
9 #include "../h/mount.h"
10 #include "../h/fs.h"
11 #include "../h/conf.h"
12 #include "../h/buf.h"
13 #include "../h/inode.h"
14 #include "../h/dir.h"
15 #include "../h/user.h"
16 
17 extern u_long		hashalloc();
18 extern ino_t		ialloccg();
19 extern daddr_t		alloccg();
20 extern daddr_t		alloccgblk();
21 extern daddr_t		fragextend();
22 extern daddr_t		blkpref();
23 extern daddr_t		mapsearch();
24 extern int		inside[], around[];
25 extern unsigned char	*fragtbl[];
26 
/*
 * Allocate a block (or a run of fragments) in the file system on
 * device dev for inode ip, preferably at block bpref.  size is the
 * amount wanted in bytes; it must be a multiple of the fragment
 * size and no larger than a full block.
 *
 * Returns a cleared buffer for the new space, or NULL (with
 * u.u_error set to ENOSPC) when no space is available.
 */
struct buf *
alloc(dev, ip, bpref, size)
	dev_t dev;
	register struct inode *ip;
	daddr_t bpref;
	int size;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	fs = getfs(dev);
	if ((unsigned)size > fs->fs_bsize || size % fs->fs_fsize != 0)
		panic("alloc: bad size");
	/* a full-block request needs a free block somewhere in the fs */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	/* only the super-user may dip into the minfree reserve */
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	/* start in the preferred cylinder group, else the inode's own */
	if (bpref == 0)
		cg = itog(ip->i_number, fs);
	else
		cg = dtog(bpref, fs);
	bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
	if (bno == 0)
		goto nospace;
	bp = getblk(dev, fsbtodb(fs, bno), size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
66 
/*
 * Reallocate the fragment at bprev, growing it from osize to nsize
 * bytes.  First try to extend the fragment in place (fragextend);
 * failing that, allocate new space near bpref, carry the old
 * contents over by swapping buffer data pointers, and free the old
 * fragments.  The newly grown portion of the returned buffer is
 * zeroed in both cases.
 *
 * Returns a buffer for the enlarged fragment, or NULL (with
 * u.u_error set to ENOSPC) when no space is available.
 */
struct buf *
realloccg(dev, bprev, bpref, osize, nsize)
	dev_t dev;
	daddr_t bprev, bpref;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	fs = getfs(dev);
	if ((unsigned)osize > fs->fs_bsize || osize % fs->fs_fsize != 0 ||
	    (unsigned)nsize > fs->fs_bsize || nsize % fs->fs_fsize != 0)
		panic("realloccg: bad size");
	/* only the super-user may dip into the minfree reserve */
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bprev == 0)
		panic("realloccg: bad bprev");
	else
		cg = dtog(bprev, fs);
	/* try to grow the fragment where it sits */
	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		/* NOTE(review): bread result is not checked for B_ERROR here */
		bp = bread(dev, fsbtodb(fs, bno), osize);
		bp->b_bcount = nsize;
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	if (bpref >= fs->fs_size)
		bpref = 0;
	bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy
		 *
		 * The old data is moved into the new buffer by swapping
		 * the two buffers' data pointers; the old buffer is then
		 * invalidated and released, and the old frags freed.
		 */
		obp = bread(dev, fsbtodb(fs, bprev), osize);
		bp = getblk(dev, fsbtodb(fs, bno), nsize);
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(dev, bprev, (off_t)osize);
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return(bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
125 
/*
 * Allocate an inode in the file system on device dev, preferably
 * near inode ipref.  mode is the mode of the file being created;
 * it is passed through to ialloccg, which uses it to keep the
 * directory counts in the summary information current.
 *
 * Returns the in-core inode from iget, or NULL if iget fails or
 * no inodes are free (the latter sets u.u_error to ENOSPC).
 */
struct inode *
ialloc(dev, ipref, mode)
	dev_t dev;
	ino_t ipref;
	int mode;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg;

	fs = getfs(dev);
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(ipref, fs);
	ino = (ino_t)hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	ip = iget(dev, ino);
	if (ip == NULL) {
		/* could not get an in-core inode: undo the allocation */
		ifree(dev, ino, 0);
		return (NULL);
	}
	if (ip->i_mode)
		panic("ialloc: dup alloc");
	return (ip);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
160 
161 /*
162  * find a cylinder to place a directory
163  */
164 dirpref(dev)
165 	dev_t dev;
166 {
167 	register struct fs *fs;
168 	int cg, minndir, mincg, avgifree;
169 
170 	fs = getfs(dev);
171 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
172 	minndir = fs->fs_ipg;
173 	mincg = 0;
174 	for (cg = 0; cg < fs->fs_ncg; cg++)
175 		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
176 		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
177 			mincg = cg;
178 			minndir = fs->fs_cs(fs, cg).cs_ndir;
179 		}
180 	return (fs->fs_ipg * mincg);
181 }
182 
183 /*
184  * select a cylinder to place a large block of data
185  */
186 daddr_t
187 blkpref(dev)
188 	dev_t dev;
189 {
190 	register struct fs *fs;
191 	int cg, avgbfree;
192 
193 	fs = getfs(dev);
194 	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
195 	for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
196 		if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
197 			fs->fs_cgrotor = cg;
198 			return (fs->fs_fpg * cg + fs->fs_frag);
199 		}
200 	for (cg = 0; cg <= fs->fs_cgrotor; cg++)
201 		if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
202 			fs->fs_cgrotor = cg;
203 			return (fs->fs_fpg * cg + fs->fs_frag);
204 		}
205 	return (0);
206 }
207 
208 /*VARARGS5*/
209 u_long
210 hashalloc(dev, fs, cg, pref, size, allocator)
211 	dev_t dev;
212 	register struct fs *fs;
213 	int cg;
214 	long pref;
215 	int size;	/* size for data blocks, mode for inodes */
216 	u_long (*allocator)();
217 {
218 	long result;
219 	int i, icg = cg;
220 
221 	/*
222 	 * 1: preferred cylinder group
223 	 */
224 	result = (*allocator)(dev, fs, cg, pref, size);
225 	if (result)
226 		return (result);
227 	/*
228 	 * 2: quadratic rehash
229 	 */
230 	for (i = 1; i < fs->fs_ncg; i *= 2) {
231 		cg += i;
232 		if (cg >= fs->fs_ncg)
233 			cg -= fs->fs_ncg;
234 		result = (*allocator)(dev, fs, cg, 0, size);
235 		if (result)
236 			return (result);
237 	}
238 	/*
239 	 * 3: brute force search
240 	 */
241 	cg = icg;
242 	for (i = 0; i < fs->fs_ncg; i++) {
243 		result = (*allocator)(dev, fs, cg, 0, size);
244 		if (result)
245 			return (result);
246 		cg++;
247 		if (cg == fs->fs_ncg)
248 			cg = 0;
249 	}
250 	return (0);
251 }
252 
253 daddr_t
254 fragextend(dev, fs, cg, bprev, osize, nsize)
255 	dev_t dev;
256 	register struct fs *fs;
257 	int cg;
258 	long bprev;
259 	int osize, nsize;
260 {
261 	register struct buf *bp;
262 	register struct cg *cgp;
263 	long bno;
264 	int frags, bbase;
265 	int i;
266 
267 	frags = nsize / fs->fs_fsize;
268 	bbase = bprev % fs->fs_frag;
269 	if (bbase > (bprev + frags - 1) % fs->fs_frag) {
270 		/* cannot extend across a block boundry */
271 		return (0);
272 	}
273 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
274 	if (bp->b_flags & B_ERROR)
275 		return (0);
276 	cgp = bp->b_un.b_cg;
277 	bno = bprev % fs->fs_fpg;
278 	for (i = osize / fs->fs_fsize; i < frags; i++) {
279 		if (isclr(cgp->cg_free, bno + i))
280 			break;
281 	}
282 	if (i == frags) {
283 		/*
284 		 * the current fragment can be extended
285 		 * deduct the count on fragment being extended into
286 		 * increase the count on the remaining fragment (if any)
287 		 * allocate the extended piece
288 		 */
289 		for (i = frags; i < fs->fs_frag - bbase; i++)
290 			if (isclr(cgp->cg_free, bno + i))
291 				break;
292 		cgp->cg_frsum[i - osize / fs->fs_fsize]--;
293 		if (i != frags)
294 			cgp->cg_frsum[i - frags]++;
295 		for (i = osize / fs->fs_fsize; i < frags; i++) {
296 			clrbit(cgp->cg_free, bno + i);
297 			cgp->cg_cs.cs_nffree--;
298 			fs->fs_cstotal.cs_nffree--;
299 			fs->fs_cs(fs, cg).cs_nffree--;
300 		}
301 		fs->fs_fmod++;
302 		bdwrite(bp);
303 		return (bprev);
304 	}
305 	brelse(bp);
306 	return (0);
307 }
308 
309 daddr_t
310 alloccg(dev, fs, cg, bpref, size)
311 	dev_t dev;
312 	register struct fs *fs;
313 	int cg;
314 	daddr_t bpref;
315 	int size;
316 {
317 	register struct buf *bp;
318 	register struct cg *cgp;
319 	int bno, frags;
320 	int allocsiz;
321 	register int i;
322 
323 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
324 		return (0);
325 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
326 	if (bp->b_flags & B_ERROR)
327 		return (0);
328 	cgp = bp->b_un.b_cg;
329 	if (size == fs->fs_bsize) {
330 		bno = alloccgblk(fs, cgp, bpref);
331 		bdwrite(bp);
332 		return (bno);
333 	}
334 	/*
335 	 * check to see if any fragments are already available
336 	 * allocsiz is the size which will be allocated, hacking
337 	 * it down to a smaller size if necessary
338 	 */
339 	frags = size / fs->fs_fsize;
340 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
341 		if (cgp->cg_frsum[allocsiz] != 0)
342 			break;
343 	if (allocsiz == fs->fs_frag) {
344 		/*
345 		 * no fragments were available, so a block will be
346 		 * allocated, and hacked up
347 		 */
348 		if (cgp->cg_cs.cs_nbfree == 0) {
349 			brelse(bp);
350 			return (0);
351 		}
352 		bno = alloccgblk(fs, cgp, bpref);
353 		bpref = bno % fs->fs_fpg;
354 		for (i = frags; i < fs->fs_frag; i++)
355 			setbit(cgp->cg_free, bpref + i);
356 		i = fs->fs_frag - frags;
357 		cgp->cg_cs.cs_nffree += i;
358 		fs->fs_cstotal.cs_nffree += i;
359 		fs->fs_cs(fs, cg).cs_nffree += i;
360 		cgp->cg_frsum[i]++;
361 		bdwrite(bp);
362 		return (bno);
363 	}
364 	bno = mapsearch(fs, cgp, bpref, allocsiz);
365 	if (bno == 0)
366 		return (0);
367 	for (i = 0; i < frags; i++)
368 		clrbit(cgp->cg_free, bno + i);
369 	cgp->cg_cs.cs_nffree -= frags;
370 	fs->fs_cstotal.cs_nffree -= frags;
371 	fs->fs_cs(fs, cg).cs_nffree -= frags;
372 	cgp->cg_frsum[allocsiz]--;
373 	if (frags != allocsiz)
374 		cgp->cg_frsum[allocsiz - frags]++;
375 	bdwrite(bp);
376 	return (cg * fs->fs_fpg + bno);
377 }
378 
379 daddr_t
380 alloccgblk(fs, cgp, bpref)
381 	struct fs *fs;
382 	register struct cg *cgp;
383 	daddr_t bpref;
384 {
385 	daddr_t bno;
386 	int cylno, pos;
387 	short *cylbp;
388 	int i, j;
389 
390 	if (bpref == 0) {
391 		bpref = cgp->cg_rotor;
392 	} else {
393 		bpref &= ~(fs->fs_frag - 1);
394 		bpref %= fs->fs_fpg;
395 		/*
396 		 * if the requested block is available, use it
397 		 */
398 		if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) {
399 			bno = bpref;
400 			goto gotit;
401 		}
402 		/*
403 		 * check for a block available on the same cylinder
404 		 * beginning with one which is rotationally optimal
405 		 */
406 		i = bpref * NSPF(fs);
407 		cylno = i / fs->fs_spc;
408 		cylbp = cgp->cg_b[cylno];
409 		pos = (i + (fs->fs_rotdelay == 0) ? 0 :
410 		       1 + fs->fs_rotdelay * HZ * fs->fs_nsect /
411 		       (NSPF(fs) * 1000)) % fs->fs_nsect * NRPOS / fs->fs_nsect;
412 		for (i = pos; i < NRPOS; i++)
413 			if (cylbp[i] > 0)
414 				break;
415 		if (i == NRPOS)
416 			for (i = 0; i < pos; i++)
417 				if (cylbp[i] > 0)
418 					break;
419 		if (cylbp[i] > 0) {
420 			bpref = cylno * fs->fs_spc / NSPB(fs);
421 			for (j = fs->fs_postbl[i]; j > -1; j = fs->fs_rotbl[j]) {
422 				if (isblock(fs, cgp->cg_free, bpref + j)) {
423 					bno = (bpref + j) * fs->fs_frag;
424 					goto gotit;
425 				}
426 			}
427 			panic("alloccgblk: can't find blk in cyl");
428 		}
429 	}
430 	bno = mapsearch(fs, cgp, bpref, fs->fs_frag);
431 	if (bno == 0)
432 		return (0);
433 	cgp->cg_rotor = bno;
434 gotit:
435 	clrblock(fs, cgp->cg_free, bno/fs->fs_frag);
436 	cgp->cg_cs.cs_nbfree--;
437 	fs->fs_cstotal.cs_nbfree--;
438 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
439 	i = bno * NSPF(fs);
440 	cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]--;
441 	fs->fs_fmod++;
442 	return (cgp->cg_cgx * fs->fs_fpg + bno);
443 }
444 
445 ino_t
446 ialloccg(dev, fs, cg, ipref, mode)
447 	dev_t dev;
448 	register struct fs *fs;
449 	int cg;
450 	daddr_t ipref;
451 	int mode;
452 {
453 	register struct buf *bp;
454 	register struct cg *cgp;
455 	int i;
456 
457 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
458 		return (0);
459 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
460 	if (bp->b_flags & B_ERROR)
461 		return (0);
462 	cgp = bp->b_un.b_cg;
463 	if (ipref) {
464 		ipref %= fs->fs_ipg;
465 		if (isclr(cgp->cg_iused, ipref))
466 			goto gotit;
467 	} else
468 		ipref = cgp->cg_irotor;
469 	for (i = 0; i < fs->fs_ipg; i++) {
470 		ipref++;
471 		if (ipref >= fs->fs_ipg)
472 			ipref = 0;
473 		if (isclr(cgp->cg_iused, ipref)) {
474 			cgp->cg_irotor = ipref;
475 			goto gotit;
476 		}
477 	}
478 	brelse(bp);
479 	return (0);
480 gotit:
481 	setbit(cgp->cg_iused, ipref);
482 	cgp->cg_cs.cs_nifree--;
483 	fs->fs_cstotal.cs_nifree--;
484 	fs->fs_cs(fs, cg).cs_nifree--;
485 	fs->fs_fmod++;
486 	if ((mode & IFMT) == IFDIR) {
487 		cgp->cg_cs.cs_ndir++;
488 		fs->fs_cstotal.cs_ndir++;
489 		fs->fs_cs(fs, cg).cs_ndir++;
490 	}
491 	bdwrite(bp);
492 	return (cg * fs->fs_ipg + ipref);
493 }
494 
495 fre(dev, bno, size)
496 	dev_t dev;
497 	daddr_t bno;
498 	off_t size;
499 {
500 	register struct fs *fs;
501 	register struct cg *cgp;
502 	register struct buf *bp;
503 	int cg, blk, frags, bbase;
504 	register int i;
505 
506 	fs = getfs(dev);
507 	if ((unsigned)size > fs->fs_bsize || size % fs->fs_fsize != 0)
508 		panic("free: bad size");
509 	cg = dtog(bno, fs);
510 	if (badblock(fs, bno))
511 		return;
512 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
513 	if (bp->b_flags & B_ERROR)
514 		return;
515 	cgp = bp->b_un.b_cg;
516 	bno %= fs->fs_fpg;
517 	if (size == fs->fs_bsize) {
518 		if (isblock(fs, cgp->cg_free, bno/fs->fs_frag))
519 			panic("free: freeing free block");
520 		setblock(fs, cgp->cg_free, bno/fs->fs_frag);
521 		cgp->cg_cs.cs_nbfree++;
522 		fs->fs_cstotal.cs_nbfree++;
523 		fs->fs_cs(fs, cg).cs_nbfree++;
524 		i = bno * NSPF(fs);
525 		cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]++;
526 	} else {
527 		bbase = bno - (bno % fs->fs_frag);
528 		/*
529 		 * decrement the counts associated with the old frags
530 		 */
531 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
532 		       (0xff >> (NBBY - fs->fs_frag)));
533 		fragacct(fs, blk, cgp->cg_frsum, -1);
534 		/*
535 		 * deallocate the fragment
536 		 */
537 		frags = size / fs->fs_fsize;
538 		for (i = 0; i < frags; i++) {
539 			if (isset(cgp->cg_free, bno + i))
540 				panic("free: freeing free frag");
541 			setbit(cgp->cg_free, bno + i);
542 			cgp->cg_cs.cs_nffree++;
543 			fs->fs_cstotal.cs_nffree++;
544 			fs->fs_cs(fs, cg).cs_nffree++;
545 		}
546 		/*
547 		 * add back in counts associated with the new frags
548 		 */
549 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
550 		       (0xff >> (NBBY - fs->fs_frag)));
551 		fragacct(fs, blk, cgp->cg_frsum, 1);
552 		/*
553 		 * if a complete block has been reassembled, account for it
554 		 */
555 		if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) {
556 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
557 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
558 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
559 			cgp->cg_cs.cs_nbfree++;
560 			fs->fs_cstotal.cs_nbfree++;
561 			fs->fs_cs(fs, cg).cs_nbfree++;
562 			i = bbase * NSPF(fs);
563 			cgp->cg_b[i / fs->fs_spc]
564 				 [i % fs->fs_nsect * NRPOS / fs->fs_nsect]++;
565 		}
566 	}
567 	fs->fs_fmod++;
568 	bdwrite(bp);
569 }
570 
571 ifree(dev, ino, mode)
572 	dev_t dev;
573 	ino_t ino;
574 	int mode;
575 {
576 	register struct fs *fs;
577 	register struct cg *cgp;
578 	register struct buf *bp;
579 	int cg;
580 
581 	fs = getfs(dev);
582 	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
583 		panic("ifree: range");
584 	cg = itog(ino, fs);
585 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
586 	if (bp->b_flags & B_ERROR)
587 		return;
588 	cgp = bp->b_un.b_cg;
589 	ino %= fs->fs_ipg;
590 	if (isclr(cgp->cg_iused, ino))
591 		panic("ifree: freeing free inode");
592 	clrbit(cgp->cg_iused, ino);
593 	cgp->cg_cs.cs_nifree++;
594 	fs->fs_cstotal.cs_nifree++;
595 	fs->fs_cs(fs, cg).cs_nifree++;
596 	if ((mode & IFMT) == IFDIR) {
597 		cgp->cg_cs.cs_ndir--;
598 		fs->fs_cstotal.cs_ndir--;
599 		fs->fs_cs(fs, cg).cs_ndir--;
600 	}
601 	fs->fs_fmod++;
602 	bdwrite(bp);
603 }
604 
/*
 * find a block of the specified size in the specified cylinder group
 * It is a panic if a request is made to find a block if none are
 * available.
 *
 * bpref, when non-zero, gives the preferred starting fragment for
 * the search; allocsiz is the desired run length in fragments.
 * Returns the fragment number, within the cylinder group, of the
 * start of a free run of at least allocsiz fragments.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = bpref % fs->fs_fpg / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = roundup(fs->fs_fpg - 1, NBBY) / NBBY - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
		1 << (allocsiz - 1));
	if (loc == 0) {
		/*
		 * nothing between start and the end of the map;
		 * rescan from the beginning of the data area
		 */
		len = start - 1;
		start = (cgdmin(cgp->cg_cgx, fs) -
			 cgbase(cgp->cg_cgx, fs)) / NBBY;
		loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
			1 << (allocsiz - 1));
		if (loc == 0) {
			panic("alloccg: map corrupted");
			return (0);
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += fs->fs_frag) {
		/* extract one block's worth of free bits */
		blk = (cgp->cg_free[bno / NBBY] >> i) &
		      (0xff >> NBBY - fs->fs_frag);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		/* slide a window of allocsiz bits across the block */
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				return (bno + i + pos);
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (0);
}
666 
/*
 * update the frsum fields to reflect addition or deletion
 * of some frags
 *
 * fragmap is one byte of the free map (one block's worth of
 * fragments); fraglist is the cg_frsum array, indexed by run
 * length; cnt is +1 to add this byte's free runs into the counts
 * or -1 to remove them.
 */
fragacct(fs, fragmap, fraglist, cnt)
	struct fs *fs;
	int fragmap;
	long fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	/* inblk gets a bit set for each run length present in fragmap */
	inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < fs->fs_frag; siz++) {
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		/* slide a size-siz window across the map looking for
		 * maximal runs of exactly siz free fragments */
		for (pos = siz; pos <= fs->fs_frag; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				/* skip past the run just counted */
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}
700 
701 badblock(fs, bn)
702 	register struct fs *fs;
703 	daddr_t bn;
704 {
705 
706 	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(dtog(bn, fs), fs)) {
707 		fserr(fs, "bad block");
708 		return (1);
709 	}
710 	return (0);
711 }
712 
713 /*
714  * getfs maps a device number into
715  * a pointer to the incore super
716  * block.  The algorithm is a linear
717  * search through the mount table.
718  * A consistency check of the
719  * in core free-block and i-node
720  * counts is performed.
721  *
722  * panic: no fs -- the device is not mounted.
723  *	this "cannot happen"
724  */
725 struct fs *
726 getfs(dev)
727 	dev_t dev;
728 {
729 	register struct mount *mp;
730 	register struct fs *fs;
731 
732 	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
733 		if (mp->m_bufp != NULL && mp->m_dev == dev) {
734 			fs = mp->m_bufp->b_un.b_fs;
735 			if (fs->fs_magic != FS_MAGIC)
736 				panic("getfs: bad magic");
737 			return (fs);
738 		}
739 	panic("getfs: no fs");
740 	return (NULL);
741 }
742 
743 /*
744  * Fserr prints the name of a file system
745  * with an error diagnostic, in the form
746  *	fs: error message
747  */
748 fserr(fs, cp)
749 	struct fs *fs;
750 	char *cp;
751 {
752 
753 	printf("%s: %s\n", fs->fs_fsmnt, cp);
754 }
755 
756 /*
757  * Getfsx returns the index in the file system
758  * table of the specified device.  The swap device
759  * is also assigned a pseudo-index.  The index may
760  * be used as a compressed indication of the location
761  * of a block, recording
762  *	<getfsx(dev),blkno>
763  * rather than
764  *	<dev, blkno>
765  * provided the information need remain valid only
766  * as long as the file system is mounted.
767  */
768 getfsx(dev)
769 	dev_t dev;
770 {
771 	register struct mount *mp;
772 
773 	if (dev == swapdev)
774 		return (MSWAPX);
775 	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
776 		if (mp->m_dev == dev)
777 			return (mp - &mount[0]);
778 	return (-1);
779 }
780 
/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate modified
 * super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	/* updlock serializes concurrent syncs; bail if one is running */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, MAXBSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			/* also write back the cylinder group summary blocks */
			blks = howmany(fs->fs_cssize, fs->fs_bsize);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev,
				    fsbtodb(fs, fs->fs_csaddr + (i * fs->fs_frag)),
				    fs->fs_bsize);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 * (inodeNINODE presumably points just past the end of the
	 * in-core inode table -- set elsewhere; TODO confirm)
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
843 
844 /*
845  * block macros
846  */
847 
848 isblock(fs, cp, h)
849 	struct fs *fs;
850 	unsigned char *cp;
851 	int h;
852 {
853 	unsigned char mask;
854 
855 	switch (fs->fs_frag) {
856 	case 8:
857 		return (cp[h] == 0xff);
858 	case 4:
859 		mask = 0x0f << ((h & 0x1) << 2);
860 		return ((cp[h >> 1] & mask) == mask);
861 	case 2:
862 		mask = 0x03 << ((h & 0x3) << 1);
863 		return ((cp[h >> 2] & mask) == mask);
864 	case 1:
865 		mask = 0x01 << (h & 0x7);
866 		return ((cp[h >> 3] & mask) == mask);
867 	default:
868 		panic("isblock bad fs_frag");
869 		return;
870 	}
871 }
872 clrblock(fs, cp, h)
873 	struct fs *fs;
874 	unsigned char *cp;
875 	int h;
876 {
877 	switch ((fs)->fs_frag) {
878 	case 8:
879 		cp[h] = 0;
880 		return;
881 	case 4:
882 		cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2));
883 		return;
884 	case 2:
885 		cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1));
886 		return;
887 	case 1:
888 		cp[h >> 3] &= ~(0x01 << (h & 0x7));
889 		return;
890 	default:
891 		panic("clrblock bad fs_frag");
892 		return;
893 	}
894 }
895 setblock(fs, cp, h)
896 	struct fs *fs;
897 	unsigned char *cp;
898 	int h;
899 {
900 	switch (fs->fs_frag) {
901 	case 8:
902 		cp[h] = 0xff;
903 		return;
904 	case 4:
905 		cp[h >> 1] |= (0x0f << ((h & 0x1) << 2));
906 		return;
907 	case 2:
908 		cp[h >> 2] |= (0x03 << ((h & 0x3) << 1));
909 		return;
910 	case 1:
911 		cp[h >> 3] |= (0x01 << (h & 0x7));
912 		return;
913 	default:
914 		panic("setblock bad fs_frag");
915 		return;
916 	}
917 }
918