xref: /csrg-svn/sys/ufs/ffs/ffs_alloc.c (revision 5361)
1 /* Copyright (c) 1981 Regents of the University of California */
2 
3 static char vers[] = "@(#)ffs_alloc.c 1.13 01/10/82";
4 
5 /*	alloc.c	4.8	81/03/08	*/
6 
7 #include "../h/param.h"
8 #include "../h/systm.h"
9 #include "../h/mount.h"
10 #include "../h/fs.h"
11 #include "../h/conf.h"
12 #include "../h/buf.h"
13 #include "../h/inode.h"
14 #include "../h/dir.h"
15 #include "../h/user.h"
16 
17 extern u_long		hashalloc();
18 extern ino_t		ialloccg();
19 extern daddr_t		alloccg();
20 extern daddr_t		alloccgblk();
21 extern daddr_t		fragextend();
22 extern daddr_t		blkpref();
23 extern daddr_t		mapsearch();
24 extern int		inside[], around[];
25 extern unsigned char	*fragtbl[];
26 
27 struct buf *
28 alloc(dev, ip, bpref, size)
29 	dev_t dev;
30 	register struct inode *ip;
31 	daddr_t bpref;
32 	int size;
33 {
34 	daddr_t bno;
35 	register struct fs *fs;
36 	register struct buf *bp;
37 	int cg;
38 
39 	fs = getfs(dev);
40 	if ((unsigned)size > fs->fs_bsize || size % fs->fs_fsize != 0)
41 		panic("alloc: bad size");
42 	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
43 		goto nospace;
44 	if (u.u_uid != 0 &&
45 	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
46 	      fs->fs_dsize * fs->fs_minfree / 100)
47 		goto nospace;
48 	if (bpref >= fs->fs_size)
49 		bpref = 0;
50 	if (bpref == 0)
51 		cg = itog(ip->i_number, fs);
52 	else
53 		cg = dtog(bpref, fs);
54 	bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
55 	if (bno == 0)
56 		goto nospace;
57 	bp = getblk(dev, fsbtodb(fs, bno), size);
58 	clrbuf(bp);
59 	return (bp);
60 nospace:
61 	fserr(fs, "file system full");
62 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
63 	u.u_error = ENOSPC;
64 	return (NULL);
65 }
66 
67 struct buf *
68 realloccg(dev, bprev, bpref, osize, nsize)
69 	dev_t dev;
70 	daddr_t bprev, bpref;
71 	int osize, nsize;
72 {
73 	daddr_t bno;
74 	register struct fs *fs;
75 	register struct buf *bp, *obp;
76 	caddr_t cp;
77 	int cg;
78 
79 	fs = getfs(dev);
80 	if ((unsigned)osize > fs->fs_bsize || osize % fs->fs_fsize != 0 ||
81 	    (unsigned)nsize > fs->fs_bsize || nsize % fs->fs_fsize != 0)
82 		panic("realloccg: bad size");
83 	if (u.u_uid != 0 &&
84 	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
85 	      fs->fs_dsize * fs->fs_minfree / 100)
86 		goto nospace;
87 	if (bprev == 0)
88 		panic("realloccg: bad bprev");
89 	else
90 		cg = dtog(bprev, fs);
91 	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
92 	if (bno != 0) {
93 		bp = bread(dev, fsbtodb(fs, bno), osize);
94 		if (bp->b_flags & B_ERROR)
95 			return (0);
96 		bp->b_bcount = nsize;
97 		blkclr(bp->b_un.b_addr + osize, nsize - osize);
98 		return (bp);
99 	}
100 	if (bpref >= fs->fs_size)
101 		bpref = 0;
102 	bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, nsize, alloccg);
103 	if (bno != 0) {
104 		/*
105 		 * make a new copy
106 		 */
107 		obp = bread(dev, fsbtodb(fs, bprev), osize);
108 		if (obp->b_flags & B_ERROR)
109 			return (0);
110 		bp = getblk(dev, fsbtodb(fs, bno), nsize);
111 		cp = bp->b_un.b_addr;
112 		bp->b_un.b_addr = obp->b_un.b_addr;
113 		obp->b_un.b_addr = cp;
114 		obp->b_flags |= B_INVAL;
115 		brelse(obp);
116 		fre(dev, bprev, (off_t)osize);
117 		blkclr(bp->b_un.b_addr + osize, nsize - osize);
118 		return(bp);
119 	}
120 nospace:
121 	/*
122 	 * no space available
123 	 */
124 	fserr(fs, "file system full");
125 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
126 	u.u_error = ENOSPC;
127 	return (NULL);
128 }
129 
130 struct inode *
131 ialloc(dev, ipref, mode)
132 	dev_t dev;
133 	ino_t ipref;
134 	int mode;
135 {
136 	ino_t ino;
137 	register struct fs *fs;
138 	register struct inode *ip;
139 	int cg;
140 
141 	fs = getfs(dev);
142 	if (fs->fs_cstotal.cs_nifree == 0)
143 		goto noinodes;
144 	if (ipref >= fs->fs_ncg * fs->fs_ipg)
145 		ipref = 0;
146 	cg = itog(ipref, fs);
147 	ino = (ino_t)hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
148 	if (ino == 0)
149 		goto noinodes;
150 	ip = iget(dev, ino);
151 	if (ip == NULL) {
152 		ifree(dev, ino, 0);
153 		return (NULL);
154 	}
155 	if (ip->i_mode)
156 		panic("ialloc: dup alloc");
157 	return (ip);
158 noinodes:
159 	fserr(fs, "out of inodes");
160 	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
161 	u.u_error = ENOSPC;
162 	return (NULL);
163 }
164 
165 /*
166  * find a cylinder to place a directory
167  */
168 dirpref(dev)
169 	dev_t dev;
170 {
171 	register struct fs *fs;
172 	int cg, minndir, mincg, avgifree;
173 
174 	fs = getfs(dev);
175 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
176 	minndir = fs->fs_ipg;
177 	mincg = 0;
178 	for (cg = 0; cg < fs->fs_ncg; cg++)
179 		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
180 		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
181 			mincg = cg;
182 			minndir = fs->fs_cs(fs, cg).cs_ndir;
183 		}
184 	return (fs->fs_ipg * mincg);
185 }
186 
187 /*
188  * select a cylinder to place a large block of data
189  */
190 daddr_t
191 blkpref(dev)
192 	dev_t dev;
193 {
194 	register struct fs *fs;
195 	int cg, avgbfree;
196 
197 	fs = getfs(dev);
198 	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
199 	for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
200 		if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
201 			fs->fs_cgrotor = cg;
202 			return (fs->fs_fpg * cg + fs->fs_frag);
203 		}
204 	for (cg = 0; cg <= fs->fs_cgrotor; cg++)
205 		if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
206 			fs->fs_cgrotor = cg;
207 			return (fs->fs_fpg * cg + fs->fs_frag);
208 		}
209 	return (0);
210 }
211 
212 /*VARARGS5*/
213 u_long
214 hashalloc(dev, fs, cg, pref, size, allocator)
215 	dev_t dev;
216 	register struct fs *fs;
217 	int cg;
218 	long pref;
219 	int size;	/* size for data blocks, mode for inodes */
220 	u_long (*allocator)();
221 {
222 	long result;
223 	int i, icg = cg;
224 
225 	/*
226 	 * 1: preferred cylinder group
227 	 */
228 	result = (*allocator)(dev, fs, cg, pref, size);
229 	if (result)
230 		return (result);
231 	/*
232 	 * 2: quadratic rehash
233 	 */
234 	for (i = 1; i < fs->fs_ncg; i *= 2) {
235 		cg += i;
236 		if (cg >= fs->fs_ncg)
237 			cg -= fs->fs_ncg;
238 		result = (*allocator)(dev, fs, cg, 0, size);
239 		if (result)
240 			return (result);
241 	}
242 	/*
243 	 * 3: brute force search
244 	 */
245 	cg = icg;
246 	for (i = 0; i < fs->fs_ncg; i++) {
247 		result = (*allocator)(dev, fs, cg, 0, size);
248 		if (result)
249 			return (result);
250 		cg++;
251 		if (cg == fs->fs_ncg)
252 			cg = 0;
253 	}
254 	return (0);
255 }
256 
257 daddr_t
258 fragextend(dev, fs, cg, bprev, osize, nsize)
259 	dev_t dev;
260 	register struct fs *fs;
261 	int cg;
262 	long bprev;
263 	int osize, nsize;
264 {
265 	register struct buf *bp;
266 	register struct cg *cgp;
267 	long bno;
268 	int frags, bbase;
269 	int i;
270 
271 	frags = nsize / fs->fs_fsize;
272 	bbase = bprev % fs->fs_frag;
273 	if (bbase > (bprev + frags - 1) % fs->fs_frag) {
274 		/* cannot extend across a block boundry */
275 		return (0);
276 	}
277 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
278 	if (bp->b_flags & B_ERROR)
279 		return (0);
280 	cgp = bp->b_un.b_cg;
281 	bno = bprev % fs->fs_fpg;
282 	for (i = osize / fs->fs_fsize; i < frags; i++)
283 		if (isclr(cgp->cg_free, bno + i)) {
284 			brelse(bp);
285 			return (0);
286 		}
287 	/*
288 	 * the current fragment can be extended
289 	 * deduct the count on fragment being extended into
290 	 * increase the count on the remaining fragment (if any)
291 	 * allocate the extended piece
292 	 */
293 	for (i = frags; i < fs->fs_frag - bbase; i++)
294 		if (isclr(cgp->cg_free, bno + i))
295 			break;
296 	cgp->cg_frsum[i - osize / fs->fs_fsize]--;
297 	if (i != frags)
298 		cgp->cg_frsum[i - frags]++;
299 	for (i = osize / fs->fs_fsize; i < frags; i++) {
300 		clrbit(cgp->cg_free, bno + i);
301 		cgp->cg_cs.cs_nffree--;
302 		fs->fs_cstotal.cs_nffree--;
303 		fs->fs_cs(fs, cg).cs_nffree--;
304 	}
305 	fs->fs_fmod++;
306 	bdwrite(bp);
307 	return (bprev);
308 }
309 
310 daddr_t
311 alloccg(dev, fs, cg, bpref, size)
312 	dev_t dev;
313 	register struct fs *fs;
314 	int cg;
315 	daddr_t bpref;
316 	int size;
317 {
318 	register struct buf *bp;
319 	register struct cg *cgp;
320 	int bno, frags;
321 	int allocsiz;
322 	register int i;
323 
324 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
325 		return (0);
326 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
327 	if (bp->b_flags & B_ERROR)
328 		return (0);
329 	cgp = bp->b_un.b_cg;
330 	if (size == fs->fs_bsize) {
331 		bno = alloccgblk(fs, cgp, bpref);
332 		bdwrite(bp);
333 		return (bno);
334 	}
335 	/*
336 	 * check to see if any fragments are already available
337 	 * allocsiz is the size which will be allocated, hacking
338 	 * it down to a smaller size if necessary
339 	 */
340 	frags = size / fs->fs_fsize;
341 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
342 		if (cgp->cg_frsum[allocsiz] != 0)
343 			break;
344 	if (allocsiz == fs->fs_frag) {
345 		/*
346 		 * no fragments were available, so a block will be
347 		 * allocated, and hacked up
348 		 */
349 		if (cgp->cg_cs.cs_nbfree == 0) {
350 			brelse(bp);
351 			return (0);
352 		}
353 		bno = alloccgblk(fs, cgp, bpref);
354 		bpref = bno % fs->fs_fpg;
355 		for (i = frags; i < fs->fs_frag; i++)
356 			setbit(cgp->cg_free, bpref + i);
357 		i = fs->fs_frag - frags;
358 		cgp->cg_cs.cs_nffree += i;
359 		fs->fs_cstotal.cs_nffree += i;
360 		fs->fs_cs(fs, cg).cs_nffree += i;
361 		cgp->cg_frsum[i]++;
362 		bdwrite(bp);
363 		return (bno);
364 	}
365 	bno = mapsearch(fs, cgp, bpref, allocsiz);
366 	if (bno == 0)
367 		return (0);
368 	for (i = 0; i < frags; i++)
369 		clrbit(cgp->cg_free, bno + i);
370 	cgp->cg_cs.cs_nffree -= frags;
371 	fs->fs_cstotal.cs_nffree -= frags;
372 	fs->fs_cs(fs, cg).cs_nffree -= frags;
373 	cgp->cg_frsum[allocsiz]--;
374 	if (frags != allocsiz)
375 		cgp->cg_frsum[allocsiz - frags]++;
376 	bdwrite(bp);
377 	return (cg * fs->fs_fpg + bno);
378 }
379 
/*
 * Allocate a block in a cylinder group whose summary (*cgp) the
 * caller has already read in and will write back.
 *
 * The allocation policy is:
 *   1) use the requested block (bpref) if it is free.
 *   2) otherwise take a rotationally optimal block on the same cylinder.
 *   3) otherwise take the next available block in the group,
 *      starting at the rotor (via mapsearch).
 * Returns a file-system relative block number, or 0 if the group
 * has no free block.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	/* round the preference down to a block boundary, cg-relative */
	bpref &= ~(fs->fs_frag - 1);
	bpref %= fs->fs_fpg;
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) {
		bno = bpref;
		goto gotit;
	}
	/* no cylinder position information; fall back to a map search */
	if (fs->fs_cpc == 0)
		goto norot;
	/*
	 * check for a block available on the same cylinder
	 * beginning with one which is rotationally optimal
	 */
	cylno = cbtocylno(fs, bpref);
	cylbp = cgp->cg_b[cylno];
	if (fs->fs_rotdelay == 0) {
		pos = cbtorpos(fs, bpref);
	} else {
		/*
		 * here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next rotational position
		 */
		bpref += fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000);
		pos = cbtorpos(fs, bpref);
		pos = (pos + 1) % NRPOS;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * optimal rotational position and proceeding around.
	 */
	for (i = pos; i < NRPOS; i++)
		if (cylbp[i] > 0)
			break;
	if (i == NRPOS)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs->fs_postbl[pos][i] == -1)
			panic("alloccgblk: cyl groups corrupted");
		/* walk the rotational block list until a free block turns up */
		for (i = fs->fs_postbl[pos][i]; ; i += fs->fs_rotbl[i]) {
			if (isblock(fs, cgp->cg_free, bno + i)) {
				bno = (bno + i) * fs->fs_frag;
				goto gotit;
			}
			if (fs->fs_rotbl[i] == 0)
				break;
		}
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, fs->fs_frag);
	if (bno == 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	/* mark the block allocated and adjust all the free counts */
	clrblock(fs, cgp->cg_free, bno/fs->fs_frag);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cgp->cg_b[cbtocylno(fs, bno)][cbtorpos(fs, bno)]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
475 
476 ino_t
477 ialloccg(dev, fs, cg, ipref, mode)
478 	dev_t dev;
479 	register struct fs *fs;
480 	int cg;
481 	daddr_t ipref;
482 	int mode;
483 {
484 	register struct buf *bp;
485 	register struct cg *cgp;
486 	int i;
487 
488 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
489 		return (0);
490 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
491 	if (bp->b_flags & B_ERROR)
492 		return (0);
493 	cgp = bp->b_un.b_cg;
494 	if (ipref) {
495 		ipref %= fs->fs_ipg;
496 		if (isclr(cgp->cg_iused, ipref))
497 			goto gotit;
498 	} else
499 		ipref = cgp->cg_irotor;
500 	for (i = 0; i < fs->fs_ipg; i++) {
501 		ipref++;
502 		if (ipref >= fs->fs_ipg)
503 			ipref = 0;
504 		if (isclr(cgp->cg_iused, ipref)) {
505 			cgp->cg_irotor = ipref;
506 			goto gotit;
507 		}
508 	}
509 	brelse(bp);
510 	return (0);
511 gotit:
512 	setbit(cgp->cg_iused, ipref);
513 	cgp->cg_cs.cs_nifree--;
514 	fs->fs_cstotal.cs_nifree--;
515 	fs->fs_cs(fs, cg).cs_nifree--;
516 	fs->fs_fmod++;
517 	if ((mode & IFMT) == IFDIR) {
518 		cgp->cg_cs.cs_ndir++;
519 		fs->fs_cstotal.cs_ndir++;
520 		fs->fs_cs(fs, cg).cs_ndir++;
521 	}
522 	bdwrite(bp);
523 	return (cg * fs->fs_ipg + ipref);
524 }
525 
526 fre(dev, bno, size)
527 	dev_t dev;
528 	daddr_t bno;
529 	off_t size;
530 {
531 	register struct fs *fs;
532 	register struct cg *cgp;
533 	register struct buf *bp;
534 	int cg, blk, frags, bbase;
535 	register int i;
536 
537 	fs = getfs(dev);
538 	if ((unsigned)size > fs->fs_bsize || size % fs->fs_fsize != 0)
539 		panic("free: bad size");
540 	cg = dtog(bno, fs);
541 	if (badblock(fs, bno))
542 		return;
543 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
544 	if (bp->b_flags & B_ERROR)
545 		return;
546 	cgp = bp->b_un.b_cg;
547 	bno %= fs->fs_fpg;
548 	if (size == fs->fs_bsize) {
549 		if (isblock(fs, cgp->cg_free, bno/fs->fs_frag))
550 			panic("free: freeing free block");
551 		setblock(fs, cgp->cg_free, bno/fs->fs_frag);
552 		cgp->cg_cs.cs_nbfree++;
553 		fs->fs_cstotal.cs_nbfree++;
554 		fs->fs_cs(fs, cg).cs_nbfree++;
555 		cgp->cg_b[cbtocylno(fs, bno)][cbtorpos(fs, bno)]++;
556 	} else {
557 		bbase = bno - (bno % fs->fs_frag);
558 		/*
559 		 * decrement the counts associated with the old frags
560 		 */
561 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
562 		       (0xff >> (NBBY - fs->fs_frag)));
563 		fragacct(fs, blk, cgp->cg_frsum, -1);
564 		/*
565 		 * deallocate the fragment
566 		 */
567 		frags = size / fs->fs_fsize;
568 		for (i = 0; i < frags; i++) {
569 			if (isset(cgp->cg_free, bno + i))
570 				panic("free: freeing free frag");
571 			setbit(cgp->cg_free, bno + i);
572 			cgp->cg_cs.cs_nffree++;
573 			fs->fs_cstotal.cs_nffree++;
574 			fs->fs_cs(fs, cg).cs_nffree++;
575 		}
576 		/*
577 		 * add back in counts associated with the new frags
578 		 */
579 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
580 		       (0xff >> (NBBY - fs->fs_frag)));
581 		fragacct(fs, blk, cgp->cg_frsum, 1);
582 		/*
583 		 * if a complete block has been reassembled, account for it
584 		 */
585 		if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) {
586 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
587 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
588 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
589 			cgp->cg_cs.cs_nbfree++;
590 			fs->fs_cstotal.cs_nbfree++;
591 			fs->fs_cs(fs, cg).cs_nbfree++;
592 			cgp->cg_b[cbtocylno(fs, bbase)][cbtorpos(fs, bbase)]++;
593 		}
594 	}
595 	fs->fs_fmod++;
596 	bdwrite(bp);
597 }
598 
599 ifree(dev, ino, mode)
600 	dev_t dev;
601 	ino_t ino;
602 	int mode;
603 {
604 	register struct fs *fs;
605 	register struct cg *cgp;
606 	register struct buf *bp;
607 	int cg;
608 
609 	fs = getfs(dev);
610 	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
611 		panic("ifree: range");
612 	cg = itog(ino, fs);
613 	bp = bread(dev, fsbtodb(fs, cgtod(cg, fs)), fs->fs_bsize);
614 	if (bp->b_flags & B_ERROR)
615 		return;
616 	cgp = bp->b_un.b_cg;
617 	ino %= fs->fs_ipg;
618 	if (isclr(cgp->cg_iused, ino))
619 		panic("ifree: freeing free inode");
620 	clrbit(cgp->cg_iused, ino);
621 	cgp->cg_cs.cs_nifree++;
622 	fs->fs_cstotal.cs_nifree++;
623 	fs->fs_cs(fs, cg).cs_nifree++;
624 	if ((mode & IFMT) == IFDIR) {
625 		cgp->cg_cs.cs_ndir--;
626 		fs->fs_cstotal.cs_ndir--;
627 		fs->fs_cs(fs, cg).cs_ndir--;
628 	}
629 	fs->fs_fmod++;
630 	bdwrite(bp);
631 }
632 
/*
 * find a block of the specified size in the specified cylinder group
 * It is a panic if a request is made to find a block if none are
 * available.
 *
 * allocsiz is the run length (in frags) wanted; returns the frag
 * offset within the group of the start of the run, or 0 only via
 * the (unreachable after panic) error returns.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = bpref % fs->fs_fpg / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = roundup(fs->fs_fpg - 1, NBBY) / NBBY - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
		1 << (allocsiz - 1));
	if (loc == 0) {
		/*
		 * forward scan failed; rescan from the group's first
		 * data byte up toward the original starting point.
		 * NOTE(review): len = start - 1 looks one byte short of
		 * covering [start0, start) -- verify against the scanc()
		 * length convention.
		 */
		len = start - 1;
		start = (cgdmin(cgp->cg_cgx, fs) -
			 cgbase(cgp->cg_cgx, fs)) / NBBY;
		loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
			1 << (allocsiz - 1));
		if (loc == 0) {
			panic("alloccg: map corrupted");
			return (0);
		}
	}
	/* scanc counts remaining bytes; start + len - loc is the hit */
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += fs->fs_frag) {
		blk = (cgp->cg_free[bno / NBBY] >> i) &
		      (0xff >> NBBY - fs->fs_frag);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		/* slide a window of allocsiz bits across the block's frags */
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				return (bno + i + pos);
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (0);
}
694 
/*
 * update the frsum fields to reflect addition or deletion
 * of some frags
 *
 * fs:		file system, for fs_frag and fragtbl
 * fragmap:	one byte of the free map covering a block's frags
 * fraglist:	frsum array; fraglist[siz] counts free runs of length siz
 * cnt:		+1 to add this byte's runs to the counts, -1 to remove
 */
fragacct(fs, fragmap, fraglist, cnt)
	struct fs *fs;
	int fragmap;
	long fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	/* inblk has bit siz set if the map holds a run of exactly siz frags */
	inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < fs->fs_frag; siz++) {
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		/* slide a size-siz window across the map byte */
		for (pos = siz; pos <= fs->fs_frag; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				/* skip past the run just counted */
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}
728 
729 badblock(fs, bn)
730 	register struct fs *fs;
731 	daddr_t bn;
732 {
733 
734 	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(dtog(bn, fs), fs)) {
735 		fserr(fs, "bad block");
736 		return (1);
737 	}
738 	return (0);
739 }
740 
741 /*
742  * getfs maps a device number into
743  * a pointer to the incore super
744  * block.  The algorithm is a linear
745  * search through the mount table.
746  * A consistency check of the
747  * in core free-block and i-node
748  * counts is performed.
749  *
750  * panic: no fs -- the device is not mounted.
751  *	this "cannot happen"
752  */
753 struct fs *
754 getfs(dev)
755 	dev_t dev;
756 {
757 	register struct mount *mp;
758 	register struct fs *fs;
759 
760 	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
761 		if (mp->m_bufp != NULL && mp->m_dev == dev) {
762 			fs = mp->m_bufp->b_un.b_fs;
763 			if (fs->fs_magic != FS_MAGIC)
764 				panic("getfs: bad magic");
765 			return (fs);
766 		}
767 	panic("getfs: no fs");
768 	return (NULL);
769 }
770 
/*
 * Fserr prints the name of a file system
 * with an error diagnostic, in the form
 *	fs: error message
 *
 * fs: file system whose mount point name (fs_fsmnt) is printed
 * cp: the error message string
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}
783 
784 /*
785  * Getfsx returns the index in the file system
786  * table of the specified device.  The swap device
787  * is also assigned a pseudo-index.  The index may
788  * be used as a compressed indication of the location
789  * of a block, recording
790  *	<getfsx(dev),blkno>
791  * rather than
792  *	<dev, blkno>
793  * provided the information need remain valid only
794  * as long as the file system is mounted.
795  */
796 getfsx(dev)
797 	dev_t dev;
798 {
799 	register struct mount *mp;
800 
801 	if (dev == swapdev)
802 		return (MSWAPX);
803 	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
804 		if (mp->m_dev == dev)
805 			return (mp - &mount[0]);
806 	return (-1);
807 }
808 
/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate modified
 * super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	/* only one update() runs at a time; updlock is the busy flag */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, SBSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			/* write back the cylinder group summary blocks too */
			blks = howmany(fs->fs_cssize, fs->fs_bsize);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev,
				    fsbtodb(fs, fs->fs_csaddr + (i * fs->fs_frag)),
				    fs->fs_bsize);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 * (inodeNINODE is presumably the end of the in-core
	 * inode table -- declared elsewhere; verify.)
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
871 
872 /*
873  * block macros
874  */
875 
876 isblock(fs, cp, h)
877 	struct fs *fs;
878 	unsigned char *cp;
879 	int h;
880 {
881 	unsigned char mask;
882 
883 	switch (fs->fs_frag) {
884 	case 8:
885 		return (cp[h] == 0xff);
886 	case 4:
887 		mask = 0x0f << ((h & 0x1) << 2);
888 		return ((cp[h >> 1] & mask) == mask);
889 	case 2:
890 		mask = 0x03 << ((h & 0x3) << 1);
891 		return ((cp[h >> 2] & mask) == mask);
892 	case 1:
893 		mask = 0x01 << (h & 0x7);
894 		return ((cp[h >> 3] & mask) == mask);
895 	default:
896 		panic("isblock bad fs_frag");
897 		return;
898 	}
899 }
900 clrblock(fs, cp, h)
901 	struct fs *fs;
902 	unsigned char *cp;
903 	int h;
904 {
905 	switch ((fs)->fs_frag) {
906 	case 8:
907 		cp[h] = 0;
908 		return;
909 	case 4:
910 		cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2));
911 		return;
912 	case 2:
913 		cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1));
914 		return;
915 	case 1:
916 		cp[h >> 3] &= ~(0x01 << (h & 0x7));
917 		return;
918 	default:
919 		panic("clrblock bad fs_frag");
920 		return;
921 	}
922 }
923 setblock(fs, cp, h)
924 	struct fs *fs;
925 	unsigned char *cp;
926 	int h;
927 {
928 	switch (fs->fs_frag) {
929 	case 8:
930 		cp[h] = 0xff;
931 		return;
932 	case 4:
933 		cp[h >> 1] |= (0x0f << ((h & 0x1) << 2));
934 		return;
935 	case 2:
936 		cp[h >> 2] |= (0x03 << ((h & 0x3) << 1));
937 		return;
938 	case 1:
939 		cp[h >> 3] |= (0x01 << (h & 0x7));
940 		return;
941 	default:
942 		panic("setblock bad fs_frag");
943 		return;
944 	}
945 }
946