xref: /csrg-svn/sys/ufs/ffs/ffs_alloc.c (revision 25256)
1 /*
2  * Copyright (c) 1982 Regents of the University of California.
3  * All rights reserved.  The Berkeley software License Agreement
4  * specifies the terms and conditions for redistribution.
5  *
6  *	@(#)ffs_alloc.c	6.17 (Berkeley) 10/23/85
7  */
8 
9 #include "param.h"
10 #include "systm.h"
11 #include "mount.h"
12 #include "fs.h"
13 #include "buf.h"
14 #include "inode.h"
15 #include "dir.h"
16 #include "user.h"
17 #include "quota.h"
18 #include "kernel.h"
19 #include "syslog.h"
20 
21 extern u_long		hashalloc();
22 extern ino_t		ialloccg();
23 extern daddr_t		alloccg();
24 extern daddr_t		alloccgblk();
25 extern daddr_t		fragextend();
26 extern daddr_t		blkpref();
27 extern daddr_t		mapsearch();
28 extern int		inside[], around[];
29 extern unsigned char	*fragtbl[];
30 
31 /*
32  * Allocate a block in the file system.
33  *
34  * The size of the requested block is given, which must be some
35  * multiple of fs_fsize and <= fs_bsize.
36  * A preference may be optionally specified. If a preference is given
37  * the following hierarchy is used to allocate a block:
38  *   1) allocate the requested block.
39  *   2) allocate a rotationally optimal block in the same cylinder.
40  *   3) allocate a block in the same cylinder group.
41  *   4) quadratically rehash into other cylinder groups, until an
42  *      available block is located.
43  * If no block preference is given the following hierarchy is used
44  * to allocate a block:
45  *   1) allocate a block in the cylinder group that contains the
46  *      inode for the file.
47  *   2) quadratically rehash into other cylinder groups, until an
48  *      available block is located.
49  */
50 struct buf *
51 alloc(ip, bpref, size)
52 	register struct inode *ip;
53 	daddr_t bpref;
54 	int size;
55 {
56 	daddr_t bno;
57 	register struct fs *fs;
58 	register struct buf *bp;
59 	int cg;
60 
61 	fs = ip->i_fs;
62 	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
63 		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
64 		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
65 		panic("alloc: bad size");
66 	}
67 	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
68 		goto nospace;
69 	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
70 		goto nospace;
71 #ifdef QUOTA
72 	u.u_error = chkdq(ip, (long)btodb(size), 0);
73 	if (u.u_error)
74 		return (NULL);
75 #endif
76 	if (bpref >= fs->fs_size)
77 		bpref = 0;
78 	if (bpref == 0)
79 		cg = itog(fs, ip->i_number);
80 	else
81 		cg = dtog(fs, bpref);
82 	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
83 		(u_long (*)())alloccg);
84 	if (bno <= 0)
85 		goto nospace;
86 	ip->i_blocks += btodb(size);
87 	ip->i_flag |= IUPD|ICHG;
88 	bp = getblk(ip->i_dev, fsbtodb(fs, bno), size);
89 	clrbuf(bp);
90 	return (bp);
91 nospace:
92 	fserr(fs, "file system full");
93 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
94 	u.u_error = ENOSPC;
95 	return (NULL);
96 }
97 
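/*
 * Editor's note: a minimal, hypothetical sketch (not part of this
 * file) of how a caller in the style of bmap() is expected to use
 * alloc().  The function name growfile and the direct-block-only
 * case are illustrative assumptions; blkpref() supplies the
 * preferred disk address and dbtofsb() converts the buffer's
 * device address back to a file system block number.
 */
#ifdef notdef
struct buf *
growfile(ip, lbn)
	register struct inode *ip;
	daddr_t lbn;
{
	register struct fs *fs = ip->i_fs;
	struct buf *bp;

	bp = alloc(ip, blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
	    (int)fs->fs_bsize);
	if (bp == NULL)
		return (NULL);
	ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno);
	return (bp);
}
#endif /* notdef */
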
98 /*
99  * Reallocate a fragment to a bigger size.
100  *
101  * The number and size of the old block are given, and a preference
102  * and a new size are also specified. The allocator attempts to extend
103  * the original block. Failing that, the regular block allocator is
104  * invoked to get an appropriate block.
105  */
106 struct buf *
107 realloccg(ip, bprev, bpref, osize, nsize)
108 	register struct inode *ip;
109 	daddr_t bprev, bpref;
110 	int osize, nsize;
111 {
112 	daddr_t bno;
113 	register struct fs *fs;
114 	register struct buf *bp, *obp;
115 	int cg, request;
116 
117 	fs = ip->i_fs;
118 	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
119 	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
120 		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
121 		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
122 		panic("realloccg: bad size");
123 	}
124 	if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
125 		goto nospace;
126 	if (bprev == 0) {
127 		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
128 		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
129 		panic("realloccg: bad bprev");
130 	}
131 #ifdef QUOTA
132 	u.u_error = chkdq(ip, (long)btodb(nsize - osize), 0);
133 	if (u.u_error)
134 		return (NULL);
135 #endif
136 	cg = dtog(fs, bprev);
137 	bno = fragextend(ip, cg, (long)bprev, osize, nsize);
138 	if (bno != 0) {
139 		do {
140 			bp = bread(ip->i_dev, fsbtodb(fs, bno), osize);
141 			if (bp->b_flags & B_ERROR) {
142 				brelse(bp);
143 				return (NULL);
144 			}
145 		} while (brealloc(bp, nsize) == 0);
146 		bp->b_flags |= B_DONE;
147 		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
148 		ip->i_blocks += btodb(nsize - osize);
149 		ip->i_flag |= IUPD|ICHG;
150 		return (bp);
151 	}
152 	if (bpref >= fs->fs_size)
153 		bpref = 0;
154 	switch (fs->fs_optim) {
155 	case FS_OPTSPACE:
156 		/*
157 		 * Allocate an exact sized fragment. Although this makes
158 		 * best use of space, we will waste time relocating it if
159 		 * the file continues to grow. If the fragmentation is
160 		 * less than half of the minimum free reserve, we choose
161 		 * to begin optimizing for time.
162 		 */
163 		request = nsize;
164 		if (fs->fs_minfree < 5 ||
165 		    fs->fs_cstotal.cs_nffree >
166 		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
167 			break;
168 		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
169 			fs->fs_fsmnt);
170 		fs->fs_optim = FS_OPTTIME;
171 		break;
172 	case FS_OPTTIME:
173 		/*
174 		 * At this point we have discovered a file that is trying
175 		 * to grow a small fragment to a larger fragment. To save
176 		 * time, we allocate a full sized block, then free the
177 		 * unused portion. If the file continues to grow, the
178 		 * `fragextend' call above will be able to grow it in place
179 		 * without further copying. If aberrant programs cause
180 		 * disk fragmentation to grow within 2% of the free reserve,
181 		 * we choose to begin optimizing for space.
182 		 */
183 		request = fs->fs_bsize;
184 		if (fs->fs_cstotal.cs_nffree <
185 		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
186 			break;
187 		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
188 			fs->fs_fsmnt);
189 		fs->fs_optim = FS_OPTSPACE;
190 		break;
191 	default:
192 		printf("dev = 0x%x, optim = %d, fs = %s\n",
193 		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
194 		panic("realloccg: bad optim");
195 		/* NOTREACHED */
196 	}
197 	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, request,
198 		(u_long (*)())alloccg);
199 	if (bno > 0) {
200 		obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize);
201 		if (obp->b_flags & B_ERROR) {
202 			brelse(obp);
203 			return (NULL);
204 		}
205 		bp = getblk(ip->i_dev, fsbtodb(fs, bno), nsize);
206 		bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize);
207 		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
208 		if (obp->b_flags & B_DELWRI) {
209 			obp->b_flags &= ~B_DELWRI;
210 			u.u_ru.ru_oublock--;		/* delete charge */
211 		}
212 		brelse(obp);
213 		free(ip, bprev, (off_t)osize);
214 		if (nsize < request)
215 			free(ip, bno + numfrags(fs, nsize),
216 				(off_t)(request - nsize));
217 		ip->i_blocks += btodb(nsize - osize);
218 		ip->i_flag |= IUPD|ICHG;
219 		return (bp);
220 	}
221 nospace:
222 	/*
223 	 * no space available
224 	 */
225 	fserr(fs, "file system full");
226 	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
227 	u.u_error = ENOSPC;
228 	return (NULL);
229 }
230 
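/*
 * Editor's note: a worked example (not in the original) of the two
 * optimization thresholds used above, assuming fs_dsize = 100000
 * frags and the default fs_minfree of 10%.  The SPACE to TIME
 * switch fires once fragmented free space (cs_nffree) drops to
 * 100000 * 10 / (2 * 100) = 5000 frags or below; the TIME to SPACE
 * switch fires once it climbs back to
 * 100000 * (10 - 2) / 100 = 8000 frags or above.  The gap between
 * the two thresholds keeps the policy from oscillating.
 */
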
231 /*
232  * Allocate an inode in the file system.
233  *
234  * A preference may be optionally specified. If a preference is given
235  * the following hierarchy is used to allocate an inode:
236  *   1) allocate the requested inode.
237  *   2) allocate an inode in the same cylinder group.
238  *   3) quadratically rehash into other cylinder groups, until an
239  *      available inode is located.
240  * If no inode preference is given the following hierarchy is used
241  * to allocate an inode:
242  *   1) allocate an inode in cylinder group 0.
243  *   2) quadratically rehash into other cylinder groups, until an
244  *      available inode is located.
245  */
246 struct inode *
247 ialloc(pip, ipref, mode)
248 	register struct inode *pip;
249 	ino_t ipref;
250 	int mode;
251 {
252 	ino_t ino;
253 	register struct fs *fs;
254 	register struct inode *ip;
255 	int cg;
256 
257 	fs = pip->i_fs;
258 	if (fs->fs_cstotal.cs_nifree == 0)
259 		goto noinodes;
260 #ifdef QUOTA
261 	u.u_error = chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0);
262 	if (u.u_error)
263 		return (NULL);
264 #endif
265 	if (ipref >= fs->fs_ncg * fs->fs_ipg)
266 		ipref = 0;
267 	cg = itog(fs, ipref);
268 	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
269 	if (ino == 0)
270 		goto noinodes;
271 	ip = iget(pip->i_dev, pip->i_fs, ino);
272 	if (ip == NULL) {
273 		ifree(pip, ino, 0);
274 		return (NULL);
275 	}
276 	if (ip->i_mode) {
277 		printf("mode = 0%o, inum = %d, fs = %s\n",
278 		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
279 		panic("ialloc: dup alloc");
280 	}
281 	if (ip->i_blocks) {				/* XXX */
282 		printf("free inode %s/%d had %d blocks\n",
283 		    fs->fs_fsmnt, ino, ip->i_blocks);
284 		ip->i_blocks = 0;
285 	}
286 	return (ip);
287 noinodes:
288 	fserr(fs, "out of inodes");
289 	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
290 	u.u_error = ENOSPC;
291 	return (NULL);
292 }
293 
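/*
 * Editor's note (not in the original): ialloc() reuses hashalloc()
 * for the rehash policy described above, passing the file mode
 * through hashalloc's size argument; ialloccg() receives it as
 * mode and uses it only to keep the per-group directory counts
 * (cs_ndir) accurate when a directory inode is allocated.
 */
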
294 /*
295  * Find a cylinder to place a directory.
296  *
297  * The policy implemented by this algorithm is to select from
298  * among those cylinder groups with above the average number of
299  * free inodes, the one with the smallest number of directories.
300  */
301 ino_t
302 dirpref(fs)
303 	register struct fs *fs;
304 {
305 	int cg, minndir, mincg, avgifree;
306 
307 	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
308 	minndir = fs->fs_ipg;
309 	mincg = 0;
310 	for (cg = 0; cg < fs->fs_ncg; cg++)
311 		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
312 		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
313 			mincg = cg;
314 			minndir = fs->fs_cs(fs, cg).cs_ndir;
315 		}
316 	return ((ino_t)(fs->fs_ipg * mincg));
317 }
318 
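/*
 * Editor's note: a hypothetical sketch (not part of this file, in
 * the style the inode-creation code is believed to use; pdir and
 * ipref are illustrative names) showing dirpref() and ialloc()
 * together: new directories are spread across cylinder groups,
 * while plain files are placed near their parent directory.
 */
#ifdef notdef
	if ((mode & IFMT) == IFDIR)
		ipref = dirpref(pdir->i_fs);
	else
		ipref = pdir->i_number;
	ip = ialloc(pdir, ipref, mode);
#endif /* notdef */
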
319 /*
320  * Select the desired position for the next block in a file.  The file is
321  * logically divided into sections. The first section is composed of the
322  * direct blocks. Each additional section contains fs_maxbpg blocks.
323  *
324  * If no blocks have been allocated in the first section, the policy is to
325  * request a block in the same cylinder group as the inode that describes
326  * the file. If no blocks have been allocated in any other section, the
327  * policy is to place the section in a cylinder group with a greater than
328  * average number of free blocks.  An appropriate cylinder group is found
329  * by using a rotor that sweeps the cylinder groups. When a new group of
330  * blocks is needed, the sweep begins in the cylinder group following the
331  * cylinder group from which the previous allocation was made. The sweep
332  * continues until a cylinder group with greater than the average number
333  * of free blocks is found. If the allocation is for the first block in an
334  * indirect block, the information on the previous allocation is unavailable;
335  * here a best guess is made based upon the logical block number being
336  * allocated.
337  *
338  * If a section is already partially allocated, the policy is to
339  * contiguously allocate fs_maxcontig blocks.  The end of one of these
340  * contiguous blocks and the beginning of the next is physically separated
341  * so that the disk head will be in transit between them for at least
342  * fs_rotdelay milliseconds.  This is to allow time for the processor to
343  * schedule another I/O transfer.
344  */
345 daddr_t
346 blkpref(ip, lbn, indx, bap)
347 	struct inode *ip;
348 	daddr_t lbn;
349 	int indx;
350 	daddr_t *bap;
351 {
352 	register struct fs *fs;
353 	register int cg;
354 	int avgbfree, startcg;
355 	daddr_t nextblk;
356 
357 	fs = ip->i_fs;
358 	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
359 		if (lbn < NDADDR) {
360 			cg = itog(fs, ip->i_number);
361 			return (fs->fs_fpg * cg + fs->fs_frag);
362 		}
363 		/*
364 		 * Find a cylinder with greater than average number of
365 		 * unused data blocks.
366 		 */
367 		if (indx == 0 || bap[indx - 1] == 0)
368 			startcg = itog(fs, ip->i_number) + lbn / fs->fs_maxbpg;
369 		else
370 			startcg = dtog(fs, bap[indx - 1]) + 1;
371 		startcg %= fs->fs_ncg;
372 		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
373 		for (cg = startcg; cg < fs->fs_ncg; cg++)
374 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
375 				fs->fs_cgrotor = cg;
376 				return (fs->fs_fpg * cg + fs->fs_frag);
377 			}
378 		for (cg = 0; cg <= startcg; cg++)
379 			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
380 				fs->fs_cgrotor = cg;
381 				return (fs->fs_fpg * cg + fs->fs_frag);
382 			}
383 		return (NULL);
384 	}
385 	/*
386 	 * One or more previous blocks have been laid out. If less
387 	 * than fs_maxcontig previous blocks are contiguous, the
388 	 * next block is requested contiguously, otherwise it is
389 	 * requested rotationally delayed by fs_rotdelay milliseconds.
390 	 */
391 	nextblk = bap[indx - 1] + fs->fs_frag;
392 	if (indx > fs->fs_maxcontig &&
393 	    bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig)
394 	    != nextblk)
395 		return (nextblk);
396 	if (fs->fs_rotdelay != 0)
397 		/*
398 		 * Here we convert ms of delay to frags as:
399 		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
400 		 *	((sect/frag) * (ms/sec))
401 		 * then round up to the next block.
402 		 */
403 		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
404 		    (NSPF(fs) * 1000), fs->fs_frag);
405 	return (nextblk);
406 }
407 
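/*
 * Editor's note: a worked example (not in the original) of the
 * rotdelay conversion above, assuming fs_rotdelay = 4 ms,
 * fs_rps = 60 rev/sec, fs_nsect = 32 sect/track, NSPF(fs) = 2
 * sect/frag and fs_frag = 8:
 *
 *	4 * 60 * 32 / (2 * 1000) = 3 frags (integer division)
 *	roundup(3, 8)            = 8 frags
 *
 * so the preferred address lands one full block past the strictly
 * contiguous position.
 */
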
408 /*
409  * Implement the cylinder overflow algorithm.
410  *
411  * The policy implemented by this algorithm is:
412  *   1) allocate the block in its requested cylinder group.
413  *   2) quadratically rehash on the cylinder group number.
414  *   3) brute force search for a free block.
415  */
416 /*VARARGS5*/
417 u_long
418 hashalloc(ip, cg, pref, size, allocator)
419 	struct inode *ip;
420 	int cg;
421 	long pref;
422 	int size;	/* size for data blocks, mode for inodes */
423 	u_long (*allocator)();
424 {
425 	register struct fs *fs;
426 	long result;
427 	int i, icg = cg;
428 
429 	fs = ip->i_fs;
430 	/*
431 	 * 1: preferred cylinder group
432 	 */
433 	result = (*allocator)(ip, cg, pref, size);
434 	if (result)
435 		return (result);
436 	/*
437 	 * 2: quadratic rehash
438 	 */
439 	for (i = 1; i < fs->fs_ncg; i *= 2) {
440 		cg += i;
441 		if (cg >= fs->fs_ncg)
442 			cg -= fs->fs_ncg;
443 		result = (*allocator)(ip, cg, 0, size);
444 		if (result)
445 			return (result);
446 	}
447 	/*
448 	 * 3: brute force search
449 	 * Note that we start at i == 2, since 0 was checked initially,
450 	 * and 1 is always checked in the quadratic rehash.
451 	 */
452 	cg = (icg + 2) % fs->fs_ncg;
453 	for (i = 2; i < fs->fs_ncg; i++) {
454 		result = (*allocator)(ip, cg, 0, size);
455 		if (result)
456 			return (result);
457 		cg++;
458 		if (cg == fs->fs_ncg)
459 			cg = 0;
460 	}
461 	return (NULL);
462 }
463 
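/*
 * Editor's note: a worked example (not in the original) of the
 * probe order above for fs_ncg = 16 and a preferred group of 5.
 * The quadratic rehash visits groups 6, 8, 12 and 4 (step sizes
 * 1, 2, 4 and 8, wrapping mod 16); the brute force pass then
 * starts at group (5 + 2) % 16 = 7 and walks 7, 8, ..., 15,
 * 0, 1, ..., 4, so every cylinder group is examined at least once.
 */
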
464 /*
465  * Determine whether a fragment can be extended.
466  *
467  * Check to see if the necessary fragments are available, and
468  * if they are, allocate them.
469  */
470 daddr_t
471 fragextend(ip, cg, bprev, osize, nsize)
472 	struct inode *ip;
473 	int cg;
474 	long bprev;
475 	int osize, nsize;
476 {
477 	register struct fs *fs;
478 	register struct buf *bp;
479 	register struct cg *cgp;
480 	long bno;
481 	int frags, bbase;
482 	int i;
483 
484 	fs = ip->i_fs;
485 	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
486 		return (NULL);
487 	frags = numfrags(fs, nsize);
488 	bbase = fragnum(fs, bprev);
489 	if (bbase > fragnum(fs, (bprev + frags - 1))) {
490 		/* cannot extend across a block boundary */
491 		return (NULL);
492 	}
493 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
494 	cgp = bp->b_un.b_cg;
495 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
496 		brelse(bp);
497 		return (NULL);
498 	}
499 	cgp->cg_time = time.tv_sec;
500 	bno = dtogd(fs, bprev);
501 	for (i = numfrags(fs, osize); i < frags; i++)
502 		if (isclr(cgp->cg_free, bno + i)) {
503 			brelse(bp);
504 			return (NULL);
505 		}
506 	/*
507 	 * the current fragment can be extended
508 	 * deduct the count on fragment being extended into
509 	 * increase the count on the remaining fragment (if any)
510 	 * allocate the extended piece
511 	 */
512 	for (i = frags; i < fs->fs_frag - bbase; i++)
513 		if (isclr(cgp->cg_free, bno + i))
514 			break;
515 	cgp->cg_frsum[i - numfrags(fs, osize)]--;
516 	if (i != frags)
517 		cgp->cg_frsum[i - frags]++;
518 	for (i = numfrags(fs, osize); i < frags; i++) {
519 		clrbit(cgp->cg_free, bno + i);
520 		cgp->cg_cs.cs_nffree--;
521 		fs->fs_cstotal.cs_nffree--;
522 		fs->fs_cs(fs, cg).cs_nffree--;
523 	}
524 	fs->fs_fmod++;
525 	bdwrite(bp);
526 	return (bprev);
527 }
528 
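/*
 * Editor's note: a worked example (not in the original) of the
 * summary bookkeeping above.  Let fs_frag = 8, osize = 2 frags and
 * nsize = 4 frags, with the old fragment in frags 0-1 of its
 * block, frags 2-6 free and frag 7 in use.  The scan for the end
 * of the free run stops at i = 7, so cg_frsum[7 - 2]-- retires the
 * old 5-frag run, cg_frsum[7 - 4]++ records the 3-frag run left at
 * frags 4-6, and the final loop claims frags 2-3, reducing each of
 * the nffree counts by 2.
 */
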
529 /*
530  * Determine whether a block can be allocated.
531  *
532  * Check to see if a block of the appropriate size is available,
533  * and if it is, allocate it.
534  */
535 daddr_t
536 alloccg(ip, cg, bpref, size)
537 	struct inode *ip;
538 	int cg;
539 	daddr_t bpref;
540 	int size;
541 {
542 	register struct fs *fs;
543 	register struct buf *bp;
544 	register struct cg *cgp;
545 	int bno, frags;
546 	int allocsiz;
547 	register int i;
548 
549 	fs = ip->i_fs;
550 	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
551 		return (NULL);
552 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
553 	cgp = bp->b_un.b_cg;
554 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC ||
555 	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
556 		brelse(bp);
557 		return (NULL);
558 	}
559 	cgp->cg_time = time.tv_sec;
560 	if (size == fs->fs_bsize) {
561 		bno = alloccgblk(fs, cgp, bpref);
562 		bdwrite(bp);
563 		return (bno);
564 	}
565 	/*
566 	 * check to see if any fragments are already available
567 	 * allocsiz is the size which will be allocated, hacking
568 	 * it down to a smaller size if necessary
569 	 */
570 	frags = numfrags(fs, size);
571 	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
572 		if (cgp->cg_frsum[allocsiz] != 0)
573 			break;
574 	if (allocsiz == fs->fs_frag) {
575 		/*
576 		 * no fragments were available, so a block will be
577 		 * allocated, and hacked up
578 		 */
579 		if (cgp->cg_cs.cs_nbfree == 0) {
580 			brelse(bp);
581 			return (NULL);
582 		}
583 		bno = alloccgblk(fs, cgp, bpref);
584 		bpref = dtogd(fs, bno);
585 		for (i = frags; i < fs->fs_frag; i++)
586 			setbit(cgp->cg_free, bpref + i);
587 		i = fs->fs_frag - frags;
588 		cgp->cg_cs.cs_nffree += i;
589 		fs->fs_cstotal.cs_nffree += i;
590 		fs->fs_cs(fs, cg).cs_nffree += i;
591 		fs->fs_fmod++;
592 		cgp->cg_frsum[i]++;
593 		bdwrite(bp);
594 		return (bno);
595 	}
596 	bno = mapsearch(fs, cgp, bpref, allocsiz);
597 	if (bno < 0) {
598 		brelse(bp);
599 		return (NULL);
600 	}
601 	for (i = 0; i < frags; i++)
602 		clrbit(cgp->cg_free, bno + i);
603 	cgp->cg_cs.cs_nffree -= frags;
604 	fs->fs_cstotal.cs_nffree -= frags;
605 	fs->fs_cs(fs, cg).cs_nffree -= frags;
606 	fs->fs_fmod++;
607 	cgp->cg_frsum[allocsiz]--;
608 	if (frags != allocsiz)
609 		cgp->cg_frsum[allocsiz - frags]++;
610 	bdwrite(bp);
611 	return (cg * fs->fs_fpg + bno);
612 }
613 
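/*
 * Editor's note: a worked example (not in the original) of the
 * fragment search above.  To allocate 3 frags with fs_frag = 8 and
 * cg_frsum[1..7] = { 4, 0, 0, 1, 0, 0, 2 }, the scan starts at
 * allocsiz = 3 and stops at allocsiz = 4, the smallest run that
 * fits.  mapsearch() locates that 4-frag run, 3 of its frags are
 * claimed, and cg_frsum[4]-- together with cg_frsum[4 - 3]++
 * records the single frag left over.
 */
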
614 /*
615  * Allocate a block in a cylinder group.
616  *
617  * This algorithm implements the following policy:
618  *   1) allocate the requested block.
619  *   2) allocate a rotationally optimal block in the same cylinder.
620  *   3) allocate the next available block on the block rotor for the
621  *      specified cylinder group.
622  * Note that this routine only allocates fs_bsize blocks; these
623  * blocks may be fragmented by the routine that allocates them.
624  */
625 daddr_t
626 alloccgblk(fs, cgp, bpref)
627 	register struct fs *fs;
628 	register struct cg *cgp;
629 	daddr_t bpref;
630 {
631 	daddr_t bno;
632 	int cylno, pos, delta;
633 	short *cylbp;
634 	register int i;
635 
636 	if (bpref == 0) {
637 		bpref = cgp->cg_rotor;
638 		goto norot;
639 	}
640 	bpref = blknum(fs, bpref);
641 	bpref = dtogd(fs, bpref);
642 	/*
643 	 * if the requested block is available, use it
644 	 */
645 	if (isblock(fs, cgp->cg_free, fragstoblks(fs, bpref))) {
646 		bno = bpref;
647 		goto gotit;
648 	}
649 	/*
650 	 * check for a block available on the same cylinder
651 	 */
652 	cylno = cbtocylno(fs, bpref);
653 	if (cgp->cg_btot[cylno] == 0)
654 		goto norot;
655 	if (fs->fs_cpc == 0) {
656 		/*
657 		 * block layout info is not available, so just have
658 		 * to take any block in this cylinder.
659 		 */
660 		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
661 		goto norot;
662 	}
663 	/*
664 	 * check the summary information to see if a block is
665 	 * available in the requested cylinder starting at the
666 	 * requested rotational position and proceeding around.
667 	 */
668 	cylbp = cgp->cg_b[cylno];
669 	pos = cbtorpos(fs, bpref);
670 	for (i = pos; i < NRPOS; i++)
671 		if (cylbp[i] > 0)
672 			break;
673 	if (i == NRPOS)
674 		for (i = 0; i < pos; i++)
675 			if (cylbp[i] > 0)
676 				break;
677 	if (cylbp[i] > 0) {
678 		/*
679 		 * found a rotational position, now find the actual
680 		 * block. A panic if none is actually there.
681 		 * block. It is a panic if none is actually there.
682 		pos = cylno % fs->fs_cpc;
683 		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
684 		if (fs->fs_postbl[pos][i] == -1) {
685 			printf("pos = %d, i = %d, fs = %s\n",
686 			    pos, i, fs->fs_fsmnt);
687 			panic("alloccgblk: cyl groups corrupted");
688 		}
689 		for (i = fs->fs_postbl[pos][i];; ) {
690 			if (isblock(fs, cgp->cg_free, bno + i)) {
691 				bno = blkstofrags(fs, (bno + i));
692 				goto gotit;
693 			}
694 			delta = fs->fs_rotbl[i];
695 			if (delta <= 0 || delta > MAXBPC - i)
696 				break;
697 			i += delta;
698 		}
699 		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
700 		panic("alloccgblk: can't find blk in cyl");
701 	}
702 norot:
703 	/*
704 	 * no blocks in the requested cylinder, so take next
705 	 * available one in this cylinder group.
706 	 */
707 	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
708 	if (bno < 0)
709 		return (NULL);
710 	cgp->cg_rotor = bno;
711 gotit:
712 	clrblock(fs, cgp->cg_free, (long)fragstoblks(fs, bno));
713 	cgp->cg_cs.cs_nbfree--;
714 	fs->fs_cstotal.cs_nbfree--;
715 	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
716 	cylno = cbtocylno(fs, bno);
717 	cgp->cg_b[cylno][cbtorpos(fs, bno)]--;
718 	cgp->cg_btot[cylno]--;
719 	fs->fs_fmod++;
720 	return (cgp->cg_cgx * fs->fs_fpg + bno);
721 }
722 
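/*
 * Editor's note (not in the original): the rotational search above
 * consults the summary counts cg_btot[c] (free blocks on cylinder
 * c) and cg_b[c][r] (free blocks at rotational position r), then
 * the layout tables: fs_postbl[p][r] yields the first block map
 * index at position r within a cylinder span, and fs_rotbl[i] the
 * offset to the next candidate at the same position.  A
 * nonpositive delta, or one that would run past MAXBPC, ends the
 * chain, which is why an exhausted chain is a panic once cg_b has
 * promised a free block.
 */
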
723 /*
724  * Determine whether an inode can be allocated.
725  *
726  * Check to see if an inode is available, and if it is,
727  * allocate it using the following policy:
728  *   1) allocate the requested inode.
729  *   2) allocate the next available inode after the requested
730  *      inode in the specified cylinder group.
731  */
732 ino_t
733 ialloccg(ip, cg, ipref, mode)
734 	struct inode *ip;
735 	int cg;
736 	daddr_t ipref;
737 	int mode;
738 {
739 	register struct fs *fs;
740 	register struct cg *cgp;
741 	struct buf *bp;
742 	int start, len, loc, map, i;
743 
744 	fs = ip->i_fs;
745 	if (fs->fs_cs(fs, cg).cs_nifree == 0)
746 		return (NULL);
747 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
748 	cgp = bp->b_un.b_cg;
749 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC ||
750 	    cgp->cg_cs.cs_nifree == 0) {
751 		brelse(bp);
752 		return (NULL);
753 	}
754 	cgp->cg_time = time.tv_sec;
755 	if (ipref) {
756 		ipref %= fs->fs_ipg;
757 		if (isclr(cgp->cg_iused, ipref))
758 			goto gotit;
759 	}
760 	start = cgp->cg_irotor / NBBY;
761 	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
762 	loc = skpc(0xff, len, &cgp->cg_iused[start]);
763 	if (loc == 0) {
764 		len = start + 1;
765 		start = 0;
766 		loc = skpc(0xff, len, &cgp->cg_iused[0]);
767 		if (loc == 0) {
768 			printf("cg = %d, irotor = %d, fs = %s\n",
769 			    cg, cgp->cg_irotor, fs->fs_fsmnt);
770 			panic("ialloccg: map corrupted");
771 			/* NOTREACHED */
772 		}
773 	}
774 	i = start + len - loc;
775 	map = cgp->cg_iused[i];
776 	ipref = i * NBBY;
777 	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
778 		if ((map & i) == 0) {
779 			cgp->cg_irotor = ipref;
780 			goto gotit;
781 		}
782 	}
783 	printf("fs = %s\n", fs->fs_fsmnt);
784 	panic("ialloccg: block not in map");
785 	/* NOTREACHED */
786 gotit:
787 	setbit(cgp->cg_iused, ipref);
788 	cgp->cg_cs.cs_nifree--;
789 	fs->fs_cstotal.cs_nifree--;
790 	fs->fs_cs(fs, cg).cs_nifree--;
791 	fs->fs_fmod++;
792 	if ((mode & IFMT) == IFDIR) {
793 		cgp->cg_cs.cs_ndir++;
794 		fs->fs_cstotal.cs_ndir++;
795 		fs->fs_cs(fs, cg).cs_ndir++;
796 	}
797 	bdwrite(bp);
798 	return (cg * fs->fs_ipg + ipref);
799 }
800 
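/*
 * Editor's note (not in the original): skpc(0xff, len, cp) is
 * assumed, per its usual kernel definition, to skip leading bytes
 * equal to 0xff and return the count of bytes remaining, or 0 if
 * all len bytes match.  Hence i = start + len - loc indexes the
 * first inode-map byte with a clear bit at or after the rotor; the
 * bit loop above then finds the first clear bit within that byte
 * and records the allocated inode in cg_irotor.
 */
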
801 /*
802  * Free a block or fragment.
803  *
804  * The specified block or fragment is placed back in the
805  * free map. If a fragment is deallocated, a possible
806  * block reassembly is checked.
807  */
808 free(ip, bno, size)
809 	register struct inode *ip;
810 	daddr_t bno;
811 	off_t size;
812 {
813 	register struct fs *fs;
814 	register struct cg *cgp;
815 	register struct buf *bp;
816 	int cg, blk, frags, bbase;
817 	register int i;
818 
819 	fs = ip->i_fs;
820 	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
821 		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
822 		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
823 		panic("free: bad size");
824 	}
825 	cg = dtog(fs, bno);
826 	if (badblock(fs, bno)) {
827 		printf("bad block %d, ino %d\n", bno, ip->i_number);
828 		return;
829 	}
830 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
831 	cgp = bp->b_un.b_cg;
832 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
833 		brelse(bp);
834 		return;
835 	}
836 	cgp->cg_time = time.tv_sec;
837 	bno = dtogd(fs, bno);
838 	if (size == fs->fs_bsize) {
839 		if (isblock(fs, cgp->cg_free, fragstoblks(fs, bno))) {
840 			printf("dev = 0x%x, block = %d, fs = %s\n",
841 			    ip->i_dev, bno, fs->fs_fsmnt);
842 			panic("free: freeing free block");
843 		}
844 		setblock(fs, cgp->cg_free, fragstoblks(fs, bno));
845 		cgp->cg_cs.cs_nbfree++;
846 		fs->fs_cstotal.cs_nbfree++;
847 		fs->fs_cs(fs, cg).cs_nbfree++;
848 		i = cbtocylno(fs, bno);
849 		cgp->cg_b[i][cbtorpos(fs, bno)]++;
850 		cgp->cg_btot[i]++;
851 	} else {
852 		bbase = bno - fragnum(fs, bno);
853 		/*
854 		 * decrement the counts associated with the old frags
855 		 */
856 		blk = blkmap(fs, cgp->cg_free, bbase);
857 		fragacct(fs, blk, cgp->cg_frsum, -1);
858 		/*
859 		 * deallocate the fragment
860 		 */
861 		frags = numfrags(fs, size);
862 		for (i = 0; i < frags; i++) {
863 			if (isset(cgp->cg_free, bno + i)) {
864 				printf("dev = 0x%x, block = %d, fs = %s\n",
865 				    ip->i_dev, bno + i, fs->fs_fsmnt);
866 				panic("free: freeing free frag");
867 			}
868 			setbit(cgp->cg_free, bno + i);
869 		}
870 		cgp->cg_cs.cs_nffree += i;
871 		fs->fs_cstotal.cs_nffree += i;
872 		fs->fs_cs(fs, cg).cs_nffree += i;
873 		/*
874 		 * add back in counts associated with the new frags
875 		 */
876 		blk = blkmap(fs, cgp->cg_free, bbase);
877 		fragacct(fs, blk, cgp->cg_frsum, 1);
878 		/*
879 		 * if a complete block has been reassembled, account for it
880 		 */
881 		if (isblock(fs, cgp->cg_free, fragstoblks(fs, bbase))) {
882 			cgp->cg_cs.cs_nffree -= fs->fs_frag;
883 			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
884 			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
885 			cgp->cg_cs.cs_nbfree++;
886 			fs->fs_cstotal.cs_nbfree++;
887 			fs->fs_cs(fs, cg).cs_nbfree++;
888 			i = cbtocylno(fs, bbase);
889 			cgp->cg_b[i][cbtorpos(fs, bbase)]++;
890 			cgp->cg_btot[i]++;
891 		}
892 	}
893 	fs->fs_fmod++;
894 	bdwrite(bp);
895 }
896 
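/*
 * Editor's note: a worked example (not in the original) of the
 * reassembly case above.  Let fs_frag = 8 and suppose a 3-frag
 * piece is freed into a block whose other 5 frags are already
 * free.  fragacct(..., -1) retires the old 5-frag run from
 * cg_frsum, the loop sets the 3 freed bits and credits 3 to the
 * nffree counts (the other 5 were already counted), and
 * fragacct(..., 1) records nothing because cg_frsum only tracks
 * runs shorter than a whole block.  The final branch then converts
 * the credit: all 8 frags are subtracted from nffree, and one
 * block is added to the nbfree counts and to the cylinder summary.
 */
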
897 /*
898  * Free an inode.
899  *
900  * The specified inode is placed back in the free map.
901  */
902 ifree(ip, ino, mode)
903 	struct inode *ip;
904 	ino_t ino;
905 	int mode;
906 {
907 	register struct fs *fs;
908 	register struct cg *cgp;
909 	register struct buf *bp;
910 	int cg;
911 
912 	fs = ip->i_fs;
913 	if ((unsigned)ino >= fs->fs_ipg * fs->fs_ncg) {
914 		printf("dev = 0x%x, ino = %d, fs = %s\n",
915 		    ip->i_dev, ino, fs->fs_fsmnt);
916 		panic("ifree: range");
917 	}
918 	cg = itog(fs, ino);
919 	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
920 	cgp = bp->b_un.b_cg;
921 	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
922 		brelse(bp);
923 		return;
924 	}
925 	cgp->cg_time = time.tv_sec;
926 	ino %= fs->fs_ipg;
927 	if (isclr(cgp->cg_iused, ino)) {
928 		printf("dev = 0x%x, ino = %d, fs = %s\n",
929 		    ip->i_dev, ino, fs->fs_fsmnt);
930 		panic("ifree: freeing free inode");
931 	}
932 	clrbit(cgp->cg_iused, ino);
933 	if (ino < cgp->cg_irotor)
934 		cgp->cg_irotor = ino;
935 	cgp->cg_cs.cs_nifree++;
936 	fs->fs_cstotal.cs_nifree++;
937 	fs->fs_cs(fs, cg).cs_nifree++;
938 	if ((mode & IFMT) == IFDIR) {
939 		cgp->cg_cs.cs_ndir--;
940 		fs->fs_cstotal.cs_ndir--;
941 		fs->fs_cs(fs, cg).cs_ndir--;
942 	}
943 	fs->fs_fmod++;
944 	bdwrite(bp);
945 }
946 
947 /*
948  * Find a block of the specified size in the specified cylinder group.
949  *
950  * It is a panic if a request is made to find a block if none are
951  * available.
952  */
953 daddr_t
954 mapsearch(fs, cgp, bpref, allocsiz)
955 	register struct fs *fs;
956 	register struct cg *cgp;
957 	daddr_t bpref;
958 	int allocsiz;
959 {
960 	daddr_t bno;
961 	int start, len, loc, i;
962 	int blk, field, subfield, pos;
963 
964 	/*
965 	 * find the fragment by searching through the free block
966 	 * map for an appropriate bit pattern
967 	 */
968 	if (bpref)
969 		start = dtogd(fs, bpref) / NBBY;
970 	else
971 		start = cgp->cg_frotor / NBBY;
972 	len = howmany(fs->fs_fpg, NBBY) - start;
973 	loc = scanc((unsigned)len, (caddr_t)&cgp->cg_free[start],
974 		(caddr_t)fragtbl[fs->fs_frag],
975 		(int)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
976 	if (loc == 0) {
977 		len = start + 1;
978 		start = 0;
979 		loc = scanc((unsigned)len, (caddr_t)&cgp->cg_free[0],
980 			(caddr_t)fragtbl[fs->fs_frag],
981 			(int)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
982 		if (loc == 0) {
983 			printf("start = %d, len = %d, fs = %s\n",
984 			    start, len, fs->fs_fsmnt);
985 			panic("alloccg: map corrupted");
986 			/* NOTREACHED */
987 		}
988 	}
989 	bno = (start + len - loc) * NBBY;
990 	cgp->cg_frotor = bno;
991 	/*
992 	 * found the byte in the map
993 	 * sift through the bits to find the selected frag
994 	 */
995 	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
996 		blk = blkmap(fs, cgp->cg_free, bno);
997 		blk <<= 1;
998 		field = around[allocsiz];
999 		subfield = inside[allocsiz];
1000 		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
1001 			if ((blk & field) == subfield)
1002 				return (bno + pos);
1003 			field <<= 1;
1004 			subfield <<= 1;
1005 		}
1006 	}
1007 	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
1008 	panic("alloccg: block not in map");
1009 	return (-1);
1010 }
1011 
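/*
 * Editor's note (not in the original): around[] and inside[] are
 * assumed to follow their usual ffs_tables.c form,
 * around[n] = (1 << (n + 2)) - 1 and inside[n] = ((1 << n) - 1) << 1.
 * For allocsiz = 3 the inner loop therefore masks five bits at a
 * time with field = 0x1f and accepts subfield = 0x0e: three set
 * (free) bits bracketed by two clear (allocated or boundary) bits.
 * The blk <<= 1 above supplies the low boundary bit for a run that
 * starts at frag 0 of its block.
 */
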
1012 /*
1013  * Fserr prints the name of a file system with an error diagnostic.
1014  *
1015  * The form of the error message is:
1016  *	fs: error message
1017  */
1018 fserr(fs, cp)
1019 	struct fs *fs;
1020 	char *cp;
1021 {
1022 
1023 	log(LOG_ERR, "%s: %s\n", fs->fs_fsmnt, cp);
1024 }
1025