xref: /csrg-svn/sys/ufs/lfs/lfs_alloc.c (revision 4651)
1 /* Copyright (c) 1981 Regents of the University of California */
2 
3 static char vers[] = "@(#)lfs_alloc.c 1.7 10/29/81";
4 
5 /*	alloc.c	4.8	81/03/08	*/
6 
7 #include "../h/param.h"
8 #include "../h/systm.h"
9 #include "../h/mount.h"
10 #include "../h/fs.h"
11 #include "../h/conf.h"
12 #include "../h/buf.h"
13 #include "../h/inode.h"
14 #include "../h/dir.h"
15 #include "../h/user.h"
16 
17 extern long		hashalloc();
18 extern long		ialloccg();
19 extern daddr_t		alloccg();
20 extern daddr_t		alloccgblk();
21 extern daddr_t		fragextend();
22 extern daddr_t		blkpref();
23 extern daddr_t		mapsearch();
24 extern int		inside[], around[];
25 extern unsigned char	fragtbl[];
26 
/*
 * Allocate a block (size == BSIZE) or a fragment of a block for
 * inode ip on device dev, preferably at block bpref.
 * Returns a locked buffer holding the zeroed block, or NULL with
 * u.u_error set to ENOSPC when no space is available.
 */
struct buf *
alloc(dev, ip, bpref, size)
	dev_t dev;
	register struct inode *ip;
	daddr_t bpref;
	int size;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	if ((unsigned)size > BSIZE || size % FSIZE != 0)
		panic("alloc: bad size");
	fs = getfs(dev);
	/*
	 * NOTE(review): the reserve test below compares fs_nbfree
	 * against a fraction of itself, so for minfree < 100 it can
	 * never be true and the free-space reserve is not enforced
	 * for ordinary users.  Presumably the right-hand side was
	 * meant to be a fraction of the file system's total data
	 * blocks -- verify against a later revision before changing.
	 */
	if (size == BSIZE &&
	    (fs->fs_nbfree == 0 ||
	    (!suser() && fs->fs_nbfree < fs->fs_nbfree * minfree / 100)))
		goto nospace;
	/*
	 * With no caller preference, place the block in the cylinder
	 * group of the file's inode; otherwise use the preferred
	 * block's cylinder group.
	 */
	if (bpref == 0)
		cg = itog(ip->i_number, fs);
	else
		cg = dtog(bpref, fs);
	bno = hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
	if (bno == 0)
		goto nospace;
	/* no read needed: getblk plus clrbuf yields a zeroed buffer */
	bp = getblk(dev, bno, size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
62 
/*
 * Reallocate a fragment to a bigger size, from osize to nsize bytes.
 * The fragment at bprev is extended in place when possible; otherwise
 * a new home is allocated and the old contents carried over.
 * Returns a buffer for the data, or NULL with u.u_error = ENOSPC
 * when the file system is full.
 */
struct buf *
realloccg(dev, ip, bprev, bpref, osize, nsize)
	dev_t dev;
	register struct inode *ip;
	daddr_t bprev, bpref;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	if ((unsigned)osize > BSIZE || osize % FSIZE != 0 ||
	    (unsigned)nsize > BSIZE || nsize % FSIZE != 0)
		panic("realloccg: bad size");
	fs = getfs(dev);
	if (bprev == 0)
		panic("realloccg: bad bprev");
	else
		cg = dtog(bprev, fs);
	/*
	 * First try to grow the existing fragment in place within its
	 * block; on success no data copy is needed.
	 */
	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		/* NOTE(review): bread result not checked for B_ERROR */
		bp = bread(dev, bno, osize);
		bp->b_bcount = nsize;
		/* zero the newly added portion beyond the old size */
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	bno = hashalloc(dev, fs, cg, (long)bpref, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy
		 *
		 * The data pointers of the old and new buffers are
		 * swapped so the old contents appear in the new buffer
		 * without copying bytes; the old buffer is invalidated
		 * and the old fragment freed.
		 */
		obp = bread(dev, bprev, osize);
		bp = getblk(dev, bno, nsize);
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(dev, bprev, osize);
		/* zero the newly added portion beyond the old size */
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return(bp);
	}
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
115 
116 struct inode *
117 ialloc(dev, ipref, mode)
118 	dev_t dev;
119 	ino_t ipref;
120 	int mode;
121 {
122 	daddr_t ino;
123 	register struct fs *fs;
124 	register struct inode *ip;
125 	int cg;
126 
127 	fs = getfs(dev);
128 	if (fs->fs_nifree == 0)
129 		goto noinodes;
130 	cg = itog(ipref, fs);
131 	ino = hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
132 	if (ino == 0)
133 		goto noinodes;
134 	ip = iget(dev, ino);
135 	if (ip == NULL) {
136 		ifree(dev, ino);
137 		return (NULL);
138 	}
139 	if (ip->i_mode)
140 		panic("ialloc: dup alloc");
141 	return (ip);
142 noinodes:
143 	fserr(fs, "out of inodes");
144 	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
145 	u.u_error = ENOSPC;
146 	return (NULL);
147 }
148 
149 /*
150  * find a cylinder to place a directory
151  */
152 dirpref(dev)
153 	dev_t dev;
154 {
155 	register struct fs *fs;
156 	int cg, minndir, mincg, avgifree;
157 
158 	fs = getfs(dev);
159 	avgifree = fs->fs_nifree / fs->fs_ncg;
160 	minndir = fs->fs_ipg;
161 	mincg = 0;
162 	for (cg = 0; cg < fs->fs_ncg; cg++)
163 		if (fs->fs_cs(cg).cs_ndir < minndir &&
164 		    fs->fs_cs(cg).cs_nifree >= avgifree) {
165 			mincg = cg;
166 			minndir = fs->fs_cs(cg).cs_ndir;
167 		}
168 	return (fs->fs_ipg * mincg);
169 }
170 
171 /*
172  * select a cylinder to place a large block of data
173  */
174 blkpref(dev)
175 	dev_t dev;
176 {
177 	register struct fs *fs;
178 	int cg, avgbfree;
179 
180 	fs = getfs(dev);
181 	avgbfree = fs->fs_nbfree / fs->fs_ncg;
182 	for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
183 		if (fs->fs_cs(cg).cs_nbfree >= avgbfree) {
184 			fs->fs_cgrotor = cg;
185 			return (fs->fs_fpg * cg + FRAG);
186 		}
187 	for (cg = 0; cg <= fs->fs_cgrotor; cg++)
188 		if (fs->fs_cs(cg).cs_nbfree >= avgbfree) {
189 			fs->fs_cgrotor = cg;
190 			return (fs->fs_fpg * cg + FRAG);
191 		}
192 	return (0);
193 }
194 
195 long
196 hashalloc(dev, fs, cg, pref, size, allocator)
197 	dev_t dev;
198 	register struct fs *fs;
199 	int cg;
200 	long pref;
201 	int size;	/* size for data blocks, mode for inodes */
202 	long (*allocator)();
203 {
204 	long result;
205 	int i, icg = cg;
206 
207 	/*
208 	 * 1: preferred cylinder group
209 	 */
210 	result = (*allocator)(dev, fs, cg, pref, size);
211 	if (result)
212 		return (result);
213 	/*
214 	 * 2: quadratic rehash
215 	 */
216 	for (i = 1; i < fs->fs_ncg; i *= 2) {
217 		cg += i;
218 		if (cg >= fs->fs_ncg)
219 			cg -= fs->fs_ncg;
220 		result = (*allocator)(dev, fs, cg, 0, size);
221 		if (result)
222 			return (result);
223 	}
224 	/*
225 	 * 3: brute force search
226 	 */
227 	cg = icg;
228 	for (i = 0; i < fs->fs_ncg; i++) {
229 		result = (*allocator)(dev, fs, cg, 0, size);
230 		if (result)
231 			return (result);
232 		cg++;
233 		if (cg == fs->fs_ncg)
234 			cg = 0;
235 	}
236 	return (0);
237 }
238 
239 daddr_t
240 fragextend(dev, fs, cg, bprev, osize, nsize)
241 	dev_t dev;
242 	register struct fs *fs;
243 	int cg;
244 	long bprev;
245 	int osize, nsize;
246 {
247 	register struct buf *bp;
248 	register struct cg *cgp;
249 	long bno;
250 	int frags, bbase;
251 	int i;
252 
253 	frags = nsize / FSIZE;
254 	bbase = bprev % FRAG;
255 	if (bbase > (bprev + frags - 1) % FRAG) {
256 		/* cannot extend across a block boundry */
257 		return (0);
258 	}
259 	bp = bread(dev, cgtod(cg, fs), BSIZE);
260 	if (bp->b_flags & B_ERROR)
261 		return (0);
262 	cgp = bp->b_un.b_cg;
263 	bno = bprev % fs->fs_fpg;
264 	for (i = osize / FSIZE; i < frags; i++) {
265 		if (isclr(cgp->cg_free, bno + i))
266 			break;
267 	}
268 	if (i == frags) {
269 		/*
270 		 * the current fragment can be extended
271 		 * deduct the count on fragment being extended into
272 		 * increase the count on the remaining fragment (if any)
273 		 * allocate the extended piece
274 		 */
275 		for (i = frags; i < FRAG - bbase; i++)
276 			if (isclr(cgp->cg_free, bno + i))
277 				break;
278 		cgp->cg_frsum[i - osize / FSIZE]--;
279 		if (i != frags)
280 			cgp->cg_frsum[i - frags]++;
281 		for (i = osize / FSIZE; i < frags; i++) {
282 			clrbit(cgp->cg_free, bno + i);
283 			cgp->cg_nffree--;
284 			fs->fs_nffree--;
285 		}
286 		fs->fs_fmod++;
287 		bdwrite(bp);
288 		return (bprev);
289 	}
290 	brelse(bp);
291 	return (0);
292 }
293 
294 daddr_t
295 alloccg(dev, fs, cg, bpref, size)
296 	dev_t dev;
297 	register struct fs *fs;
298 	int cg;
299 	daddr_t bpref;
300 	int size;
301 {
302 	register struct buf *bp;
303 	register struct cg *cgp;
304 	int bno, frags;
305 	int allocsiz;
306 	register int i;
307 
308 	bp = bread(dev, cgtod(cg, fs), BSIZE);
309 	if (bp->b_flags & B_ERROR)
310 		return (0);
311 	cgp = bp->b_un.b_cg;
312 	if (size == BSIZE) {
313 		if (cgp->cg_nbfree == 0) {
314 			brelse(bp);
315 			return (0);
316 		}
317 		bno = alloccgblk(dev, fs, cgp, bpref);
318 		bdwrite(bp);
319 		return (bno);
320 	}
321 	/*
322 	 * check to see if any fragments are already available
323 	 * allocsiz is the size which will be allocated, hacking
324 	 * it down to a smaller size if necessary
325 	 */
326 	frags = size / FSIZE;
327 	for (allocsiz = frags; allocsiz < FRAG; allocsiz++)
328 		if (cgp->cg_frsum[allocsiz] != 0)
329 			break;
330 	if (allocsiz == FRAG) {
331 		/*
332 		 * no fragments were available, so a block will be
333 		 * allocated, and hacked up
334 		 */
335 		if (cgp->cg_nbfree == 0) {
336 			brelse(bp);
337 			return (0);
338 		}
339 		bno = alloccgblk(dev, fs, cgp, bpref);
340 		bpref = bno % fs->fs_fpg;
341 		for (i = frags; i < FRAG; i++)
342 			setbit(cgp->cg_free, bpref + i);
343 		i = FRAG - frags;
344 		cgp->cg_nffree += i;
345 		fs->fs_nffree += i;
346 		cgp->cg_frsum[i]++;
347 		bdwrite(bp);
348 		return (bno);
349 	}
350 	bno = mapsearch(fs, cgp, bpref, allocsiz);
351 	if (bno == 0)
352 		return (0);
353 	for (i = 0; i < frags; i++)
354 		clrbit(cgp->cg_free, bno + i);
355 	cgp->cg_nffree -= frags;
356 	fs->fs_nffree -= frags;
357 	cgp->cg_frsum[allocsiz]--;
358 	if (frags != allocsiz)
359 		cgp->cg_frsum[allocsiz - frags]++;
360 	bdwrite(bp);
361 	return (cg * fs->fs_fpg + bno);
362 }
363 
364 daddr_t
365 alloccgblk(dev, fs, cgp, bpref)
366 	dev_t dev;
367 	struct fs *fs;
368 	register struct cg *cgp;
369 	daddr_t bpref;
370 {
371 	daddr_t bno;
372 	int cylno, pos;
373 	short *cylbp;
374 	int i, j;
375 
376 	if (bpref == 0) {
377 		bpref = cgp->cg_rotor;
378 	} else {
379 		bpref &= ~(FRAG - 1);
380 		bpref %= fs->fs_fpg;
381 		/*
382 		 * if the requested block is available, use it
383 		 */
384 		if (isblock(cgp->cg_free, bpref/FRAG)) {
385 			bno = bpref;
386 			goto gotit;
387 		}
388 		/*
389 		 * check for a block available on the same cylinder
390 		 * beginning with one which is rotationally optimal
391 		 */
392 		i = bpref * NSPF;
393 		cylno = i / fs->fs_spc;
394 		cylbp = cgp->cg_b[cylno];
395 		pos = (i + (ROTDELAY == 0) ?
396 			0 : 1 + ROTDELAY * HZ * fs->fs_nsect / (NSPF * 1000)) %
397 			fs->fs_nsect * NRPOS / fs->fs_nsect;
398 		for (i = pos; i < NRPOS; i++)
399 			if (cylbp[i] > 0)
400 				break;
401 		if (i == NRPOS)
402 			for (i = 0; i < pos; i++)
403 				if (cylbp[i] > 0)
404 					break;
405 		if (cylbp[i] > 0) {
406 			bpref = cylno * fs->fs_spc / (NSPF * FRAG);
407 			for (j = fs->fs_postbl[i]; j > -1; j = fs->fs_rotbl[j]) {
408 				if (isblock(cgp->cg_free, bpref + j)) {
409 					bno = (bpref + j) * FRAG;
410 					goto gotit;
411 				}
412 			}
413 			panic("alloccgblk: can't find blk in cyl");
414 		}
415 	}
416 	bno = mapsearch(fs, cgp, bpref, FRAG);
417 	if (bno == 0)
418 		return (0);
419 	cgp->cg_rotor = bno;
420 gotit:
421 	clrblock(cgp->cg_free, bno/FRAG);
422 	cgp->cg_nbfree--;
423 	fs->fs_nbfree--;
424 	fs->fs_cs(cgp->cg_cgx).cs_nbfree--;
425 	i = bno * NSPF;
426 	cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]--;
427 	fs->fs_fmod++;
428 	return (cgp->cg_cgx * fs->fs_fpg + bno);
429 }
430 
431 long
432 ialloccg(dev, fs, cg, ipref, mode)
433 	dev_t dev;
434 	register struct fs *fs;
435 	int cg;
436 	daddr_t ipref;
437 	int mode;
438 {
439 	register struct buf *bp;
440 	register struct cg *cgp;
441 	int i;
442 
443 	bp = bread(dev, cgtod(cg, fs), BSIZE);
444 	if (bp->b_flags & B_ERROR)
445 		return (0);
446 	cgp = bp->b_un.b_cg;
447 	if (cgp->cg_nifree == 0) {
448 		brelse(bp);
449 		return (0);
450 	}
451 	if (ipref) {
452 		ipref %= fs->fs_ipg;
453 		if (isclr(cgp->cg_iused, ipref))
454 			goto gotit;
455 	} else
456 		ipref = cgp->cg_irotor;
457 	for (i = 0; i < fs->fs_ipg; i++) {
458 		ipref++;
459 		if (ipref >= fs->fs_ipg)
460 			ipref = 0;
461 		if (isclr(cgp->cg_iused, ipref)) {
462 			cgp->cg_irotor = ipref;
463 			goto gotit;
464 		}
465 	}
466 	brelse(bp);
467 	return (0);
468 gotit:
469 	setbit(cgp->cg_iused, ipref);
470 	cgp->cg_nifree--;
471 	fs->fs_nifree--;
472 	fs->fs_cs(cg).cs_nifree--;
473 	fs->fs_fmod++;
474 	if ((mode & IFMT) == IFDIR) {
475 		cgp->cg_ndir++;
476 		fs->fs_cs(cg).cs_ndir++;
477 	}
478 	bdwrite(bp);
479 	return (cg * fs->fs_ipg + ipref);
480 }
481 
482 fre(dev, bno, size)
483 	dev_t dev;
484 	daddr_t bno;
485 	int size;
486 {
487 	register struct fs *fs;
488 	register struct cg *cgp;
489 	register struct buf *bp;
490 	int cg, blk, frags, bbase;
491 	register int i;
492 
493 	if ((unsigned)size > BSIZE || size % FSIZE != 0)
494 		panic("free: bad size");
495 	fs = getfs(dev);
496 	cg = dtog(bno, fs);
497 	if (badblock(fs, bno))
498 		return;
499 	bp = bread(dev, cgtod(cg, fs), BSIZE);
500 	if (bp->b_flags & B_ERROR)
501 		return;
502 	cgp = bp->b_un.b_cg;
503 	bno %= fs->fs_fpg;
504 	if (size == BSIZE) {
505 		if (isblock(cgp->cg_free, bno/FRAG))
506 			panic("free: freeing free block");
507 		setblock(cgp->cg_free, bno/FRAG);
508 		cgp->cg_nbfree++;
509 		fs->fs_nbfree++;
510 		fs->fs_cs(cg).cs_nbfree++;
511 		i = bno * NSPF;
512 		cgp->cg_b[i/fs->fs_spc][i%fs->fs_nsect*NRPOS/fs->fs_nsect]++;
513 	} else {
514 		bbase = bno - (bno % FRAG);
515 		/*
516 		 * decrement the counts associated with the old frags
517 		 */
518 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
519 		       (0xff >> (NBBY - FRAG)));
520 		fragacct(blk, cgp->cg_frsum, -1);
521 		/*
522 		 * deallocate the fragment
523 		 */
524 		frags = size / FSIZE;
525 		for (i = 0; i < frags; i++) {
526 			if (isset(cgp->cg_free, bno + i))
527 				panic("free: freeing free frag");
528 			setbit(cgp->cg_free, bno + i);
529 			cgp->cg_nffree++;
530 			fs->fs_nffree++;
531 		}
532 		/*
533 		 * add back in counts associated with the new frags
534 		 */
535 		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
536 		       (0xff >> (NBBY - FRAG)));
537 		fragacct(blk, cgp->cg_frsum, 1);
538 		/*
539 		 * if a complete block has been reassembled, account for it
540 		 */
541 		if (isblock(cgp->cg_free, bbase / FRAG)) {
542 			cgp->cg_nffree -= FRAG;
543 			fs->fs_nffree -= FRAG;
544 			cgp->cg_nbfree++;
545 			fs->fs_nbfree++;
546 			fs->fs_cs(cg).cs_nbfree++;
547 			i = bbase * NSPF;
548 			cgp->cg_b[i / fs->fs_spc]
549 				 [i % fs->fs_nsect * NRPOS / fs->fs_nsect]++;
550 		}
551 	}
552 	fs->fs_fmod++;
553 	bdwrite(bp);
554 }
555 
556 ifree(dev, ino, mode)
557 	dev_t dev;
558 	ino_t ino;
559 	int mode;
560 {
561 	register struct fs *fs;
562 	register struct cg *cgp;
563 	register struct buf *bp;
564 	int i;
565 	int cg;
566 
567 	fs = getfs(dev);
568 	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
569 		panic("ifree: range");
570 	cg = itog(ino, fs);
571 	bp = bread(dev, cgtod(cg, fs), BSIZE);
572 	if (bp->b_flags & B_ERROR)
573 		return;
574 	cgp = bp->b_un.b_cg;
575 	ino %= fs->fs_ipg;
576 	if (isclr(cgp->cg_iused, ino))
577 		panic("ifree: freeing free inode");
578 	clrbit(cgp->cg_iused, ino);
579 	cgp->cg_nifree++;
580 	fs->fs_nifree++;
581 	fs->fs_cs(cg).cs_nifree++;
582 	if ((mode & IFMT) == IFDIR) {
583 		cgp->cg_ndir--;
584 		fs->fs_cs(cg).cs_ndir--;
585 	}
586 	fs->fs_fmod++;
587 	bdwrite(bp);
588 }
589 
/*
 * find a block of the specified size in the specified cylinder group
 * It is a panic if a request is made to find a block if none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = bpref % fs->fs_fpg / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = roundup(fs->fs_fpg - 1, NBBY) / NBBY - start;
	/* scanc scans len bytes for one whose fragtbl entry has the
	 * (allocsiz-1) bit set, i.e. a free run of at least allocsiz;
	 * it returns the count of bytes left unscanned (0 = no match) */
	loc = scanc(len, &cgp->cg_free[start], fragtbl, 1 << (allocsiz - 1));
	if (loc == 0) {
		/*
		 * Wrap: rescan from the start of the group's data area
		 * up to the original starting byte.
		 * NOTE(review): "len = start - 1" looks like it stops one
		 * byte short of the original start, and it ignores the
		 * second region's own starting offset -- presumably
		 * benign because the first scan covered the tail, but
		 * verify against a later revision.
		 */
		len = start - 1;
		start = (cgdmin(cgp->cg_cgx, fs) -
			 cgbase(cgp->cg_cgx, fs)) / NBBY;
		loc = scanc(len, &cgp->cg_free[start], fragtbl,
			1 << (allocsiz - 1));
		if (loc == 0) {
			/* a run was promised by the frsum counts */
			panic("alloccg: map corrupted");
			return (0);
		}
	}
	/* start + len - loc is the byte index of the matching byte */
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += FRAG) {
		blk = (cgp->cg_free[bno / NBBY] >> i) & (0xff >> NBBY - FRAG);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= FRAG - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				return (bno + i + pos);
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (0);
}
649 
650 /*
651  * update the frsum fields to reflect addition or deletion
652  * of some frags
653  */
654 fragacct(fragmap, fraglist, cnt)
655 	int fragmap;
656 	short fraglist[];
657 	int cnt;
658 {
659 	int inblk;
660 	register int field, subfield;
661 	register int siz, pos;
662 
663 	inblk = (int)(fragtbl[fragmap]) << 1;
664 	fragmap <<= 1;
665 	for (siz = 1; siz < FRAG; siz++) {
666 		if (((1 << siz) & inblk) == 0)
667 			continue;
668 		field = around[siz];
669 		subfield = inside[siz];
670 		for (pos = siz; pos <= FRAG; pos++) {
671 			if ((fragmap & field) == subfield) {
672 				fraglist[siz] += cnt;
673 				pos += siz;
674 				field <<= siz;
675 				subfield <<= siz;
676 			}
677 			field <<= 1;
678 			subfield <<= 1;
679 		}
680 	}
681 }
682 
683 badblock(fs, bn)
684 	register struct fs *fs;
685 	daddr_t bn;
686 {
687 
688 	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(dtog(bn, fs), fs)) {
689 		fserr(fs, "bad block");
690 		return (1);
691 	}
692 	return (0);
693 }
694 
695 /*
696  * getfs maps a device number into
697  * a pointer to the incore super
698  * block.  The algorithm is a linear
699  * search through the mount table.
700  * A consistency check of the
701  * in core free-block and i-node
702  * counts is performed.
703  *
704  * panic: no fs -- the device is not mounted.
705  *	this "cannot happen"
706  */
707 struct fs *
708 getfs(dev)
709 	dev_t dev;
710 {
711 	register struct mount *mp;
712 	register struct fs *fs;
713 
714 	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
715 		if (mp->m_bufp != NULL && mp->m_dev == dev) {
716 			fs = mp->m_bufp->b_un.b_fs;
717 			if (fs->fs_magic != FS_MAGIC)
718 				panic("getfs: bad magic");
719 			return (fs);
720 		}
721 	panic("getfs: no fs");
722 	return (NULL);
723 }
724 
/*
 * Fserr prints the name of a file system
 * with an error diagnostic, in the form
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}
737 
738 /*
739  * Getfsx returns the index in the file system
740  * table of the specified device.  The swap device
741  * is also assigned a pseudo-index.  The index may
742  * be used as a compressed indication of the location
743  * of a block, recording
744  *	<getfsx(dev),blkno>
745  * rather than
746  *	<dev, blkno>
747  * provided the information need remain valid only
748  * as long as the file system is mounted.
749  */
750 getfsx(dev)
751 	dev_t dev;
752 {
753 	register struct mount *mp;
754 
755 	if (dev == swapdev)
756 		return (MSWAPX);
757 	for(mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
758 		if (mp->m_dev == dev)
759 			return (mp - &mount[0]);
760 	return (-1);
761 }
762 
/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate modified
 * super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	/* updlock prevents overlapping syncs */
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, BSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			/* also flush the cylinder-group summary blocks */
			blks = howmany(cssize(fs), BSIZE);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev, csaddr(fs) + (i * FRAG),
					BSIZE);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 * NOTE(review): inodeNINODE presumably points just past the
	 * last entry of the in-core inode table -- verify against the
	 * table's declaration.  The i_count++ is undone by iput after
	 * the inode is written.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if((ip->i_flag&ILOCK)==0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
824