xref: /csrg-svn/sys/ufs/ffs/ffs_subr.c (revision 31659)
/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)ffs_subr.c	7.5 (Berkeley) 06/21/87
 */

#ifdef KERNEL
#include "param.h"
#include "systm.h"
#include "mount.h"
#include "fs.h"
#include "buf.h"
#include "inode.h"
#include "dir.h"
#include "user.h"
#include "quota.h"
#include "kernel.h"
#else
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/fs.h>
#include <sys/buf.h>
#include <sys/inode.h>
#include <sys/dir.h>
#include <sys/user.h>
#include <sys/quota.h>
#endif

#ifdef KERNEL
int	syncprt = 0;

/*
 * Update is the internal name of 'sync'.  It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified nodes; and it goes through the mount table to initiate
 * the writing of the modified super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	struct fs *fs;

	if (syncprt)
		bufstats();
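	/*
	 * updlock serializes syncs: if another update() is already in
	 * progress, this call simply returns rather than duplicating
	 * the work.
	 */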
	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++) {
		if (mp->m_fs == NULL || mp->m_dev == NODEV)
			continue;
		fs = mp->m_fs;
		if (fs->fs_fmod == 0)
			continue;
		if (fs->fs_ronly != 0) {		/* XXX */
			printf("fs = %s\n", fs->fs_fsmnt);
			panic("update: rofs mod");
		}
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		sbupdate(mp);
	}
	/*
	 * Write back each (modified) inode.
	 */
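	/*
	 * Locked inodes are skipped rather than waited for; they will
	 * be written on a later sync.  Each inode written here is
	 * locked and its reference count bumped so that iput() can
	 * unlock and release it after iupdat() pushes it to disk.
	 */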
	for (ip = inode; ip < inodeNINODE; ip++) {
		if ((ip->i_flag & ILOCKED) != 0 || ip->i_count == 0 ||
		    (ip->i_flag & (IMOD|IACC|IUPD|ICHG)) == 0)
			continue;
		ip->i_flag |= ILOCKED;
		ip->i_count++;
		iupdat(ip, &time, &time, 0);
		iput(ip);
	}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}

/*
 * Flush all the blocks associated with an inode.
 * There are two strategies based on the size of the file;
 * large files are those with more than (nbuf / 2) blocks.
 * Large files
 * 	Walk through the buffer pool and push any dirty pages
 *	associated with the device on which the file resides.
 * Small files
 *	Look up each block in the file to see if it is in the
 *	buffer pool writing any that are found to disk.
 *	Note that we make a more stringent check of
 *	writing out any block in the buffer pool that may
 *	overlap the inode. This brings the inode up to
 *	date with recent mods to the cooked device.
 */
syncip(ip)
	register struct inode *ip;
{
	register struct fs *fs;
	register struct buf *bp;
	struct buf *lastbufp;
	long lbn, lastlbn;
	int s;
	daddr_t blkno;

	fs = ip->i_fs;
	lastlbn = howmany(ip->i_size, fs->fs_bsize);
	if (lastlbn < nbuf / 2) {
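		/*
		 * Small file: translate each logical block to its disk
		 * address with bmap() and flush just those blocks from
		 * the buffer cache.
		 */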
		for (lbn = 0; lbn < lastlbn; lbn++) {
			blkno = fsbtodb(fs, bmap(ip, lbn, B_READ));
			blkflush(ip->i_dev, blkno, blksize(fs, ip, lbn));
		}
	} else {
		lastbufp = &buf[nbuf];
		for (bp = buf; bp < lastbufp; bp++) {
			if (bp->b_dev != ip->i_dev ||
			    (bp->b_flags & B_DELWRI) == 0)
				continue;
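			/*
			 * Block interrupts while examining the buffer.
			 * If it is busy, sleep until it is released and
			 * then re-examine the same buffer (bp is backed
			 * up to offset the loop increment).
			 */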
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				sleep((caddr_t)bp, PRIBIO+1);
				splx(s);
				bp--;
				continue;
			}
			splx(s);
			notavail(bp);
			bwrite(bp);
		}
	}
	iupdat(ip, &time, &time, 1);
}
#endif

extern	int around[9];
extern	int inside[9];
extern	u_char *fragtbl[];

/*
 * Update the frsum fields to reflect addition or deletion
 * of some frags.
 */
fragacct(fs, fragmap, fraglist, cnt)
	struct fs *fs;
	int fragmap;
	long fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1;
	fragmap <<= 1;
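	/*
	 * fragtbl[fs_frag][fragmap] encodes which free-fragment run
	 * sizes occur in fragmap, so sizes not present at all are
	 * skipped below.  Shifting fragmap left by one adds a zero
	 * ("allocated") sentinel bit below the map; around[siz] is a
	 * window of siz + 2 bits and inside[siz] is the pattern of
	 * exactly siz free fragments bounded by allocated bits (see
	 * ffs_tables.c), so a match counts one maximal run of exactly
	 * siz free fragments.
	 */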
	for (siz = 1; siz < fs->fs_frag; siz++) {
		if ((inblk & (1 << (siz + (fs->fs_frag % NBBY)))) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		for (pos = siz; pos <= fs->fs_frag; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}

#ifdef KERNEL
/*
 * Check that a specified block number is in range.
 */
badblock(fs, bn)
	register struct fs *fs;
	daddr_t bn;
{

	if ((unsigned)bn >= fs->fs_size) {
		printf("bad block %d, ", bn);
		fserr(fs, "bad block");
		return (1);
	}
	return (0);
}
#endif

/*
 * block operations
 *
 * check if a block is available
 */
isblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;
	daddr_t h;
{
	unsigned char mask;

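	/*
	 * The block map keeps one bit per fragment, fs_frag bits per
	 * block, so a map byte covers NBBY / fs_frag blocks.  A block
	 * is available only when every fragment bit within it is set;
	 * the shifts below pick out the bits belonging to block h.
	 */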
	switch ((int)fs->fs_frag) {
	case 8:
		return (cp[h] == 0xff);
	case 4:
		mask = 0x0f << ((h & 0x1) << 2);
		return ((cp[h >> 1] & mask) == mask);
	case 2:
		mask = 0x03 << ((h & 0x3) << 1);
		return ((cp[h >> 2] & mask) == mask);
	case 1:
		mask = 0x01 << (h & 0x7);
		return ((cp[h >> 3] & mask) == mask);
	default:
		panic("isblock");
		return (NULL);
	}
}

/*
 * take a block out of the map
 */
clrblock(fs, cp, h)
	struct fs *fs;
	u_char *cp;
	daddr_t h;
{

	switch ((int)fs->fs_frag) {
	case 8:
		cp[h] = 0;
		return;
	case 4:
		cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2));
		return;
	case 2:
		cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1));
		return;
	case 1:
		cp[h >> 3] &= ~(0x01 << (h & 0x7));
		return;
	default:
		panic("clrblock");
	}
}

/*
 * put a block into the map
 */
setblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;
	daddr_t h;
{

	switch ((int)fs->fs_frag) {

	case 8:
		cp[h] = 0xff;
		return;
	case 4:
		cp[h >> 1] |= (0x0f << ((h & 0x1) << 2));
		return;
	case 2:
		cp[h >> 2] |= (0x03 << ((h & 0x3) << 1));
		return;
	case 1:
		cp[h >> 3] |= (0x01 << (h & 0x7));
		return;
	default:
		panic("setblock");
	}
}

#ifdef KERNEL
/*
 * Getfs maps a device number into a pointer to the incore super block.
 *
 * The algorithm is a linear search through the mount table. A
 * consistency check of the super block magic number is performed.
 *
 * panic: no fs -- the device is not mounted.
 *	this "cannot happen"
 */
struct fs *
getfs(dev)
	dev_t dev;
{
	register struct mount *mp;
	register struct fs *fs;

	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++) {
		if (mp->m_fs == NULL || mp->m_dev != dev)
			continue;
		fs = mp->m_fs;
		if (fs->fs_magic != FS_MAGIC) {
			printf("dev = 0x%x, fs = %s\n", dev, fs->fs_fsmnt);
			panic("getfs: bad magic");
		}
		return (fs);
	}
	printf("dev = 0x%x\n", dev);
	panic("getfs: no fs");
	return (NULL);
}

/*
 * Getfsx returns the index in the file system
 * table of the specified device.  The swap device
 * is also assigned a pseudo-index.  The index may
 * be used as a compressed indication of the location
 * of a block, recording
 *	<getfsx(dev),blkno>
 * rather than
 *	<dev, blkno>
 * provided the information need remain valid only
 * as long as the file system is mounted.
 */
getfsx(dev)
	dev_t dev;
{
	register struct mount *mp;

	if (dev == swapdev)
		return (MSWAPX);
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_dev == dev)
			return (mp - &mount[0]);
	return (-1);
}

/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * above.
 */
bufstats()
{
	int s, i, j, count;
	register struct buf *bp, *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

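	/*
	 * For each free list, count the buffers queued on it and
	 * bucket them by buffer size in CLBYTES units, with interrupts
	 * blocked while the list is walked.
	 */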
	for (bp = bfreelist, i = 0; bp < &bfreelist[BQUEUES]; bp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (dp = bp->av_forw; dp != bp; dp = dp->av_forw) {
			counts[dp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif

#if (!defined(vax) && !defined(tahoe)) || defined(VAX630)
/*
 * C definitions of special instructions.
 * Normally expanded with inline.
 */
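/*
 * scanc: scan the size bytes at cp for the first byte whose table
 * entry has a bit in common with mask; return the number of bytes
 * left from that byte on (0 if no byte matches).
 */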
scanc(size, cp, table, mask)
	u_int size;
	register u_char *cp, table[];
	register u_char mask;
{
	register u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}
#endif

#if !defined(vax) && !defined(tahoe)
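/*
 * skpc: skip leading bytes equal to mask; return the number of bytes
 * remaining from the first byte that differs (0 if all size bytes
 * match mask).
 */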
skpc(mask, size, cp)
	register u_char mask;
	u_int size;
	register u_char *cp;
{
	register u_char *end = &cp[size];

	while (cp < end && *cp == mask)
		cp++;
	return (end - cp);
}

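/*
 * locc: locate the first byte equal to mask; return the number of
 * bytes remaining from that byte on (0 if mask does not occur in the
 * size bytes at cp).
 */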
locc(mask, size, cp)
	register u_char mask;
	u_int size;
	register u_char *cp;
{
	register u_char *end = &cp[size];

	while (cp < end && *cp != mask)
		cp++;
	return (end - cp);
}
#endif
418