xref: /csrg-svn/sys/ufs/lfs/lfs_inode.c (revision 51183)
/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_inode.c	7.42 (Berkeley) 09/25/91
 */

#include "param.h"
#include "systm.h"
#include "mount.h"
#include "proc.h"
#include "file.h"
#include "buf.h"
#include "vnode.h"
#include "kernel.h"
#include "malloc.h"

#include "../ufs/quota.h"
#include "../ufs/inode.h"
#include "../ufs/ufsmount.h"
#include "lfs.h"
#include "lfs_extern.h"

#define	INOHSZ	512
#if	((INOHSZ&(INOHSZ-1)) == 0)
#define	INOHASH(dev,ino)	(((dev)+(ino))&(INOHSZ-1))
#else
#define	INOHASH(dev,ino)	(((unsigned)((dev)+(ino)))%INOHSZ)
#endif
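
/*
 * Illustrative note (an editorial sketch, not part of the original logic):
 * both INOHASH forms reduce (dev + ino) modulo INOHSZ; the masking form is
 * only valid when INOHSZ is a power of two, because then the bitwise AND
 * with (INOHSZ - 1) equals the modulus.  For example, assuming INOHSZ == 512,
 * dev == 5, and ino == 1027:
 *
 *	(5 + 1027) & (512 - 1)        ==  1032 & 511  ==  8
 *	((unsigned)(5 + 1027)) % 512  ==  8
 *
 * so the inode hashes to bucket lfsihead[8] under either definition.
 */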

union lfsihead {						/* LFS */
	union  lfsihead *ih_head[2];
	struct inode *ih_chain[2];
} lfsihead[INOHSZ];
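
/*
 * Descriptive note (editorial sketch): each lfsihead[] entry is the head of
 * a circular, doubly-linked hash chain.  The union lets the same two words
 * be viewed either as links to other heads (ih_head[0]/ih_head[1], used by
 * lfs_init to point an empty bucket at itself) or as the first and last
 * inode on the chain (ih_chain[0]/ih_chain[1]).  This works because
 * insque()/remque() only touch the first two pointers of whatever structure
 * they are handed, and struct inode is assumed to begin with its
 * i_forw/i_back links.  An empty bucket points to itself, which is why the
 * lookup loop in lfs_iget stops when the chain pointer comes back around to
 * (struct inode *)ih.
 */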

								/* LFS */
extern int prtactive;	/* 1 => print out reclaim of active vnodes */

/*
 * Initialize hash links for inodes.
 */
lfs_init()
{
	register int i;
	register union lfsihead *ih = lfsihead;

printf("lfs_init\n");
#ifndef lint
	if (VN_MAXPRIVATE < sizeof(struct inode))
		panic("ihinit: too small");
#endif /* not lint */
	for (i = INOHSZ; --i >= 0; ih++) {
		ih->ih_head[0] = ih;
		ih->ih_head[1] = ih;
	}
#ifdef NOTLFS							/* LFS */
#ifdef QUOTA
	dqinit();
#endif /* QUOTA */
#endif
}

lfs_hqueue(ip)
	struct inode *ip;
{
	union lfsihead *ih;

printf("lfs_hqueue ino %d\n", ip->i_number);
	ih = &lfsihead[INOHASH(ip->i_dev, ip->i_number)];
	insque(ip, ih);
	ILOCK(ip);
}


/*
 * Look up a UFS dinode number to find its incore vnode.
 * If it is not in core, read it in from the specified device.
 * If it is in core, wait for the lock bit to clear, then
 * return the inode locked. Detection and handling of mount
 * points must be done by the calling routine.
 */
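/*
 * Usage sketch (hypothetical caller, for illustration only): a routine
 * already holding an inode "pip" on the same file system would typically
 * fetch another inode by number, use it, and release it with iput():
 *
 *	struct inode *ip;
 *	int error;
 *
 *	if (error = lfs_iget(pip, ino, &ip))
 *		return (error);
 *	... use ip, returned locked as described above ...
 *	iput(ip);
 *
 * On failure *ipp is set to 0, so the caller has nothing to clean up.
 */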
lfs_iget(xp, ino, ipp)
	struct inode *xp;
	ino_t ino;
	struct inode **ipp;
{
	dev_t dev = xp->i_dev;
	struct mount *mntp = ITOV(xp)->v_mount;
	register LFS *fs = VFSTOUFS(mntp)->um_lfs;		/* LFS */
	extern struct vnodeops ufs_vnodeops, spec_inodeops;
	register struct inode *ip, *iq;
	register struct vnode *vp;
	struct vnode *nvp;
	struct buf *bp;
	union lfsihead *ih;
	int i, error;

printf("lfs_iget ino %d\n", ino);
	ih = &lfsihead[INOHASH(dev, ino)];
loop:
	for (ip = ih->ih_chain[0]; ip != (struct inode *)ih; ip = ip->i_forw) {
		if (ino != ip->i_number || dev != ip->i_dev)
			continue;
		if ((ip->i_flag&ILOCKED) != 0) {
			ip->i_flag |= IWANT;
			sleep((caddr_t)ip, PINOD);
			goto loop;
		}
		if (vget(ITOV(ip)))
			goto loop;
		*ipp = ip;
		return(0);
	}

	/* Allocate new vnode/inode. */
	error = lfs_vcreate(mntp, ino, &nvp);
	if (error) {
		*ipp = 0;
		return (error);
	}
	ip = VTOI(nvp);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	insque(ip, ih);
	ILOCK(ip);

	/* Read in the disk contents for the inode, copy into the vnode. */
	if (error = bread(VFSTOUFS(mntp)->um_devvp, itod(fs, ino),
	    (int)fs->lfs_bsize, NOCRED, &bp)) {			/* LFS */
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain.
		 * Iput() will take care of putting it back on the free list.
		 */
		remque(ip);
		ip->i_forw = ip;
		ip->i_back = ip;
		/*
		 * Unlock and discard unneeded inode.
		 */
		iput(ip);
		brelse(bp);
		*ipp = 0;
		return (error);
	}
	ip->i_din = *lfs_ifind(fs, ino, bp->b_un.b_dino);
	brelse(bp);

	/*
	 * Initialize the associated vnode
	 */
	vp = ITOV(ip);
	vp->v_type = IFTOVT(ip->i_mode);
	if (vp->v_type == VFIFO) {
#ifdef FIFO
		extern struct vnodeops fifo_inodeops;
		vp->v_op = &fifo_inodeops;
#else
		iput(ip);
		*ipp = 0;
		return (EOPNOTSUPP);
#endif /* FIFO */
	}
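	/*
	 * Editorial note (sketch of the intent of the block below): for
	 * block and character special files, checkalias() looks for an
	 * existing vnode for the same device.  If one is found, the freshly
	 * created inode is copied into that aliased vnode's inode, rehashed,
	 * and the new vnode is discarded, so that all opens of a given
	 * device share a single vnode.
	 */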
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		vp->v_op = &spec_inodeops;
		if (nvp = checkalias(vp, ip->i_rdev, mntp)) {
			/*
			 * Reinitialize aliased inode.
			 */
			vp = nvp;
			iq = VTOI(vp);
			iq->i_vnode = vp;
			iq->i_flag = 0;
			ILOCK(iq);
			iq->i_din = ip->i_din;
			iq->i_dev = dev;
			iq->i_number = ino;
			insque(iq, ih);
			/*
			 * Discard unneeded vnode
			 */
			ip->i_mode = 0;
			iput(ip);
			ip = iq;
		}
	}
	if (ino == ROOTINO)
		vp->v_flag |= VROOT;

	VREF(ip->i_devvp);

	*ipp = ip;
	return (0);
}

/*
 * Last reference to an inode, write the inode out and if necessary,
 * truncate and deallocate the file.
 */
lfs_inactive(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	register struct inode *ip = VTOI(vp);
	int mode, error = 0;

printf("lfs_inactive: ino %d mode %d nlink %d\n",
ip->i_number, ip->i_mode, ip->i_nlink);

	if (prtactive && vp->v_usecount != 0)
		vprint("lfs_inactive: pushing active", vp);
	/*
	 * Get rid of inodes related to stale file handles.
	 */
	if (ip->i_mode == 0) {
		if ((vp->v_flag & VXLOCK) == 0)
			vgone(vp);
		return (0);
	}
	ILOCK(ip);
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef QUOTA
		if (!getinoquota(ip))
			(void) chkiq(ip, -1, NOCRED, 0);
#endif
		error = lfs_itrunc(ip, (u_long)0, 0);		/* LFS */
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_rdev = 0;
		ip->i_flag |= IUPD|ICHG;
#ifdef NOTLFS							/* LFS */
		ifree(ip, ip->i_number, mode);
#else
		lfs_ifree(ip);
#endif
	}
	ITIMES(ip, &time, &time);
	IUNLOCK(ip);
	ip->i_flag = 0;
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (vp->v_usecount == 0 && ip->i_mode == 0)
		vgone(vp);
	return (error);
}

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode ip to at most length size.  Free affected disk
 * blocks -- the blocks of the file are removed in reverse order.
 *
 * NB: triple indirect blocks are untested.
 */
lfs_itrunc(oip, length, flags)
	register struct inode *oip;
	u_long length;
	int flags;
{
	register daddr_t lastblock;
	daddr_t bn, lbn, lastiblock[NIADDR];
	register LFS *fs;					/* LFS */
	register struct inode *ip;
	struct buf *bp;
	int offset, osize, size, level;
	long count, nblocks, blocksreleased = 0;
	register int i;
	int aflags, error, allerror;
	struct inode tip;

	vnode_pager_setsize(ITOV(oip), length);
	if (oip->i_size <= length) {
		oip->i_flag |= ICHG|IUPD;
		ITIMES(oip, &time, &time);
		return (0);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	fs = oip->i_lfs;					/* LFS */
	lastblock = lblkno(fs, length + fs->lfs_bsize - 1) - 1;	/* LFS */
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
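	/*
	 * Worked example (editorial sketch, assuming an 8K block size and
	 * the usual NDADDR of 12 direct pointers): truncating to
	 * length == 100000 gives
	 *
	 *	lastblock = lblkno(fs, 100000 + 8191) - 1 = 13 - 1 = 12
	 *	lastiblock[SINGLE] = 12 - 12 = 0
	 *	lastiblock[DOUBLE], lastiblock[TRIPLE] < 0
	 *
	 * i.e. all direct blocks plus entry 0 of the single indirect block
	 * are kept.  Truncating to length == 0 gives lastblock == -1 and
	 * all lastiblock values negative, so nothing is kept.
	 */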
	nblocks = btodb(fs->lfs_bsize);				/* LFS */
	/*
	 * Update the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zeroed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
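	/*
	 * Worked example (editorial sketch, again assuming an 8K block
	 * size): for length == 100000, blkoff() yields
	 *
	 *	offset = 100000 & (8192 - 1) = 1696
	 *
	 * so the block containing the new end of file is read in below and
	 * bytes 1696 through 8191 of it are zeroed before it is written
	 * back.  When offset is 0 the new length falls exactly on a block
	 * boundary and no partial block needs to be cleared.
	 */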
	osize = oip->i_size;
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
#ifdef QUOTA
		if (error = getinoquota(oip))
			return (error);
#endif
		if (error = bread(ITOV(oip), lbn, fs->lfs_bsize, NOCRED, &bp))
			return (error);
		oip->i_size = length;
		size = blksize(fs);				/* LFS */
		(void) vnode_pager_uncache(ITOV(oip));
		bzero(bp->b_un.b_addr + offset, (unsigned)(size - offset));
		allocbuf(bp, size);
#ifdef NOTLFS
		if (flags & IO_SYNC)				/* LFS */
			bwrite(bp);
		else
			bdwrite(bp);
#else
		lfs_bwrite(bp);
#endif
	}
	/*
	 * Update file and block pointers
	 * on disk before we start freeing blocks.
	 * If we crash before free'ing blocks below,
	 * the blocks will be returned to the free list.
	 * lastiblock values are also normalized to -1
	 * for calls to indirtrunc below.
	 */
	/* Will need to modify the segment usage information */	/* LFS */
	tip = *oip;
	tip.i_size = osize;
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= ICHG|IUPD;
#ifdef NOTLFS
	vinvalbuf(ITOV(oip), (length > 0));
	allerror = ITIMES(oip, &time, &time);
#else
	/* Need lfs_vinvalbuf to get rid of invalid buffers in the cache */
	ITIMES(oip, &time, &time);
	allerror = 0;
#endif

#ifdef NOTLFS
	/*
	 * Indirect blocks first.
	 */
	ip = &tip;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = ip->i_ib[level];
		if (bn != 0) {
			error = indirtrunc(ip, bn, lastiblock[level], level,
				&count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				ip->i_ib[level] = 0;
				blkfree(ip, bn, (off_t)fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}
#else
	/* LFS -- not yet implemented.  Need to rewrite indirect blocks */
	panic("lfs_itrunc: not yet implemented");
#endif

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register off_t bsize;

		bn = ip->i_db[i];
		if (bn == 0)
			continue;
		ip->i_db[i] = 0;
		bsize = (off_t)blksize(fs);			/* LFS */
#ifdef NOTLFS
		blkfree(ip, bn, bsize);
#else
		/* LFS Update segment usage information */
#endif
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = ip->i_db[lastblock];
	if (bn != 0) {
		off_t oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs);				/* LFS */
		ip->i_size = length;
		newspace = blksize(fs);				/* LFS */
		if (newspace == 0)
			panic("lfs_itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			blkfree(ip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
/* BEGIN PARANOIA */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (ip->i_ib[level] != oip->i_ib[level])
			panic("lfs_itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (ip->i_db[i] != oip->i_db[i])
			panic("lfs_itrunc2");
/* END PARANOIA */
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= ICHG;
#ifdef QUOTA
	if (!getinoquota(oip))
		(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}

/*
 * Release blocks associated with the inode ip and
 * stored in the indirect block bn.  Blocks are free'd
 * in LIFO order up to (but not including) lastbn.  If
 * level is greater than SINGLE, the block is an indirect
 * block and recursive calls to indirtrunc must be used to
 * cleanse other indirect blocks.
 *
 * NB: triple indirect blocks are untested.
 */
lfs_indirtrunc(ip, bn, lastbn, level, countp)
	register struct inode *ip;
	daddr_t bn, lastbn;
	int level;
	long *countp;
{
#ifdef NOTLFS
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register daddr_t *bap;
	daddr_t *copy, nb, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
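	/*
	 * Worked example (editorial sketch): with 8K blocks and 4-byte disk
	 * addresses, NINDIR(fs) is 8192 / 4 = 2048.  For a double indirect
	 * block (level == DOUBLE), factor becomes 2048, so a lastbn of 5000
	 * gives last = 5000 / 2048 = 2: entries 0..2 of this indirect block
	 * are kept, entries 3..2047 are freed below, and the partially kept
	 * subtree at entry 2 is trimmed by the recursive call at the bottom
	 * of the routine with lastbn % factor == 5000 % 2048 == 904.
	 */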
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those
	 * entries corresponding to blocks to be free'd,
	 * and update on disk copy first.
	 */
	error = bread(ip->i_devvp, fsbtodb(fs, bn), (int)fs->fs_bsize,
		NOCRED, &bp);
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}
	bap = bp->b_un.b_daddr;
	MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	  (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	error = bwrite(bp);
	if (error)
		allerror = error;
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1; i > last; i--) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			error = indirtrunc(ip, nb, (daddr_t)-1, level - 1,
				&blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
		blkfree(ip, nb, (off_t)fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = indirtrunc(ip, nb, last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	FREE(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
#else
	/* LFS IMPLEMENT -- lfs_indirtrunc */
	panic("lfs_indirtrunc not implemented");
#endif
}