/*
 * Copyright (c) 1982, 1986, 1989 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_inode.c	7.51 (Berkeley) 05/14/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc __P((struct inode *, daddr_t, daddr_t, int, long *));

extern u_long nextgennumber;

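/*
 * Filesystem initialization: FFS keeps no private global state of its own,
 * so this simply passes through to the shared UFS initialization routine.
 */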
int
ffs_init()
{
	return (ufs_init());
}

/*
 * Look up a UFS dinode number to find its incore vnode.
 * If it is not in core, read it in from the specified device.
 * If it is in core, wait for the lock bit to clear, then
 * return the inode locked. Detection and handling of mount
 * points must be done by the calling routine.
 */
ffs_vget (ap)
	struct vop_vget_args *ap;
#define mntp (ap->a_mp)
#define ino (ap->a_ino)
#define vpp (ap->a_vpp)
{
	register struct fs *fs;
	register struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct dinode *dp;
	struct vnode *vp;
	union ihead *ih;
	dev_t dev;
	int i, type, error;

	ump = VFSTOUFS(mntp);
	dev = ump->um_dev;
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if (error = getnewvnode(VT_UFS, mntp, ffs_vnodeop_p, &vp)) {
		*vpp = NULL;
		return (error);
	}
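	/*
	 * Choose the malloc type by the kind of device behind this mount,
	 * so that MFS and FFS inodes are accounted for separately.
	 */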
	type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
	MALLOC(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_flag = 0;
	ip->i_devvp = 0;
	ip->i_mode = 0;
	ip->i_diroff = 0;
	ip->i_lockf = 0;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		ip->i_dquot[i] = NODQUOT;
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);

	/* Read in the disk contents for the inode, copy into the inode. */
	if (error = bread(ump->um_devvp, fsbtodb(fs, itod(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. It will be
		 * returned to the free list by ufs_iput().
		 */
		remque(ip);
		ip->i_forw = ip;
		ip->i_back = ip;

		/* Unlock and discard unneeded inode. */
		ufs_iput(ip);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
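	/*
	 * itod() above located the filesystem block holding this inode;
	 * itoo() now picks out this inode's slot within that block of
	 * on-disk dinodes.
	 */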
	dp = bp->b_un.b_dino;
	dp += itoo(fs, ino);
	ip->i_din = *dp;
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if (error = ufs_vinit(mntp, ffs_specop_p, FFS_FIFOOPS, &vp)) {
		ufs_iput(ip);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		if (++nextgennumber < (u_long)time.tv_sec)
			nextgennumber = time.tv_sec;
		ip->i_gen = nextgennumber;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IMOD;
	}
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	ip->i_uid = ip->i_din.di_ouid;
	ip->i_gid = ip->i_din.di_ogid;

	*vpp = vp;
	return (0);
}
#undef mntp
#undef ino
#undef vpp

/*
 * Update the access, modified, and inode change times as specified
 * by the IACC, IUPD, and ICHG flags respectively. The IMOD flag
 * is used to specify that the inode needs to be updated but that
 * the times have already been set. The access and modified times
 * are taken from the second and third parameters; the inode change
 * time is always taken from the current time. If waitfor is set,
 * then wait for the disk write of the inode to complete.
 */
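/*
 * Typical caller sketch (compare ffs_truncate() below): after changing
 * the inode, mark which times need refreshing and push it out, e.g.
 *
 *	ip->i_flag |= ICHG|IUPD;
 *	error = VOP_UPDATE(vp, &time, &time, waitfor);
 */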
int
ffs_update (ap)
	struct vop_update_args *ap;
#define vp (ap->a_vp)
#define ta (ap->a_ta)
#define tm (ap->a_tm)
#define waitfor (ap->a_waitfor)
{
	struct buf *bp;
	struct inode *ip;
	struct dinode *dp;
	register struct fs *fs;
	int error;

	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (0);
	ip = VTOI(vp);
	if ((ip->i_flag & (IUPD|IACC|ICHG|IMOD)) == 0)
		return (0);
	if (ip->i_flag&IACC)
		ip->i_atime.tv_sec = ta->tv_sec;
	if (ip->i_flag&IUPD) {
		ip->i_mtime.tv_sec = tm->tv_sec;
		INCRQUAD(ip->i_modrev);
	}
	if (ip->i_flag&ICHG)
		ip->i_ctime.tv_sec = time.tv_sec;
	ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	ip->i_din.di_ouid = ip->i_uid;
	ip->i_din.di_ogid = ip->i_gid;

	fs = ip->i_fs;
	if (error = bread(ip->i_devvp, fsbtodb(fs, itod(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	dp = bp->b_un.b_dino + itoo(fs, ip->i_number);
	*dp = ip->i_din;
	if (waitfor)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
#undef vp
#undef ta
#undef tm
#undef waitfor

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode ip to at most length size.  Free affected disk
 * blocks -- the blocks of the file are removed in reverse order.
 *
 * NB: triple indirect blocks are untested.
 */
ffs_truncate (ap)
	struct vop_truncate_args *ap;
#define ovp (ap->a_vp)
#define length (ap->a_length)
#define flags (ap->a_flags)
#define cred (ap->a_cred)
{
	USES_VOP_UPDATE;
	register daddr_t lastblock;
	register struct inode *oip;
	daddr_t bn, lbn, lastiblock[NIADDR];
	register struct fs *fs;
	register struct inode *ip;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	register int i;
	int aflags, error, allerror;
	struct inode tip;
	off_t osize;

	vnode_pager_setsize(ovp, (u_long)length);
	oip = VTOI(ovp);
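	/*
	 * If the file is not actually shrinking there is nothing to free;
	 * just record the updated times and return.
	 */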
	if (oip->i_size <= length) {
		oip->i_flag |= ICHG|IUPD;
		error = VOP_UPDATE(ovp, &time, &time, 1);
		return (error);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	fs = oip->i_fs;
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
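	/*
	 * Worked example (assuming an 8192-byte block filesystem, where
	 * NDADDR is 12 and NINDIR(fs) is 2048): truncating to length 100000
	 * gives lastblock == 12, i.e. all twelve direct blocks plus the
	 * first entry of the single indirect block are kept, so
	 * lastiblock[SINGLE] == 0 while lastiblock[DOUBLE] and
	 * lastiblock[TRIPLE] are negative and those trees are freed whole.
	 */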
	/*
	 * Update the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	osize = oip->i_size;
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
#ifdef QUOTA
		if (error = getinoquota(oip))
			return (error);
#endif
		if (error = ffs_balloc(oip, lbn, offset, cred, &bp, aflags))
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		(void) vnode_pager_uncache(ovp);
		bzero(bp->b_un.b_addr + offset, (unsigned)(size - offset));
		allocbuf(bp, size);
		if (flags & IO_SYNC)
			bwrite(bp);
		else
			bdwrite(bp);
	}
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
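	/*
	 * The freeing passes below operate on `tip', a private copy of the
	 * inode that still carries the old size and block pointers; the
	 * real inode is trimmed here and flushed to disk first.
	 */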
	tip = *oip;
	tip.i_size = osize;
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= ICHG|IUPD;
	vinvalbuf(ovp, (length > 0));
	allerror = VOP_UPDATE(ovp, &time, &time, MNT_WAIT);

	/*
	 * Indirect blocks first.
	 */
	ip = &tip;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = ip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(ip,
			    bn, lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				ip->i_ib[level] = 0;
				ffs_blkfree(ip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register long bsize;

		bn = ip->i_db[i];
		if (bn == 0)
			continue;
		ip->i_db[i] = 0;
		bsize = blksize(fs, ip, i);
		ffs_blkfree(ip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = ip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(ip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
/* BEGIN PARANOIA */
	for (level = SINGLE; level <= TRIPLE; level++)
		if (ip->i_ib[level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (ip->i_db[i] != oip->i_db[i])
			panic("itrunc2");
/* END PARANOIA */
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= ICHG;
#ifdef QUOTA
	if (!getinoquota(oip))
		(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}
#undef ovp
#undef length
#undef flags
#undef cred

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(ip, bn, lastbn, level, countp)
	register struct inode *ip;
	daddr_t bn, lastbn;
	int level;
	long *countp;
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register daddr_t *bap;
	daddr_t *copy, nb, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
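	/*
	 * For example, for a double indirect block (level == DOUBLE) on an
	 * 8192-byte block filesystem, factor == NINDIR(fs) == 2048, so
	 * `last' selects which single indirect block contains the cut-off.
	 */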
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those
	 * entries corresponding to blocks to be free'd,
	 * and update on disk copy first.
	 */
	error = bread(ip->i_devvp, fsbtodb(fs, bn), (int)fs->fs_bsize,
		NOCRED, &bp);
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}
	bap = bp->b_un.b_daddr;
	MALLOC(copy, daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	  (u_int)(NINDIR(fs) - (last + 1)) * sizeof (daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	error = bwrite(bp);
	if (error)
		allerror = error;
	bap = copy;
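	/*
	 * From here on we walk the saved copy of the pointers; the on-disk
	 * indirect block has already been rewritten above with the entries
	 * being freed cleared out.
	 */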

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1; i > last; i--) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if (error = ffs_indirtrunc(ip,
			    nb, (daddr_t)-1, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
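		/*
		 * The loop above stopped with i equal to the old value of
		 * `last', so bap[i] is the indirect block that straddles
		 * lastbn and must itself be truncated rather than freed.
		 */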
		nb = bap[i];
		if (nb != 0) {
			if (error =
			    ffs_indirtrunc(ip, nb, last, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	FREE(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}
509