/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_inode.c	8.13 (Berkeley) 04/21/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/trace.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static int ffs_indirtrunc __P((struct inode *, ufs_daddr_t, ufs_daddr_t,
	    ufs_daddr_t, int, long *));
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  The IN_MODIFIED
 * flag is used to specify that the inode needs to be updated but that the
 * times have already been set.  The access and modified times are taken
 * from the second and third parameters; the inode change time is always
 * taken from the current time.  If waitfor is set, then wait for the disk
 * write of the inode to complete.
 */
int
ffs_update(ap)
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timeval *a_access;
		struct timeval *a_modify;
		int a_waitfor;
	} */ *ap;
{
	register struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ip = VTOI(ap->a_vp);
	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) {
		ip->i_flag &=
		    ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE);
		return (0);
	}
	if ((ip->i_flag &
	    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0)
		return (0);
	if (ip->i_flag & IN_ACCESS)
		ip->i_atime = ap->a_access->tv_sec;
	if (ip->i_flag & IN_UPDATE) {
		ip->i_mtime = ap->a_modify->tv_sec;
		ip->i_modrev++;
	}
	if (ip->i_flag & IN_CHANGE)
		ip->i_ctime = time.tv_sec;
	ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE);
	fs = ip->i_fs;
	/*
	 * Ensure that uid and gid are correct.  This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
	if (error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	*((struct dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
	if (ap->a_waitfor && (ap->a_vp->v_mount->mnt_flag & MNT_ASYNC) == 0)
		return (bwrite(bp));
	else {
		bdwrite(bp);
		return (0);
	}
}
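
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * marks the in-core inode and then goes through the vnode operation rather
 * than calling ffs_update() directly.  Assuming "vp" is a locked vnode on a
 * writable FFS mount and "ip" its inode, the call might look like this:
 *
 *	struct timeval tv;
 *
 *	ip->i_flag |= IN_CHANGE | IN_UPDATE;	(times need to be updated)
 *	tv = time;				(use the current time)
 *	error = VOP_UPDATE(vp, &tv, &tv, 1);	(1 = wait for the write)
 *
 * With a_waitfor non-zero (and MNT_ASYNC clear) the dinode is written with
 * bwrite() before ffs_update() returns; otherwise it is delayed-written.
 */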

#define	SINGLE	0	/* index of single indirect block */
#define	DOUBLE	1	/* index of double indirect block */
#define	TRIPLE	2	/* index of triple indirect block */
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
ffs_truncate(ap)
	struct vop_truncate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		int a_flags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *ovp = ap->a_vp;
	ufs_daddr_t lastblock;
	register struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	off_t length = ap->a_length;
	register struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, vflags, blocksreleased = 0;
	struct timeval tv;
	register int i;
	int aflags, error, allerror;
	off_t osize;

	if (length < 0)
		return (EINVAL);
	oip = VTOI(ovp);
	tv = time;
	if (ovp->v_type == VLNK &&
	    oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (VOP_UPDATE(ovp, &tv, &tv, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (VOP_UPDATE(ovp, &tv, &tv, 0));
	}
#ifdef QUOTA
	if (error = getinoquota(oip))
		return (error);
#endif
	fs = oip->i_fs;
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > fs->fs_maxfilesize)
			return (EFBIG);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		aflags = B_CLRBUF;
		if (ap->a_flags & IO_SYNC)
			aflags |= B_SYNC;
		if (error = ffs_balloc(oip, lbn, offset + 1, ap->a_cred, &bp,
		    aflags))
			return (error);
		oip->i_size = length;
		vnode_pager_setsize(ovp, (u_long)length);
		(void) vnode_pager_uncache(ovp);
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (VOP_UPDATE(ovp, &tv, &tv, 1));
	}
	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (ap->a_flags & IO_SYNC)
			aflags |= B_SYNC;
		if (error = ffs_balloc(oip, lbn, offset, ap->a_cred, &bp,
		    aflags))
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		(void) vnode_pager_uncache(ovp);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
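	/*
	 * Worked example (illustrative only, assuming an 8192/1024 file
	 * system): truncating to length 20000 gives
	 *
	 *	offset = blkoff(fs, 20000) = 20000 % 8192 = 3616
	 *	lbn    = lblkno(fs, 20000) = 20000 / 8192 = 2
	 *	size   = fragroundup(fs, 3616) = 4096
	 *
	 * so the bzero() above clears bytes 3616..4095 of the buffer,
	 * ensuring that no stale data reappears if the file later grows
	 * back past byte 20000.
	 */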
	vnode_pager_setsize(ovp, (u_long)length);
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
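	/*
	 * Worked example (illustrative only, assuming fs_bsize = 8192, so
	 * NINDIR(fs) = 2048, and NDADDR = 12): truncating to length 100000
	 * gives
	 *
	 *	lastblock          = lblkno(fs, 100000 + 8191) - 1 = 12
	 *	lastiblock[SINGLE] = 12 - 12           = 0
	 *	lastiblock[DOUBLE] = 0 - 2048          = -2048
	 *	lastiblock[TRIPLE] = -2048 - 2048*2048 = -4196352
	 *
	 * so direct blocks 0..11 and entry 0 of the single indirect block
	 * (logical block 12) are kept, while the double and triple indirect
	 * trees (negative lastiblock values) are released entirely.
	 */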
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	if (error = VOP_UPDATE(ovp, &tv, &tv, MNT_WAIT))
		allerror = error;
	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;
	vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
	allerror = vinvalbuf(ovp, vflags, ap->a_cred, ap->a_p, 0, 0);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
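	/*
	 * Illustrative values (assuming fs_bsize = 8192, so NINDIR(fs) =
	 * 2048, and NDADDR = 12): these are the negative logical block
	 * numbers under which the indirect blocks themselves are held in
	 * the buffer cache and passed as the lbn argument to
	 * ffs_indirtrunc() below:
	 *
	 *	indir_lbn[SINGLE] = -12
	 *	indir_lbn[DOUBLE] = -12 - 2048 - 1        = -2061
	 *	indir_lbn[TRIPLE] = -2061 - 2048*2048 - 1 = -4196366
	 */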
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		register long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
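	/*
	 * Worked example (illustrative only, assuming an 8192/1024 file
	 * system, i.e. 1024-byte frags): if the old last direct block held
	 * a full 8192 bytes and the new length leaves 2500 bytes in it,
	 *
	 *	oldspace = 8192
	 *	newspace = fragroundup(fs, 2500) = 3072	(3 frags kept)
	 *	numfrags(fs, newspace) = 3
	 *
	 * so the 5 trailing frags starting at bn + 3 are freed, and
	 * blocksreleased is credited with btodb(5120) = 10 DEV_BSIZE units.
	 */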
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	if (length == 0 &&
	    (ovp->v_dirtyblkhd.lh_first || ovp->v_cleanblkhd.lh_first))
		panic("itrunc3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}
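
/*
 * Illustrative sketch (not part of the original source): callers reach
 * ffs_truncate() through the vnode interface.  Assuming "vp" is a locked
 * vnode, "cred" the caller's credentials, and "p" the current process, a
 * synchronous truncation to newsize might look like
 *
 *	error = VOP_TRUNCATE(vp, newsize, IO_SYNC, cred, p);
 *
 * which matches the vop_truncate_args layout above: a_length = newsize,
 * a_flags = IO_SYNC (so the partial-block update uses bwrite rather than
 * bawrite), a_cred = cred, and a_p = p.
 */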

/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp)
	register struct inode *ip;
	ufs_daddr_t lbn, lastbn;
	ufs_daddr_t dbn;
	int level;
	long *countp;
{
	register int i;
	struct buf *bp;
	register struct fs *fs = ip->i_fs;
	register ufs_daddr_t *bap;
	struct vnode *vp;
	ufs_daddr_t *copy, nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
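	/*
	 * Worked example (illustrative only, assuming NINDIR(fs) = 2048):
	 * for a double indirect block (level = DOUBLE), factor = 2048, so
	 * lastbn = 5000 gives last = 5000 / 2048 = 2.  Entries 3..2047 are
	 * freed in the loop below, and entry 2 is handled by the recursive
	 * call at the end with lastbn % factor = 904 as its new lastbn.
	 */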
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * the double (and triple) indirect pointers are cleared before the
	 * single (and double) indirect blocks beneath them are processed,
	 * calls to bmap on these blocks will fail.  However, we already
	 * have the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		/* Braces must be here in case trace evaluates to nothing. */
		trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn);
	} else {
		trace(TR_BREADMISS, pack(vp, fs->fs_bsize), lbn);
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		VOP_STRATEGY(bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (ufs_daddr_t *)bp->b_data;
	MALLOC(copy, ufs_daddr_t *, fs->fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	    (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	error = bwrite(bp);
	if (error)
		allerror = error;
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    (ufs_daddr_t)-1, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}
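	/*
	 * Accounting note (illustrative only, assuming fs_bsize = 8192 so
	 * nblocks = btodb(8192) = 16 and NINDIR(fs) = 2048): freeing a
	 * fully populated single indirect block at this point releases up
	 * to 2048 * 16 = 32768 DEV_BSIZE units of data blocks; the indirect
	 * block itself is counted by the caller when it frees it.
	 */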

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount))
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	FREE(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}