xref: /netbsd-src/sys/ufs/lfs/lfs_inode.c (revision 3b01aba77a7a698587faaae455bbfe740923c1f5)
1 /*	$NetBSD: lfs_inode.c,v 1.52 2001/07/13 20:30:24 perseant Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Konrad E. Schroder <perseant@hhhh.org>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *      This product includes software developed by the NetBSD
21  *      Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 /*
39  * Copyright (c) 1986, 1989, 1991, 1993
40  *	The Regents of the University of California.  All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by the University of
53  *	California, Berkeley and its contributors.
54  * 4. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  *	@(#)lfs_inode.c	8.9 (Berkeley) 5/8/95
71  */
72 
73 #if defined(_KERNEL_OPT)
74 #include "opt_quota.h"
75 #endif
76 
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/mount.h>
80 #include <sys/proc.h>
81 #include <sys/file.h>
82 #include <sys/buf.h>
83 #include <sys/vnode.h>
84 #include <sys/kernel.h>
85 #include <sys/malloc.h>
86 #include <sys/trace.h>
87 #include <sys/resourcevar.h>
88 
89 #include <ufs/ufs/quota.h>
90 #include <ufs/ufs/inode.h>
91 #include <ufs/ufs/ufsmount.h>
92 #include <ufs/ufs/ufs_extern.h>
93 
94 #include <ufs/lfs/lfs.h>
95 #include <ufs/lfs/lfs_extern.h>
96 
97 extern int locked_queue_count;
98 extern long locked_queue_bytes;
99 
100 static int lfs_update_seguse(struct lfs *, long, size_t);
101 static int lfs_indirtrunc(struct inode *, ufs_daddr_t, daddr_t,
102 			   ufs_daddr_t, int, long *, long *, long *, size_t *,
103 			   struct proc *);
104 static int lfs_blkfree(struct lfs *, daddr_t, size_t, long *, size_t *);
105 static int lfs_vtruncbuf(struct vnode *, daddr_t, int, int);
106 
107 /* Search a block for a specific dinode. */
108 struct dinode *
109 lfs_ifind(struct lfs *fs, ino_t ino, struct buf *bp)
110 {
111 	struct dinode *dip = (struct dinode *)bp->b_data;
112 	struct dinode *ldip, *fin;
113 
114 #ifdef LFS_IFILE_FRAG_ADDRESSING
115 	if (fs->lfs_version == 1)
116 		fin = dip + INOPB(fs);
117 	else
118 		fin = dip + INOPF(fs);
119 #else
120 	fin = dip + INOPB(fs);
121 #endif
122 
123 	/*
124 	 * XXX we used to go from the top down here, presumably with the
125 	 * idea that the same inode could be written twice in the same
126 	 * block (which is not supposed to be true).
127 	 */
128 	for (ldip = dip; ldip < fin; ++ldip)
129 		if (ldip->di_inumber == ino)
130 			return (ldip);
131 
132 	printf("searched %d entries\n", (int)(fin - dip));
133 	printf("offset is 0x%x (seg %d)\n", fs->lfs_offset,
134 	       dtosn(fs, fs->lfs_offset));
135 	printf("block is 0x%x (seg %d)\n", dbtofsb(fs, bp->b_blkno),
136 	       dtosn(fs, dbtofsb(fs, bp->b_blkno)));
137 	panic("lfs_ifind: dinode %u not found", ino);
138 	/* NOTREACHED */
139 }
140 
141 int
142 lfs_update(void *v)
143 {
144 	struct vop_update_args /* {
145 				  struct vnode *a_vp;
146 				  struct timespec *a_access;
147 				  struct timespec *a_modify;
148 				  int a_flags;
149 				  } */ *ap = v;
150 	struct inode *ip;
151 	struct vnode *vp = ap->a_vp;
153 	struct timespec ts;
154 	struct lfs *fs = VFSTOUFS(vp->v_mount)->um_lfs;
155 
156 	if (vp->v_mount->mnt_flag & MNT_RDONLY)
157 		return (0);
158 	ip = VTOI(vp);
159 
160 	/*
161 	 * If we are called from vinvalbuf, and the file's blocks have
162 	 * already been scheduled for writing, but the writes have not
163 	 * yet completed, lfs_vflush will not be called, and vinvalbuf
164 	 * will cause a panic.  So, we must wait until any pending write
165 	 * for our inode completes, if we are called with UPDATE_WAIT set.
166 	 */
167 	while ((ap->a_flags & (UPDATE_WAIT|UPDATE_DIROP)) == UPDATE_WAIT &&
168 	    WRITEINPROG(vp)) {
169 #ifdef DEBUG_LFS
170 		printf("lfs_update: sleeping on inode %d (in-progress)\n",
171 		       ip->i_number);
172 #endif
173 		tsleep(vp, (PRIBIO+1), "lfs_update", 0);
174 	}
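	/*
	 * Set the access/modification/change timestamps, substituting
	 * the current time for any timestamp the caller did not supply.
	 */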
176 	TIMEVAL_TO_TIMESPEC(&time, &ts);
177 	LFS_ITIMES(ip,
178 		   ap->a_access ? ap->a_access : &ts,
179 		   ap->a_modify ? ap->a_modify : &ts, &ts);
180 	if ((ip->i_flag & (IN_MODIFIED | IN_ACCESSED | IN_CLEANING)) == 0) {
181 		return (0);
182 	}
183 
184 	/* If sync, push back the vnode and any dirty blocks it may have. */
185 	if ((ap->a_flags & (UPDATE_WAIT|UPDATE_DIROP)) == UPDATE_WAIT) {
186 		/* Avoid flushing VDIROP. */
187 		++fs->lfs_diropwait;
188 		while (vp->v_flag & VDIROP) {
189 #ifdef DEBUG_LFS
190 			printf("lfs_update: sleeping on inode %d (dirops)\n",
191 			       ip->i_number);
192 			printf("lfs_update: vflags 0x%lx, iflags 0x%x\n",
193 			       vp->v_flag, ip->i_flag);
194 #endif
195 			if (fs->lfs_dirops == 0)
196 				lfs_flush_fs(fs, SEGM_SYNC);
197 			else
198 				tsleep(&fs->lfs_writer, PRIBIO+1, "lfs_fsync",
199 				       0);
200 			/* XXX KS - by falling out here, are we writing
201 			   the vn twice? */
202 		}
203 		--fs->lfs_diropwait;
204 		return lfs_vflush(vp);
205 	}
206 	return 0;
207 }
208 
209 #define	SINGLE	0	/* index of single indirect block */
210 #define	DOUBLE	1	/* index of double indirect block */
211 #define	TRIPLE	2	/* index of triple indirect block */
212 /*
213  * Truncate the inode oip to at most length size, freeing the
214  * disk blocks.
215  */
216 /* VOP_BWRITE 1 + NIADDR + VOP_BALLOC == 2 + 2*NIADDR times */
217 int
218 lfs_truncate(void *v)
219 {
220 	struct vop_truncate_args /* {
221 		struct vnode *a_vp;
222 		off_t a_length;
223 		int a_flags;
224 		struct ucred *a_cred;
225 		struct proc *a_p;
226 	} */ *ap = v;
227 	struct vnode *ovp = ap->a_vp;
228 	ufs_daddr_t lastblock;
229 	struct inode *oip;
230 	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
231 	ufs_daddr_t newblks[NDADDR + NIADDR];
232 	off_t length = ap->a_length;
233 	struct lfs *fs;
234 	struct buf *bp;
235 	int offset, size, level;
236 	long count, rcount, nblocks, blocksreleased = 0, real_released = 0;
237 	int i;
238 	int aflags, error, allerror = 0;
239 	off_t osize;
240 	long lastseg;
241 	size_t bc;
242 	int obufsize, odb;
243 
244 	if (length < 0)
245 		return (EINVAL);
246 	oip = VTOI(ovp);
247 
248 	/*
249 	 * If the size is unchanged, just return without updating the times.
250 	 */
251 	if (oip->i_ffs_size == length)
252 		return (0);
253 
254 	if (ovp->v_type == VLNK &&
255 	    (oip->i_ffs_size < ovp->v_mount->mnt_maxsymlinklen ||
256 	     (ovp->v_mount->mnt_maxsymlinklen == 0 &&
257 	      oip->i_din.ffs_din.di_blocks == 0))) {
258 #ifdef DIAGNOSTIC
259 		if (length != 0)
260 			panic("lfs_truncate: partial truncate of symlink");
261 #endif
262 		memset((char *)&oip->i_ffs_shortlink, 0, (u_int)oip->i_ffs_size);
263 		oip->i_ffs_size = 0;
264 		oip->i_flag |= IN_CHANGE | IN_UPDATE;
265 		return (VOP_UPDATE(ovp, NULL, NULL, 0));
266 	}
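	/*
	 * XXX the size == length case already returned above, so this
	 * block is never reached.
	 */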
267 	if (oip->i_ffs_size == length) {
268 		oip->i_flag |= IN_CHANGE | IN_UPDATE;
269 		return (VOP_UPDATE(ovp, NULL, NULL, 0));
270 	}
271 #ifdef QUOTA
272 	if ((error = getinoquota(oip)) != 0)
273 		return (error);
274 #endif
275 	fs = oip->i_lfs;
276 	lfs_imtime(fs);
277 	osize = oip->i_ffs_size;
278 	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
279 
280 	/*
281 	 * Lengthen the size of the file. We must ensure that the
282 	 * last byte of the file is allocated. Since the smallest
283 	 * value of osize is 0, length will be at least 1.
284 	 */
285 	if (osize < length) {
286 		if (length > fs->lfs_maxfilesize)
287 			return (EFBIG);
288 		aflags = B_CLRBUF;
289 		if (ap->a_flags & IO_SYNC)
290 			aflags |= B_SYNC;
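		/*
		 * Reserve room for the indirect blocks that VOP_BALLOC
		 * may have to allocate (up to NIADDR + 2 full blocks);
		 * the reservation is dropped again right after the balloc.
		 */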
291 		error = lfs_reserve(fs, ovp, btofsb(fs, (NIADDR + 2) << fs->lfs_bshift));
292 		if (error)
293 			return (error);
294 		error = VOP_BALLOC(ovp, length - 1, 1, ap->a_cred, aflags, &bp);
295 		lfs_reserve(fs, ovp, -btofsb(fs, (NIADDR + 2) << fs->lfs_bshift));
296 		if (error)
297 			return (error);
298 		oip->i_ffs_size = length;
299 		uvm_vnp_setsize(ovp, length);
300 		(void) VOP_BWRITE(bp);
301 		oip->i_flag |= IN_CHANGE | IN_UPDATE;
302 		return (VOP_UPDATE(ovp, NULL, NULL, 0));
303 	}
304 
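	/*
	 * Reserve room for the blocks the truncation itself may dirty;
	 * the reservation is released again before returning.
	 */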
305 	if ((error = lfs_reserve(fs, ovp, btofsb(fs, (2 * NIADDR + 3) << fs->lfs_bshift))) != 0)
306 		return (error);
307 	/*
308 	 * Make sure no writes to this inode can happen while we're
309 	 * truncating.  Otherwise, blocks which are accounted for on the
310 	 * inode *and* which have been created for cleaning can coexist,
311 	 * and cause an overcounting.
312 	 *
313 	 * (We don't need to *hold* the seglock, though, because we already
314 	 * hold the inode lock; draining the seglock is sufficient.)
315 	 */
316 	if (ovp != fs->lfs_unlockvp) {
317 		while (fs->lfs_seglock) {
318 			tsleep(&fs->lfs_seglock, PRIBIO+1, "lfs_truncate", 0);
319 		}
320 	}
321 
322 	/*
323 	 * Shorten the size of the file. If the file is not being
324 	 * truncated to a block boundary, the contents of the
325 	 * partial block following the end of the file must be
326 	 * zeroed in case it ever becomes accessible again because
327 	 * of subsequent file growth. Directories, however, are not
328 	 * zeroed, as they should grow back initialized to empty.
329 	 */
330 	offset = blkoff(fs, length);
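	/*
	 * lastseg and bc accumulate the number of bytes freed from the
	 * current segment; lfs_blkfree() flushes the running count
	 * through lfs_update_seguse() whenever the segment changes, and
	 * the remainder is flushed at "done:" below.
	 */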
331 	lastseg = -1;
332 	bc = 0;
333 	if (offset == 0) {
334 		oip->i_ffs_size = length;
335 	} else {
336 		lbn = lblkno(fs, length);
337 		aflags = B_CLRBUF;
338 		if (ap->a_flags & IO_SYNC)
339 			aflags |= B_SYNC;
340 		error = VOP_BALLOC(ovp, length - 1, 1, ap->a_cred, aflags, &bp);
341 		if (error) {
342 			lfs_reserve(fs, ovp, -btofsb(fs, (2 * NIADDR + 3) << fs->lfs_bshift));
343 			return (error);
344 		}
345 		obufsize = bp->b_bufsize;
346 		odb = btofsb(fs, bp->b_bcount);
347 		oip->i_ffs_size = length;
348 		size = blksize(fs, oip, lbn);
349 		if (ovp->v_type != VDIR)
350 			memset((char *)bp->b_data + offset, 0,
351 			       (u_int)(size - offset));
352 		allocbuf(bp, size);
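		/*
		 * If the buffer was a delayed write, credit the space
		 * trimmed off by allocbuf() back to lfs_avail (and to the
		 * locked-buffer byte count if it sits on the locked queue).
		 */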
353 		if (bp->b_flags & B_DELWRI) {
354 			if ((bp->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED)
355 				locked_queue_bytes -= obufsize - bp->b_bufsize;
356 			fs->lfs_avail += odb - btofsb(fs, size);
357 		}
358 		(void) VOP_BWRITE(bp);
359 	}
360 	uvm_vnp_setsize(ovp, length);
361 	/*
362 	 * Calculate index into inode's block list of
363 	 * last direct and indirect blocks (if any)
364 	 * which we want to keep.  Lastblock is -1 when
365 	 * the file is truncated to 0.
366 	 */
367 	lastblock = lblkno(fs, length + fs->lfs_bsize - 1) - 1;
368 	lastiblock[SINGLE] = lastblock - NDADDR;
369 	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
370 	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
371 	nblocks = btofsb(fs, fs->lfs_bsize);
372 	/*
373 	 * Record changed file and block pointers before we start
374 	 * freeing blocks.  lastiblock values are also normalized to -1
375 	 * for calls to lfs_indirtrunc below.
376 	 */
377 	memcpy((caddr_t)newblks, (caddr_t)&oip->i_ffs_db[0], sizeof newblks);
378 	for (level = TRIPLE; level >= SINGLE; level--)
379 		if (lastiblock[level] < 0) {
380 			newblks[NDADDR+level] = 0;
381 			lastiblock[level] = -1;
382 		}
383 	for (i = NDADDR - 1; i > lastblock; i--)
384 		newblks[i] = 0;
385 
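	/*
	 * Go back to the old size while the blocks are being freed, so
	 * that blksize() below computes block sizes based on the old
	 * length; the new size is put back at "done:".
	 */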
386 	oip->i_ffs_size = osize;
387 	error = lfs_vtruncbuf(ovp, lastblock + 1, 0, 0);
388 	if (error && !allerror)
389 		allerror = error;
390 
391 	/*
392 	 * Indirect blocks first.
393 	 */
394 	indir_lbn[SINGLE] = -NDADDR;
395 	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
396 	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
397 	for (level = TRIPLE; level >= SINGLE; level--) {
398 		bn = oip->i_ffs_ib[level];
399 		if (bn != 0) {
400 			error = lfs_indirtrunc(oip, indir_lbn[level],
401 					       bn, lastiblock[level],
402 					       level, &count, &rcount,
403 					       &lastseg, &bc, ap->a_p);
404 			if (error)
405 				allerror = error;
406 			real_released += rcount;
407 			blocksreleased += count;
408 			if (lastiblock[level] < 0) {
409 				if (oip->i_ffs_ib[level] > 0)
410 					real_released += nblocks;
411 				blocksreleased += nblocks;
412 				oip->i_ffs_ib[level] = 0;
413 				lfs_blkfree(fs, bn, fs->lfs_bsize, &lastseg, &bc);
414 			}
415 		}
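		/*
		 * If part of this level of indirection is still in use
		 * after the truncation, all lower levels and the direct
		 * blocks are kept, so we are done.
		 */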
416 		if (lastiblock[level] >= 0)
417 			goto done;
418 	}
419 
420 	/*
421 	 * All whole direct blocks or frags.
422 	 */
423 	for (i = NDADDR - 1; i > lastblock; i--) {
424 		long bsize;
425 
426 		bn = oip->i_ffs_db[i];
427 		if (bn == 0)
428 			continue;
429 		bsize = blksize(fs, oip, i);
430 		if (oip->i_ffs_db[i] > 0)
431 			real_released += btofsb(fs, bsize);
432 		blocksreleased += btofsb(fs, bsize);
433 		oip->i_ffs_db[i] = 0;
434 		lfs_blkfree(fs, bn, bsize, &lastseg, &bc);
435 	}
436 	if (lastblock < 0)
437 		goto done;
438 
439 	/*
440 	 * Finally, look for a change in size of the
441 	 * last direct block; release any frags.
442 	 */
443 	bn = oip->i_ffs_db[lastblock];
444 	if (bn != 0) {
445 		long oldspace, newspace;
446 
447 		/*
448 		 * Calculate amount of space we're giving
449 		 * back as old block size minus new block size.
450 		 */
451 		oldspace = blksize(fs, oip, lastblock);
452 		oip->i_ffs_size = length;
453 		newspace = blksize(fs, oip, lastblock);
454 		if (newspace == 0)
455 			panic("itrunc: newspace");
456 		if (oldspace - newspace > 0) {
457 			lfs_blkfree(fs, bn, oldspace - newspace, &lastseg, &bc);
458 			if (bn > 0)
459 				real_released += btofsb(fs, oldspace - newspace);
460 			blocksreleased += btofsb(fs, oldspace - newspace);
461 		}
462 	}
463 
464 done:
465 	/* Finish segment accounting corrections */
466 	lfs_update_seguse(fs, lastseg, bc);
467 #ifdef DIAGNOSTIC
468 	for (level = SINGLE; level <= TRIPLE; level++)
469 		if (newblks[NDADDR + level] != oip->i_ffs_ib[level])
470 			panic("lfs itrunc1");
471 	for (i = 0; i < NDADDR; i++)
472 		if (newblks[i] != oip->i_ffs_db[i])
473 			panic("lfs itrunc2");
474 	if (length == 0 &&
475 	    (!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
476 		panic("lfs itrunc3");
477 #endif /* DIAGNOSTIC */
478 	/*
479 	 * Put back the real size.
480 	 */
481 	oip->i_ffs_size = length;
482 	oip->i_lfs_effnblks -= blocksreleased;
483 	oip->i_ffs_blocks -= real_released;
484 	fs->lfs_bfree += blocksreleased;
485 #ifdef DIAGNOSTIC
486 	if (oip->i_ffs_size == 0 && oip->i_ffs_blocks != 0) {
487 		printf("lfs_truncate: truncate to 0 but %d blocks on inode\n",
488 		       oip->i_ffs_blocks);
489 		panic("lfs_truncate: persistent blocks");
490 	}
491 #endif
492 	oip->i_flag |= IN_CHANGE;
493 #ifdef QUOTA
494 	(void) chkdq(oip, -blocksreleased, NOCRED, 0);
495 #endif
496 	lfs_reserve(fs, ovp, -btofsb(fs, (2 * NIADDR + 3) << fs->lfs_bshift));
497 	return (allerror);
498 }
499 
500 /* Update segment usage information when removing a block. */
501 static int
502 lfs_blkfree(struct lfs *fs, daddr_t daddr, size_t bsize, long *lastseg,
503 	    size_t *num)
504 {
505 	long seg;
506 	int error = 0;
507 
508 	bsize = fragroundup(fs, bsize);
509 	if (daddr > 0) {
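		/*
		 * Moving to a different segment: push the byte count
		 * accumulated for the previous segment first.
		 */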
510 		if (*lastseg != (seg = dtosn(fs, daddr))) {
511 			error = lfs_update_seguse(fs, *lastseg, *num);
512 			*num = bsize;
513 			*lastseg = seg;
514 		} else
515 			*num += bsize;
516 	}
517 	return error;
518 }
519 
520 /* Finish the accounting updates for a segment. */
521 static int
522 lfs_update_seguse(struct lfs *fs, long lastseg, size_t num)
523 {
524 	SEGUSE *sup;
525 	struct buf *bp;
526 
527 	if (lastseg < 0 || num == 0)
528 		return 0;
529 
531 	LFS_SEGENTRY(sup, fs, lastseg, bp);
532 	if (num > sup->su_nbytes) {
533 		printf("lfs_truncate: segment %ld short by %ld\n",
534 		       lastseg, (long)num - (long)sup->su_nbytes);
535 		panic("lfs_truncate: negative bytes");
536 		sup->su_nbytes = num;
537 	}
538 	sup->su_nbytes -= num;
539 	return (VOP_BWRITE(bp)); /* Ifile */
540 }
541 
542 /*
543  * Release blocks associated with the inode ip and stored in the indirect
544  * block bn.  Blocks are freed in LIFO order up to (but not including)
545  * lastbn.  If level is greater than SINGLE, the block is an indirect block
546  * and recursive calls to indirtrunc must be used to cleanse other indirect
547  * blocks.
548  *
549  * NB: triple indirect blocks are untested.
550  */
551 static int
552 lfs_indirtrunc(struct inode *ip, ufs_daddr_t lbn, daddr_t dbn,
553 	       ufs_daddr_t lastbn, int level, long *countp,
554 	       long *rcountp, long *lastsegp, size_t *bcp, struct proc *p)
555 {
556 	int i;
557 	struct buf *bp;
558 	struct lfs *fs = ip->i_lfs;
559 	ufs_daddr_t *bap;
560 	struct vnode *vp;
561 	ufs_daddr_t *copy = NULL, nb, nlbn, last;
562 	long blkcount, rblkcount, factor;
563 	int nblocks, blocksreleased = 0, real_released = 0;
564 	int error = 0, allerror = 0;
565 
566 	/*
567 	 * Calculate index in current block of last
568 	 * block to be kept.  -1 indicates the entire
569 	 * block so we need not calculate the index.
570 	 */
571 	factor = 1;
572 	for (i = SINGLE; i < level; i++)
573 		factor *= NINDIR(fs);
574 	last = lastbn;
575 	if (lastbn > 0)
576 		last /= factor;
577 	nblocks = btofsb(fs, fs->lfs_bsize);
578 	/*
579 	 * Get a buffer of block pointers, zero those entries corresponding
580 	 * to blocks to be freed, and update the on-disk copy first.  Since
581 	 * the double (triple) indirect block is updated before the single
582 	 * (double) indirect blocks below it are visited, bmap on those
583 	 * blocks would fail.  However, we already have the on-disk address,
584 	 * so we set b_blkno explicitly instead of letting bread do it all.
585 	 */
586 	vp = ITOV(ip);
587 	bp = getblk(vp, lbn, (int)fs->lfs_bsize, 0, 0);
588 	if (bp->b_flags & (B_DONE | B_DELWRI)) {
589 		/* Braces must be here in case trace evaluates to nothing. */
590 		trace(TR_BREADHIT, pack(vp, fs->lfs_bsize), lbn);
591 	} else {
592 		trace(TR_BREADMISS, pack(vp, fs->lfs_bsize), lbn);
593 		p->p_stats->p_ru.ru_inblock++;	/* pay for read */
594 		bp->b_flags |= B_READ;
595 		if (bp->b_bcount > bp->b_bufsize)
596 			panic("lfs_indirtrunc: bad buffer size");
597 		bp->b_blkno = fsbtodb(fs, dbn);
598 		VOP_STRATEGY(bp);
599 		error = biowait(bp);
600 	}
601 	if (error) {
602 		brelse(bp);
603 		*countp = *rcountp = 0;
604 		return (error);
605 	}
606 
607 	bap = (ufs_daddr_t *)bp->b_data;
608 	if (lastbn >= 0) {
609 		MALLOC(copy, ufs_daddr_t *, fs->lfs_bsize, M_TEMP, M_WAITOK);
610 		memcpy((caddr_t)copy, (caddr_t)bap, (u_int)fs->lfs_bsize);
611 		memset((caddr_t)&bap[last + 1], 0,
612 		  (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t));
613 		error = VOP_BWRITE(bp);
614 		if (error)
615 			allerror = error;
616 		bap = copy;
617 	}
618 
619 	/*
620 	 * Recursively free totally unused blocks.
621 	 */
622 	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
623 	    i--, nlbn += factor) {
624 		nb = bap[i];
625 		if (nb == 0)
626 			continue;
627 		if (level > SINGLE) {
628 			error = lfs_indirtrunc(ip, nlbn, nb,
629 					       (ufs_daddr_t)-1, level - 1,
630 					       &blkcount, &rblkcount,
631 					       lastsegp, bcp, p);
632 			if (error)
633 				allerror = error;
634 			blocksreleased += blkcount;
635 			real_released += rblkcount;
636 		}
637 		lfs_blkfree(fs, nb, fs->lfs_bsize, lastsegp, bcp);
638 		if (bap[i] > 0)
639 			real_released += nblocks;
640 		blocksreleased += nblocks;
641 	}
642 
643 	/*
644 	 * Recursively free last partial block.
645 	 */
646 	if (level > SINGLE && lastbn >= 0) {
647 		last = lastbn % factor;
648 		nb = bap[i];
649 		if (nb != 0) {
650 			error = lfs_indirtrunc(ip, nlbn, nb,
651 					       last, level - 1, &blkcount,
652 					       &rblkcount, lastsegp, bcp, p);
653 			if (error)
654 				allerror = error;
655 			real_released += rblkcount;
656 			blocksreleased += blkcount;
657 		}
658 	}
659 
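	/*
	 * If part of the block was kept, the zeroed original has already
	 * been written out above and bap points at a private copy, which
	 * we free here.  Otherwise the whole indirect block goes away:
	 * give back any delayed-write accounting and invalidate the buffer.
	 */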
660 	if (copy != NULL) {
661 		FREE(copy, M_TEMP);
662 	} else {
663 		if (bp->b_flags & B_DELWRI) {
664 			LFS_UNLOCK_BUF(bp);
665 			fs->lfs_avail += btofsb(fs, bp->b_bcount);
666 			wakeup(&fs->lfs_avail);
667 		}
668 		bp->b_flags |= B_INVAL;
669 		brelse(bp);
670 	}
671 
672 	*countp = blocksreleased;
673 	*rcountp = real_released;
674 	return (allerror);
675 }
676 
677 /*
678  * Destroy any in-core buffers past the truncation length.
679  * Copied from vtruncbuf, so that lfs_avail can be updated.
680  */
681 static int
682 lfs_vtruncbuf(struct vnode *vp, daddr_t lbn, int slpflag, int slptimeo)
683 {
684 	struct buf *bp, *nbp;
685 	int s, error;
686 	struct lfs *fs;
687 
688 	fs = VTOI(vp)->i_lfs;
689 	s = splbio();
690 
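	/*
	 * Walk the clean list and then the dirty list, invalidating
	 * every buffer at or past lbn.  Delayed-write buffers have the
	 * space they claimed returned to lfs_avail.
	 */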
691 restart:
692 	for (bp = LIST_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
693 		nbp = LIST_NEXT(bp, b_vnbufs);
694 		if (bp->b_lblkno < lbn)
695 			continue;
696 		if (bp->b_flags & B_BUSY) {
697 			bp->b_flags |= B_WANTED;
698 			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
699 			    "lfs_vtruncbuf", slptimeo);
700 			if (error) {
701 				splx(s);
702 				return (error);
703 			}
704 			goto restart;
705 		}
706 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
707 		if (bp->b_flags & B_DELWRI) {
708 			bp->b_flags &= ~B_DELWRI;
709 			fs->lfs_avail += btofsb(fs, bp->b_bcount);
710 			wakeup(&fs->lfs_avail);
711 		}
712 		LFS_UNLOCK_BUF(bp);
713 		brelse(bp);
714 	}
715 
716 	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
717 		nbp = LIST_NEXT(bp, b_vnbufs);
718 		if (bp->b_lblkno < lbn)
719 			continue;
720 		if (bp->b_flags & B_BUSY) {
721 			bp->b_flags |= B_WANTED;
722 			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
723 			    "lfs_vtruncbuf", slptimeo);
724 			if (error) {
725 				splx(s);
726 				return (error);
727 			}
728 			goto restart;
729 		}
730 		bp->b_flags |= B_BUSY | B_INVAL | B_VFLUSH;
731 		if (bp->b_flags & B_DELWRI) {
732 			bp->b_flags &= ~B_DELWRI;
733 			fs->lfs_avail += btofsb(fs, bp->b_bcount);
734 			wakeup(&fs->lfs_avail);
735 		}
736 		LFS_UNLOCK_BUF(bp);
737 		brelse(bp);
738 	}
739 
740 	splx(s);
741 
742 	return (0);
743 }
744 
745