1 /*	$NetBSD: lfs_balloc.c,v 1.65 2008/02/15 13:30:56 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Konrad E. Schroder <perseant@hhhh.org>.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the NetBSD
21  *	Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 /*
39  * Copyright (c) 1989, 1991, 1993
40  *	The Regents of the University of California.  All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)lfs_balloc.c	8.4 (Berkeley) 5/8/95
67  */
68 
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.65 2008/02/15 13:30:56 ad Exp $");
71 
72 #if defined(_KERNEL_OPT)
73 #include "opt_quota.h"
74 #endif
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/buf.h>
79 #include <sys/proc.h>
80 #include <sys/vnode.h>
81 #include <sys/mount.h>
82 #include <sys/resourcevar.h>
83 #include <sys/tree.h>
84 #include <sys/trace.h>
85 #include <sys/kauth.h>
86 
87 #include <miscfs/specfs/specdev.h>
88 
89 #include <ufs/ufs/quota.h>
90 #include <ufs/ufs/inode.h>
91 #include <ufs/ufs/ufsmount.h>
92 #include <ufs/ufs/ufs_extern.h>
93 
94 #include <ufs/lfs/lfs.h>
95 #include <ufs/lfs/lfs_extern.h>
96 
97 #include <uvm/uvm.h>
98 
99 int lfs_fragextend(struct vnode *, int, int, daddr_t, struct buf **, kauth_cred_t);
100 
101 u_int64_t locked_fakequeue_count;
102 
103 /*
104  * Allocate a block, and do inode and filesystem block accounting for it
105  * and for any indirect blocks that may need to be created in order for
106  * this block to be created.
107  *
108  * Blocks which have never been accounted for (i.e., which "do not exist")
109  * have disk address 0, which is translated by ufs_bmap to the special value
110  * UNASSIGNED == -1, as in the historical UFS.
111  *
112  * Blocks which have been accounted for but which have not yet been written
113  * to disk are given the new special disk address UNWRITTEN == -2, so that
114  * they can be differentiated from completely new blocks.
115  */
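/*
 * In short, within this file a block's disk address is one of:
 *
 *	UNASSIGNED (-1)	never accounted for (stored on disk as 0)
 *	UNWRITTEN  (-2)	accounted for, but not yet written to disk
 *	anything else	an actual on-disk address
 */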
116 /* VOP_BWRITE NIADDR+2 times */
117 int
118 lfs_balloc(struct vnode *vp, off_t startoffset, int iosize, kauth_cred_t cred,
119     int flags, struct buf **bpp)
120 {
121 	int offset;
122 	daddr_t daddr, idaddr;
123 	struct buf *ibp, *bp;
124 	struct inode *ip;
125 	struct lfs *fs;
126 	struct indir indirs[NIADDR+2], *idp;
127 	daddr_t	lbn, lastblock;
128 	int bb, bcount;
129 	int error, frags, i, nsize, osize, num;
130 
131 	ip = VTOI(vp);
132 	fs = ip->i_lfs;
133 	offset = blkoff(fs, startoffset);
134 	KASSERT(iosize <= fs->lfs_bsize);
135 	lbn = lblkno(fs, startoffset);
136 	/* (void)lfs_check(vp, lbn, 0); */
137 
138 	ASSERT_MAYBE_SEGLOCK(fs);
139 
140 	/*
141 	 * Three cases: it's a block beyond the end of file, it's a block in
142 	 * the file that may or may not have been assigned a disk address, or
143 	 * we're writing an entire block.
144 	 *
145 	 * Note that if the daddr is UNWRITTEN, the block already exists in
146 	 * the cache (it was read or written earlier).  If so, make sure
147 	 * we don't count it as a new block or zero out its contents.  If
148 	 * it does not yet exist, make sure we allocate any necessary
149 	 * indirect blocks.
150 	 *
151 	 * If we are writing a block beyond the end of the file, we need to
152 	 * check if the old last block was a fragment.	If it was, we need
153 	 * to rewrite it.
154 	 */
155 
156 	if (bpp)
157 		*bpp = NULL;
158 
159 	/* Check for block beyond end of file and fragment extension needed. */
160 	lastblock = lblkno(fs, ip->i_size);
161 	if (lastblock < NDADDR && lastblock < lbn) {
162 		osize = blksize(fs, ip, lastblock);
163 		if (osize < fs->lfs_bsize && osize > 0) {
164 			if ((error = lfs_fragextend(vp, osize, fs->lfs_bsize,
165 						    lastblock,
166 						    (bpp ? &bp : NULL), cred)))
167 				return (error);
168 			ip->i_ffs1_size = ip->i_size =
169 			    (lastblock + 1) * fs->lfs_bsize;
170 			uvm_vnp_setsize(vp, ip->i_size);
171 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
172 			if (bpp)
173 				(void) VOP_BWRITE(bp);
174 		}
175 	}
176 
177 	/*
178 	 * If the block we are writing is a direct block, it is the last
179 	 * block in the file, and offset + iosize is less than a full
180 	 * block, we can write one or more fragments.  There are two cases:
181 	 * either the block is brand new and we should allocate it at the
182 	 * correct size, or it already exists, contains some fragments,
183 	 * and we may need to extend it.
184 	 */
185 	if (lbn < NDADDR && lblkno(fs, ip->i_size) <= lbn) {
186 		osize = blksize(fs, ip, lbn);
187 		nsize = fragroundup(fs, offset + iosize);
188 		if (lblktosize(fs, lbn) >= ip->i_size) {
189 			/* Brand new block or fragment */
190 			frags = numfrags(fs, nsize);
191 			bb = fragstofsb(fs, frags);
192 			if (!ISSPACE(fs, bb, cred))
193 				return ENOSPC;
194 			if (bpp) {
195 				*bpp = bp = getblk(vp, lbn, nsize, 0, 0);
196 				bp->b_blkno = UNWRITTEN;
197 				if (flags & B_CLRBUF)
198 					clrbuf(bp);
199 			}
200 			ip->i_lfs_effnblks += bb;
201 			mutex_enter(&lfs_lock);
202 			fs->lfs_bfree -= bb;
203 			mutex_exit(&lfs_lock);
204 			ip->i_ffs1_db[lbn] = UNWRITTEN;
205 		} else {
206 			if (nsize <= osize) {
207 				/* No need to extend */
208 				if (bpp && (error = bread(vp, lbn, osize, NOCRED, &bp)))
209 					return error;
210 			} else {
211 				/* Extend existing block */
212 				if ((error =
213 				     lfs_fragextend(vp, osize, nsize, lbn,
214 						    (bpp ? &bp : NULL), cred)))
215 					return error;
216 			}
217 			if (bpp)
218 				*bpp = bp;
219 		}
220 		return 0;
221 	}
222 
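	/*
	 * Look up the block's current disk address and the chain of
	 * indirect blocks (if any) that leads to it.
	 */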
223 	error = ufs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
224 	if (error)
225 		return (error);
226 
227 	daddr = (daddr_t)((int32_t)daddr); /* XXX ondisk32 */
228 	KASSERT(daddr <= LFS_MAX_DADDR);
229 
230 	/*
231 	 * Do byte accounting all at once, so we can gracefully fail *before*
232 	 * we start assigning blocks.
233 	 */
234 	bb = VFSTOUFS(vp->v_mount)->um_seqinc;
235 	bcount = 0;
236 	if (daddr == UNASSIGNED) {
237 		bcount = bb;
238 	}
239 	for (i = 1; i < num; ++i) {
240 		if (!indirs[i].in_exists) {
241 			bcount += bb;
242 		}
243 	}
244 	if (ISSPACE(fs, bcount, cred)) {
245 		mutex_enter(&lfs_lock);
246 		fs->lfs_bfree -= bcount;
247 		mutex_exit(&lfs_lock);
248 		ip->i_lfs_effnblks += bcount;
249 	} else {
250 		return ENOSPC;
251 	}
252 
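	/*
	 * If this brand-new block is reached through indirect blocks,
	 * mark the inode's indirect pointer UNWRITTEN if necessary and
	 * create any missing indirect blocks along the path.
	 */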
253 	if (daddr == UNASSIGNED) {
254 		if (num > 0 && ip->i_ffs1_ib[indirs[0].in_off] == 0) {
255 			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
256 		}
257 
258 		/*
259 		 * Create new indirect blocks if necessary
260 		 */
261 		if (num > 1) {
262 			idaddr = ip->i_ffs1_ib[indirs[0].in_off];
263 			for (i = 1; i < num; ++i) {
264 				ibp = getblk(vp, indirs[i].in_lbn,
265 				    fs->lfs_bsize, 0,0);
266 				if (!indirs[i].in_exists) {
267 					clrbuf(ibp);
268 					ibp->b_blkno = UNWRITTEN;
269 				} else if (!(ibp->b_oflags & (BO_DELWRI | BO_DONE))) {
270 					ibp->b_blkno = fsbtodb(fs, idaddr);
271 					ibp->b_flags |= B_READ;
272 					VOP_STRATEGY(vp, ibp);
273 					biowait(ibp);
274 				}
275 				/*
276 				 * This block exists, but the next one may not.
277 				 * If that is the case, mark it UNWRITTEN to keep
278 				 * the accounting straight.
279 				 */
280 				/* XXX ondisk32 */
281 				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
282 					((int32_t *)ibp->b_data)[indirs[i].in_off] =
283 						UNWRITTEN;
284 				/* XXX ondisk32 */
285 				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
286 #ifdef DEBUG
287 				if (vp == fs->lfs_ivnode) {
288 					LFS_ENTER_LOG("balloc", __FILE__,
289 						__LINE__, indirs[i].in_lbn,
290 						ibp->b_flags, curproc->p_pid);
291 				}
292 #endif
293 				if ((error = VOP_BWRITE(ibp)))
294 					return error;
295 			}
296 		}
297 	}
298 
299 
300 	/*
301 	 * Get the existing block from the cache, if requested.
302 	 */
303 	frags = fsbtofrags(fs, bb);
304 	if (bpp)
305 		*bpp = bp = getblk(vp, lbn, blksize(fs, ip, lbn), 0, 0);
306 
307 	/*
308 	 * Do accounting on blocks that represent pages.
309 	 */
310 	if (!bpp)
311 		lfs_register_block(vp, lbn);
312 
313 	/*
314 	 * The block we are writing may be a brand new block
315 	 * in which case we need to do accounting.
316 	 *
317 	 * We can tell a truly new block because ufs_bmaparray will say
318 	 * it is UNASSIGNED.  Once we allocate it we will assign it the
319 	 * disk address UNWRITTEN.
320 	 */
321 	if (daddr == UNASSIGNED) {
322 		if (bpp) {
323 			if (flags & B_CLRBUF)
324 				clrbuf(bp);
325 
326 			/* Note the new address */
327 			bp->b_blkno = UNWRITTEN;
328 		}
329 
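		/*
		 * Record UNWRITTEN in whichever pointer refers to this
		 * block: a direct pointer in the inode, the inode's
		 * indirect pointer, or an entry in the parent indirect
		 * block.
		 */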
330 		switch (num) {
331 		    case 0:
332 			ip->i_ffs1_db[lbn] = UNWRITTEN;
333 			break;
334 		    case 1:
335 			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
336 			break;
337 		    default:
338 			idp = &indirs[num - 1];
339 			if (bread(vp, idp->in_lbn, fs->lfs_bsize, NOCRED,
340 				  &ibp))
341 				panic("lfs_balloc: bread bno %lld",
342 				    (long long)idp->in_lbn);
343 			/* XXX ondisk32 */
344 			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
345 #ifdef DEBUG
346 			if (vp == fs->lfs_ivnode) {
347 				LFS_ENTER_LOG("balloc", __FILE__,
348 					__LINE__, idp->in_lbn,
349 					ibp->b_flags, curproc->p_pid);
350 			}
351 #endif
352 			VOP_BWRITE(ibp);
353 		}
354 	} else if (bpp && !(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
355 		/*
356 		 * Not a brand new block, also not in the cache;
357 		 * read it in from disk.
358 		 */
359 		if (iosize == fs->lfs_bsize)
360 			/* Optimization: I/O is unnecessary. */
361 			bp->b_blkno = daddr;
362 		else {
363 			/*
364 			 * We need to read the block to preserve the
365 			 * existing bytes.
366 			 */
367 			bp->b_blkno = daddr;
368 			bp->b_flags |= B_READ;
369 			VOP_STRATEGY(vp, bp);
370 			return (biowait(bp));
371 		}
372 	}
373 
374 	return (0);
375 }
376 
377 /* VOP_BWRITE 1 time */
378 int
379 lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf **bpp,
380     kauth_cred_t cred)
381 {
382 	struct inode *ip;
383 	struct lfs *fs;
384 	long bb;
385 	int error;
386 	extern long locked_queue_bytes;
387 	size_t obufsize;
388 
389 	ip = VTOI(vp);
390 	fs = ip->i_lfs;
391 	bb = (long)fragstofsb(fs, numfrags(fs, nsize - osize));
392 	error = 0;
393 
394 	ASSERT_NO_SEGLOCK(fs);
395 
396 	/*
397 	 * Take the fraglock so we don't enlarge blocks while a segment
398 	 * is being written.  If we're called with bpp==NULL, though,
399 	 * we are only pretending to change a buffer, so we don't have to
400 	 * lock.
401 	 */
402     top:
403 	if (bpp) {
404 		rw_enter(&fs->lfs_fraglock, RW_READER);
405 		LFS_DEBUG_COUNTLOCKED("frag");
406 	}
407 
408 	if (!ISSPACE(fs, bb, cred)) {
409 		error = ENOSPC;
410 		goto out;
411 	}
412 
413 	/*
414 	 * If we are not asked to actually return the block, all we need
415 	 * to do is allocate space for it.  UBC will handle dirtying the
416 	 * appropriate things and making sure it all goes to disk.
417 	 * Don't bother to read in that case.
418 	 */
419 	if (bpp && (error = bread(vp, lbn, osize, NOCRED, bpp))) {
420 		brelse(*bpp, 0);
421 		goto out;
422 	}
423 #ifdef QUOTA
424 	if ((error = chkdq(ip, bb, cred, 0))) {
425 		if (bpp)
426 			brelse(*bpp, 0);
427 		goto out;
428 	}
429 #endif
430 	/*
431 	 * Adjust accounting for lfs_avail.  If there's not enough room,
432 	 * we will have to wait for the cleaner, which we can't do while
433 	 * holding a block busy or while holding the seglock.  In that case,
434 	 * release both and start over after waiting.
435 	 */
436 
437 	if (bpp && ((*bpp)->b_oflags & BO_DELWRI)) {
438 		if (!lfs_fits(fs, bb)) {
439 			if (bpp)
440 				brelse(*bpp, 0);
441 #ifdef QUOTA
442 			chkdq(ip, -bb, cred, 0);
443 #endif
444 			rw_exit(&fs->lfs_fraglock);
445 			lfs_availwait(fs, bb);
446 			goto top;
447 		}
448 		fs->lfs_avail -= bb;
449 	}
450 
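	/*
	 * Charge the additional frags to the filesystem and to this
	 * inode's effective block count.
	 */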
451 	mutex_enter(&lfs_lock);
452 	fs->lfs_bfree -= bb;
453 	mutex_exit(&lfs_lock);
454 	ip->i_lfs_effnblks += bb;
455 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
456 
457 	if (bpp) {
458 		obufsize = (*bpp)->b_bufsize;
459 		allocbuf(*bpp, nsize, 1);
460 
461 		/* Adjust locked-list accounting */
462 		if (((*bpp)->b_flags & B_LOCKED) != 0 &&
463 		    (*bpp)->b_iodone == NULL) {
464 			mutex_enter(&lfs_lock);
465 			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
466 			mutex_exit(&lfs_lock);
467 		}
468 
469 		bzero((char *)((*bpp)->b_data) + osize, (u_int)(nsize - osize));
470 	}
471 
472     out:
473 	if (bpp) {
474 		rw_exit(&fs->lfs_fraglock);
475 	}
476 	return (error);
477 }
478 
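/*
 * Comparator for the per-inode splay tree of pending block numbers
 * (i_lfs_lbtree), keyed by logical block number.
 */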
479 static inline int
480 lge(struct lbnentry *a, struct lbnentry *b)
481 {
482 	return (a->lbn < b->lbn) ? -1 : (a->lbn > b->lbn);	/* avoid truncating a 64-bit difference */
483 }
484 
485 SPLAY_PROTOTYPE(lfs_splay, lbnentry, entry, lge);
486 
487 SPLAY_GENERATE(lfs_splay, lbnentry, entry, lge);
488 
489 /*
490  * Record this lbn as being "write pending".  We used to have this information
491  * on the buffer headers, but since pages don't have buffer headers we
492  * record it here instead.
493  */
494 void
495 lfs_register_block(struct vnode *vp, daddr_t lbn)
496 {
497 	struct lfs *fs;
498 	struct inode *ip;
499 	struct lbnentry *lbp;
500 
501 	ip = VTOI(vp);
502 
503 	/* Don't count metadata */
504 	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
505 		return;
506 
507 	fs = ip->i_lfs;
508 
509 	ASSERT_NO_SEGLOCK(fs);
510 
511 	/* If no space, wait for the cleaner */
512 	lfs_availwait(fs, btofsb(fs, 1 << fs->lfs_bshift));
513 
514 	lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
515 	lbp->lbn = lbn;
516 	mutex_enter(&lfs_lock);
517 	if (SPLAY_INSERT(lfs_splay, &ip->i_lfs_lbtree, lbp) != NULL) {
518 		mutex_exit(&lfs_lock);
519 		/* Already there */
520 		pool_put(&lfs_lbnentry_pool, lbp);
521 		return;
522 	}
523 
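	/*
	 * New entry: account for it against lfs_favail and the page
	 * counters, and note one more block on the fake queue.
	 */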
524 	++ip->i_lfs_nbtree;
525 	fs->lfs_favail += btofsb(fs, (1 << fs->lfs_bshift));
526 	fs->lfs_pages += fs->lfs_bsize >> PAGE_SHIFT;
527 	++locked_fakequeue_count;
528 	lfs_subsys_pages += fs->lfs_bsize >> PAGE_SHIFT;
529 	mutex_exit(&lfs_lock);
530 }
531 
532 static void
533 lfs_do_deregister(struct lfs *fs, struct inode *ip, struct lbnentry *lbp)
534 {
535 	ASSERT_MAYBE_SEGLOCK(fs);
536 
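	/* Undo the accounting done in lfs_register_block(). */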
537 	mutex_enter(&lfs_lock);
538 	--ip->i_lfs_nbtree;
539 	SPLAY_REMOVE(lfs_splay, &ip->i_lfs_lbtree, lbp);
540 	if (fs->lfs_favail > btofsb(fs, (1 << fs->lfs_bshift)))
541 		fs->lfs_favail -= btofsb(fs, (1 << fs->lfs_bshift));
542 	fs->lfs_pages -= fs->lfs_bsize >> PAGE_SHIFT;
543 	if (locked_fakequeue_count > 0)
544 		--locked_fakequeue_count;
545 	lfs_subsys_pages -= fs->lfs_bsize >> PAGE_SHIFT;
546 	mutex_exit(&lfs_lock);
547 
548 	pool_put(&lfs_lbnentry_pool, lbp);
549 }
550 
551 void
552 lfs_deregister_block(struct vnode *vp, daddr_t lbn)
553 {
554 	struct lfs *fs;
555 	struct inode *ip;
556 	struct lbnentry *lbp;
557 	struct lbnentry tmp;
558 
559 	ip = VTOI(vp);
560 
561 	/* Don't count metadata */
562 	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
563 		return;
564 
565 	fs = ip->i_lfs;
566 	tmp.lbn = lbn;
567 	lbp = SPLAY_FIND(lfs_splay, &ip->i_lfs_lbtree, &tmp);
568 	if (lbp == NULL)
569 		return;
570 
571 	lfs_do_deregister(fs, ip, lbp);
572 }
573 
574 void
575 lfs_deregister_all(struct vnode *vp)
576 {
577 	struct lbnentry *lbp, *nlbp;
578 	struct lfs_splay *hd;
579 	struct lfs *fs;
580 	struct inode *ip;
581 
582 	ip = VTOI(vp);
583 	fs = ip->i_lfs;
584 	hd = &ip->i_lfs_lbtree;
585 
586 	for (lbp = SPLAY_MIN(lfs_splay, hd); lbp != NULL; lbp = nlbp) {
587 		nlbp = SPLAY_NEXT(lfs_splay, hd, lbp);
588 		lfs_do_deregister(fs, ip, lbp);
589 	}
590 }
591