/*	$NetBSD: lfs_subr.c,v 1.55 2005/12/11 12:25:26 christos Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_subr.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_subr.c,v 1.55 2005/12/11 12:25:26 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <ufs/ufs/inode.h>
#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * Return a buffer with the contents of block "offset" from the beginning
 * of directory "vp".  If "res" is non-NULL, fill it in with a pointer to
 * the remaining space in the directory.
 */
int
lfs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct lfs *fs;
	struct inode *ip;
	struct buf *bp;
	daddr_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}

#ifdef DEBUG
const char *lfs_res_names[LFS_NB_COUNT] = {
	"summary",
	"superblock",
	"file block",
	"cluster",
	"clean",
	"blkiov",
};
#endif

int lfs_res_qty[LFS_NB_COUNT] = {
	LFS_N_SUMMARIES,
	LFS_N_SBLOCKS,
	LFS_N_IBLOCKS,
	LFS_N_CLUSTERS,
	LFS_N_CLEAN,
	LFS_N_BLKIOV,
};
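
/*
 * A note on sizing (editorial sketch, not compiled): lfs_setup_resblks()
 * below assumes that LFS_N_TOTAL is the sum of the per-type counts in
 * lfs_res_qty[], i.e.
 *
 *	LFS_N_TOTAL == LFS_N_SUMMARIES + LFS_N_SBLOCKS + LFS_N_IBLOCKS +
 *	    LFS_N_CLUSTERS + LFS_N_CLEAN + LFS_N_BLKIOV
 *
 * since it walks a single index "i" across all LFS_N_TOTAL slots while
 * the per-type loops consume exactly lfs_res_qty[type] slots each.  If
 * the constants in lfs.h are ever changed, they must be kept in step.
 */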

void
lfs_setup_resblks(struct lfs *fs)
{
	int i, j;
	int maxbpp;

	ASSERT_NO_SEGLOCK(fs);
	fs->lfs_resblk = (res_t *)malloc(LFS_N_TOTAL * sizeof(res_t),
	    M_SEGMENT, M_WAITOK);
	for (i = 0; i < LFS_N_TOTAL; i++) {
		fs->lfs_resblk[i].inuse = 0;
		fs->lfs_resblk[i].p = NULL;
	}
	for (i = 0; i < LFS_RESHASH_WIDTH; i++)
		LIST_INIT(fs->lfs_reshash + i);

	/*
	 * These types of allocations can be larger than a page,
	 * so we can't use the pool subsystem for them.
	 */
	for (i = 0, j = 0; j < LFS_N_SUMMARIES; j++, i++)
		fs->lfs_resblk[i].size = fs->lfs_sumsize;
	for (j = 0; j < LFS_N_SBLOCKS; j++, i++)
		fs->lfs_resblk[i].size = LFS_SBPAD;
	for (j = 0; j < LFS_N_IBLOCKS; j++, i++)
		fs->lfs_resblk[i].size = fs->lfs_bsize;
	for (j = 0; j < LFS_N_CLUSTERS; j++, i++)
		fs->lfs_resblk[i].size = MAXPHYS;
	for (j = 0; j < LFS_N_CLEAN; j++, i++)
		fs->lfs_resblk[i].size = MAXPHYS;
	for (j = 0; j < LFS_N_BLKIOV; j++, i++)
		fs->lfs_resblk[i].size = LFS_MARKV_MAXBLKCNT * sizeof(BLOCK_INFO);

	for (i = 0; i < LFS_N_TOTAL; i++) {
		fs->lfs_resblk[i].p = malloc(fs->lfs_resblk[i].size,
		    M_SEGMENT, M_WAITOK);
	}

	/*
	 * Initialize pools for small types (XXX is BPP small?)
	 */
	pool_init(&fs->lfs_clpool, sizeof(struct lfs_cluster), 0, 0, 0,
	    "lfsclpl", &pool_allocator_nointr);
	pool_init(&fs->lfs_segpool, sizeof(struct segment), 0, 0, 0,
	    "lfssegpool", &pool_allocator_nointr);
	maxbpp = ((fs->lfs_sumsize - SEGSUM_SIZE(fs)) / sizeof(int32_t) + 2);
	maxbpp = MIN(maxbpp, segsize(fs) / fs->lfs_fsize + 2);
	pool_init(&fs->lfs_bpppool, maxbpp * sizeof(struct buf *), 0, 0, 0,
	    "lfsbpppl", &pool_allocator_nointr);
}

void
lfs_free_resblks(struct lfs *fs)
{
	int i;

	pool_destroy(&fs->lfs_bpppool);
	pool_destroy(&fs->lfs_segpool);
	pool_destroy(&fs->lfs_clpool);

	simple_lock(&fs->lfs_interlock);
	for (i = 0; i < LFS_N_TOTAL; i++) {
		while (fs->lfs_resblk[i].inuse)
			ltsleep(&fs->lfs_resblk, PRIBIO + 1, "lfs_free", 0,
			    &fs->lfs_interlock);
		if (fs->lfs_resblk[i].p != NULL)
			free(fs->lfs_resblk[i].p, M_SEGMENT);
	}
	free(fs->lfs_resblk, M_SEGMENT);
	simple_unlock(&fs->lfs_interlock);
}

static unsigned int
lfs_mhash(void *vp)
{
	return (unsigned int)(((unsigned long)vp) >> 2) % LFS_RESHASH_WIDTH;
}
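
/*
 * Why the ">> 2" in lfs_mhash (editorial note): the hashed values are
 * pointers returned by malloc(), which are at least word aligned, so
 * their low two bits are always zero and carry no information.  Shifting
 * them out before taking the modulus spreads nearby allocations across
 * buckets rather than piling them into the few buckets that zero bits
 * would select.  For example, with the width a small prime (the actual
 * LFS_RESHASH_WIDTH value lives in lfs.h), two word-adjacent pointers
 * p and p + 4 hash to consecutive, distinct buckets.
 */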

/*
 * Return memory of the given size for the given purpose; if malloc()
 * fails, fall back to one of a fixed number of spare last-resort buffers.
 */
void *
lfs_malloc(struct lfs *fs, size_t size, int type)
{
	struct lfs_res_blk *re;
	void *r;
	int i, s, start;
	unsigned int h;

	ASSERT_MAYBE_SEGLOCK(fs);
	r = NULL;

	/* If no reserve blocks were set up for this type, just wait in malloc */
	if (lfs_res_qty[type] == 0) {
		r = malloc(size, M_SEGMENT, M_WAITOK);
		return r;
	}

	/* Otherwise try a quick malloc, and if it works, great */
	if ((r = malloc(size, M_SEGMENT, M_NOWAIT)) != NULL) {
		return r;
	}

	/*
	 * If malloc returned NULL, we are forced to use one of our
	 * reserve blocks.  We have on hand at least one summary block,
	 * at least one cluster block, at least one superblock,
	 * and several indirect blocks.
	 */
	simple_lock(&fs->lfs_interlock);
	/* skip over blocks of other types */
	for (i = 0, start = 0; i < type; i++)
		start += lfs_res_qty[i];
	while (r == NULL) {
		for (i = 0; i < lfs_res_qty[type]; i++) {
			if (fs->lfs_resblk[start + i].inuse == 0) {
				re = fs->lfs_resblk + start + i;
				re->inuse = 1;
				r = re->p;
				KASSERT(re->size >= size);
				h = lfs_mhash(r);
				s = splbio();
				LIST_INSERT_HEAD(&fs->lfs_reshash[h], re, res);
				splx(s);
				simple_unlock(&fs->lfs_interlock);
				return r;
			}
		}
		DLOG((DLOG_MALLOC, "sleeping on %s (%d)\n",
		    lfs_res_names[type], lfs_res_qty[type]));
		ltsleep(&fs->lfs_resblk, PVM, "lfs_malloc", 0,
		    &fs->lfs_interlock);
		DLOG((DLOG_MALLOC, "done sleeping on %s\n",
		    lfs_res_names[type]));
	}
	/* NOTREACHED */
	simple_unlock(&fs->lfs_interlock);
	return r;
}

void
lfs_free(struct lfs *fs, void *p, int type)
{
	int s;
	unsigned int h;
	res_t *re;
#ifdef DEBUG
	int i;
#endif

	ASSERT_MAYBE_SEGLOCK(fs);
	h = lfs_mhash(p);
	simple_lock(&fs->lfs_interlock);
	s = splbio();
	LIST_FOREACH(re, &fs->lfs_reshash[h], res) {
		if (re->p == p) {
			KASSERT(re->inuse == 1);
			LIST_REMOVE(re, res);
			re->inuse = 0;
			wakeup(&fs->lfs_resblk);
			splx(s);
			simple_unlock(&fs->lfs_interlock);
			return;
		}
	}
#ifdef DEBUG
	for (i = 0; i < LFS_N_TOTAL; i++) {
		if (fs->lfs_resblk[i].p == p)
			panic("lfs_free: inconsistent reserved block");
	}
#endif
	splx(s);
	simple_unlock(&fs->lfs_interlock);

	/*
	 * The pointer was not found in the reserve hash, so it came
	 * from malloc(); free it normally.
	 */
	free(p, M_SEGMENT);
}
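
/*
 * Usage sketch (editorial, not compiled): callers pass the same "type"
 * to lfs_malloc() and lfs_free() so a reserve block, if one was handed
 * out, is returned to the right list.  Assuming the LFS_NB_* purpose
 * constants from lfs.h, a segment-summary-sized buffer would be handled
 * as:
 *
 *	void *sum;
 *
 *	sum = lfs_malloc(fs, fs->lfs_sumsize, LFS_NB_SUMMARY);
 *	... fill in and write the summary ...
 *	lfs_free(fs, sum, LFS_NB_SUMMARY);
 *
 * lfs_free() looks the pointer up in lfs_reshash[]; only pointers that
 * turn out not to be reserve blocks are passed on to free().
 */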

/*
 * lfs_seglock --
 *	Single thread the segment writer.
 */
int
lfs_seglock(struct lfs *fs, unsigned long flags)
{
	struct segment *sp;

	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_seglock) {
		if (fs->lfs_lockpid == curproc->p_pid) {
			simple_unlock(&fs->lfs_interlock);
			++fs->lfs_seglock;
			fs->lfs_sp->seg_flags |= flags;
			return 0;
		} else if (flags & SEGM_PAGEDAEMON) {
			simple_unlock(&fs->lfs_interlock);
			return EWOULDBLOCK;
		} else {
			while (fs->lfs_seglock) {
				(void)ltsleep(&fs->lfs_seglock, PRIBIO + 1,
				    "lfs seglock", 0, &fs->lfs_interlock);
			}
		}
	}

	fs->lfs_seglock = 1;
	fs->lfs_lockpid = curproc->p_pid;
	simple_unlock(&fs->lfs_interlock);
	fs->lfs_cleanind = 0;

#ifdef DEBUG
	LFS_ENTER_LOG("seglock", __FILE__, __LINE__, 0, flags, curproc->p_pid);
#endif
	/* Drain fragment size changes out */
	lockmgr(&fs->lfs_fraglock, LK_EXCLUSIVE, 0);

	sp = fs->lfs_sp = pool_get(&fs->lfs_segpool, PR_WAITOK);
	sp->bpp = pool_get(&fs->lfs_bpppool, PR_WAITOK);
	sp->seg_flags = flags;
	sp->vp = NULL;
	sp->seg_iocount = 0;
	(void) lfs_initseg(fs);

	/*
	 * Keep a cumulative count of the outstanding I/O operations.  If the
	 * disk drive catches up with us it could go to zero before we finish,
	 * so we artificially increment it by one until we've scheduled all of
	 * the writes we intend to do.
	 */
	simple_lock(&fs->lfs_interlock);
	++fs->lfs_iocount;
	simple_unlock(&fs->lfs_interlock);
	return 0;
}
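
/*
 * Locking sketch (editorial, not compiled): the seglock is recursive
 * within a single process, so a writer may re-enter without deadlock.
 * Each lfs_seglock() must be balanced by one lfs_segunlock(), and the
 * final unlock is the one that finishes the segment write:
 *
 *	lfs_seglock(fs, SEGM_PROT);	count goes 0 -> 1
 *	lfs_seglock(fs, 0);		same pid: count 1 -> 2, no sleep
 *	lfs_segunlock(fs);		count 2 -> 1
 *	lfs_segunlock(fs);		count 1 -> 0, write is finished
 *
 * The pagedaemon passes SEGM_PAGEDAEMON and is handed EWOULDBLOCK
 * rather than being put to sleep, since blocking the pageout path on
 * the segment writer could deadlock under memory pressure.
 */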

static void lfs_unmark_dirop(struct lfs *);

static void
lfs_unmark_dirop(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	int doit;

	ASSERT_NO_SEGLOCK(fs);
	simple_lock(&fs->lfs_interlock);
	doit = !(fs->lfs_flags & LFS_UNDIROP);
	if (doit)
		fs->lfs_flags |= LFS_UNDIROP;
	if (!doit) {
		simple_unlock(&fs->lfs_interlock);
		return;
	}

	for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_lfs_dchain);
		simple_unlock(&fs->lfs_interlock);
		vp = ITOV(ip);

		simple_lock(&vp->v_interlock);
		if (VOP_ISLOCKED(vp) &&
		    vp->v_lock.lk_lockholder != curproc->p_pid) {
			simple_lock(&fs->lfs_interlock);
			simple_unlock(&vp->v_interlock);
			continue;
		}
		if ((VTOI(vp)->i_flag & IN_ADIROP) == 0) {
			simple_lock(&fs->lfs_interlock);
			simple_lock(&lfs_subsys_lock);
			--lfs_dirvcount;
			simple_unlock(&lfs_subsys_lock);
			vp->v_flag &= ~VDIROP;
			TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			simple_unlock(&fs->lfs_interlock);
			wakeup(&lfs_dirvcount);
			simple_unlock(&vp->v_interlock);
			simple_lock(&fs->lfs_interlock);
			fs->lfs_unlockvp = vp;
			simple_unlock(&fs->lfs_interlock);
			vrele(vp);
			simple_lock(&fs->lfs_interlock);
			fs->lfs_unlockvp = NULL;
			simple_unlock(&fs->lfs_interlock);
		} else
			simple_unlock(&vp->v_interlock);
		simple_lock(&fs->lfs_interlock);
	}

	fs->lfs_flags &= ~LFS_UNDIROP;
	simple_unlock(&fs->lfs_interlock);
	wakeup(&fs->lfs_flags);
}

static void
lfs_auto_segclean(struct lfs *fs)
{
	int i, error, s, waited;

	ASSERT_SEGLOCK(fs);
	/*
	 * Now that we've swapped lfs_activesb, but while we still
	 * hold the segment lock, run through the segment list marking
	 * the empty ones clean.
	 * XXX - do we really need to do them all at once?
	 */
	waited = 0;
	for (i = 0; i < fs->lfs_nseg; i++) {
		if ((fs->lfs_suflags[0][i] &
		     (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) ==
		    (SEGUSE_DIRTY | SEGUSE_EMPTY) &&
		    (fs->lfs_suflags[1][i] &
		     (SEGUSE_ACTIVE | SEGUSE_DIRTY | SEGUSE_EMPTY)) ==
		    (SEGUSE_DIRTY | SEGUSE_EMPTY)) {

			/* Make sure the sb is written before we clean */
			simple_lock(&fs->lfs_interlock);
			s = splbio();
			while (waited == 0 && fs->lfs_sbactive)
				ltsleep(&fs->lfs_sbactive, PRIBIO + 1,
				    "lfs asb", 0, &fs->lfs_interlock);
			splx(s);
			simple_unlock(&fs->lfs_interlock);
			waited = 1;

			if ((error = lfs_do_segclean(fs, i)) != 0) {
				DLOG((DLOG_CLEAN, "lfs_auto_segclean: "
				    "lfs_do_segclean returned %d for seg %d\n",
				    error, i));
			}
		}
		fs->lfs_suflags[1 - fs->lfs_activesb][i] =
			fs->lfs_suflags[fs->lfs_activesb][i];
	}
}
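
/*
 * Checkpoint bookkeeping sketch (editorial): LFS keeps two superblocks
 * and two copies of the segment-usage flags, indexed by fs->lfs_activesb.
 * A segment is only auto-cleaned when *both* copies show it dirty, empty,
 * and inactive, i.e. it has looked that way across the two most recent
 * checkpoints.  lfs_segunlock() below alternates the active superblock
 * on each checkpoint:
 *
 *	lfs_writesuper(fs, fs->lfs_sboffs[1 - fs->lfs_activesb]);
 *	fs->lfs_activesb = 1 - fs->lfs_activesb;
 *
 * so an interrupted checkpoint always leaves one consistent superblock
 * on disk for recovery to start from.
 */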

/*
 * lfs_segunlock --
 *	Release the segment lock.  On the final unlock, finish the segment
 *	write and, if a checkpoint was requested, update the superblocks.
 */
void
lfs_segunlock(struct lfs *fs)
{
	struct segment *sp;
	unsigned long sync, ckp;
	struct buf *bp;
	int do_unmark_dirop = 0;

	sp = fs->lfs_sp;

	simple_lock(&fs->lfs_interlock);
	LOCK_ASSERT(LFS_SEGLOCK_HELD(fs));
	if (fs->lfs_seglock == 1) {
		if ((sp->seg_flags & SEGM_PROT) == 0)
			do_unmark_dirop = 1;
		simple_unlock(&fs->lfs_interlock);
		sync = sp->seg_flags & SEGM_SYNC;
		ckp = sp->seg_flags & SEGM_CKP;
		if (sp->bpp != sp->cbpp) {
			/* Free allocated segment summary */
			fs->lfs_offset -= btofsb(fs, fs->lfs_sumsize);
			bp = *sp->bpp;
			lfs_freebuf(fs, bp);
		} else
			DLOG((DLOG_SEG, "lfs_segunlock: unlock to 0 with no summary"));

		pool_put(&fs->lfs_bpppool, sp->bpp);
		sp->bpp = NULL;

		/*
		 * If we're not sync, we're done with sp, get rid of it.
		 * Otherwise, we keep a local copy around but free
		 * fs->lfs_sp so another process can use it (we have to
		 * wait but they don't have to wait for us).
		 */
		if (!sync)
			pool_put(&fs->lfs_segpool, sp);
		fs->lfs_sp = NULL;

		/*
		 * If the I/O count is non-zero, sleep until it reaches zero.
		 * At the moment, the user's process hangs around so we can
		 * sleep.
		 */
		simple_lock(&fs->lfs_interlock);
		if (--fs->lfs_iocount == 0)
			LFS_DEBUG_COUNTLOCKED("lfs_segunlock");
		if (fs->lfs_iocount <= 1)
			wakeup(&fs->lfs_iocount);
		simple_unlock(&fs->lfs_interlock);
		/*
		 * If we're not checkpointing, we don't have to block
		 * other processes to wait for a synchronous write
		 * to complete.
		 */
		if (!ckp) {
#ifdef DEBUG
			LFS_ENTER_LOG("segunlock_std", __FILE__, __LINE__,
			    0, 0, curproc->p_pid);
#endif
			simple_lock(&fs->lfs_interlock);
			--fs->lfs_seglock;
			fs->lfs_lockpid = 0;
			simple_unlock(&fs->lfs_interlock);
			wakeup(&fs->lfs_seglock);
		}
		/*
		 * We let checkpoints happen asynchronously.  That means
		 * that during recovery, we have to roll forward between
		 * the two segments described by the first and second
		 * superblocks to make sure that the checkpoint described
		 * by a superblock completed.
		 */
		simple_lock(&fs->lfs_interlock);
		while (ckp && sync && fs->lfs_iocount)
			(void)ltsleep(&fs->lfs_iocount, PRIBIO + 1,
			    "lfs_iocount", 0, &fs->lfs_interlock);
		while (sync && sp->seg_iocount) {
			(void)ltsleep(&sp->seg_iocount, PRIBIO + 1,
			    "seg_iocount", 0, &fs->lfs_interlock);
			DLOG((DLOG_SEG, "sleeping on iocount %p == %d\n",
			    sp, sp->seg_iocount));
		}
		simple_unlock(&fs->lfs_interlock);
		if (sync)
			pool_put(&fs->lfs_segpool, sp);

		if (ckp) {
			fs->lfs_nactive = 0;
			/* If we *know* everything's on disk, write both sbs */
			/* XXX should wait for this one */
			if (sync)
				lfs_writesuper(fs, fs->lfs_sboffs[fs->lfs_activesb]);
			lfs_writesuper(fs, fs->lfs_sboffs[1 - fs->lfs_activesb]);
			if (!(fs->lfs_ivnode->v_mount->mnt_iflag & IMNT_UNMOUNT)) {
				lfs_auto_segclean(fs);
				/* If sync, we can clean the remainder too */
				if (sync)
					lfs_auto_segclean(fs);
			}
			fs->lfs_activesb = 1 - fs->lfs_activesb;
#ifdef DEBUG
			LFS_ENTER_LOG("segunlock_ckp", __FILE__, __LINE__,
			    0, 0, curproc->p_pid);
#endif
			simple_lock(&fs->lfs_interlock);
			--fs->lfs_seglock;
			fs->lfs_lockpid = 0;
			simple_unlock(&fs->lfs_interlock);
			wakeup(&fs->lfs_seglock);
		}
		/* Reenable fragment size changes */
		lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
		if (do_unmark_dirop)
			lfs_unmark_dirop(fs);
	} else if (fs->lfs_seglock == 0) {
		simple_unlock(&fs->lfs_interlock);
		panic("Seglock not held");
	} else {
		--fs->lfs_seglock;
		simple_unlock(&fs->lfs_interlock);
	}
}

/*
 * Drain dirops and start the writer.
 */
int
lfs_writer_enter(struct lfs *fs, const char *wmesg)
{
	int error = 0;

	ASSERT_MAYBE_SEGLOCK(fs);
	simple_lock(&fs->lfs_interlock);

	/* disallow dirops during flush */
	fs->lfs_writer++;

	while (fs->lfs_dirops > 0) {
		++fs->lfs_diropwait;
		error = ltsleep(&fs->lfs_writer, PRIBIO + 1, wmesg, 0,
		    &fs->lfs_interlock);
		--fs->lfs_diropwait;
	}

	if (error)
		fs->lfs_writer--;

	simple_unlock(&fs->lfs_interlock);

	return error;
}

void
lfs_writer_leave(struct lfs *fs)
{
	boolean_t dowakeup;

	ASSERT_MAYBE_SEGLOCK(fs);
	simple_lock(&fs->lfs_interlock);
	dowakeup = !(--fs->lfs_writer);
	simple_unlock(&fs->lfs_interlock);
	if (dowakeup)
		wakeup(&fs->lfs_dirops);
}
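
/*
 * Usage sketch (editorial, not compiled): flush paths bracket their work
 * with the writer lock so no new directory operations start while the
 * flush runs:
 *
 *	if (lfs_writer_enter(fs, "lfsflush") == 0) {
 *		... flush or write segments ...
 *		lfs_writer_leave(fs);
 *	}
 *
 * The wmesg string above is a made-up example.  lfs_writer_enter()
 * backs out its increment of lfs_writer if the sleep returns an error,
 * so lfs_writer_leave() must only be called after a successful enter.
 */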