/*	$NetBSD: lfs_segment.c,v 1.195 2006/11/16 01:33:53 christos Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.195 2006/11/16 01:33:53 christos Exp $");

#ifdef DEBUG
# define vndebug(vp, str) do {						\
	if (VTOI(vp)->i_flag & IN_CLEANING)				\
		DLOG((DLOG_WVNODE, "not writing ino %d because %s (op %d)\n", \
		     VTOI(vp)->i_number, (str), op));			\
} while(0)
#else
# define vndebug(vp, str)
#endif
#define ivndebug(vp, str) \
	DLOG((DLOG_WVNODE, "ino %d: %s\n", VTOI(vp)->i_number, (str)))

#if defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kauth.h>
#include <sys/syslog.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

MALLOC_DEFINE(M_SEGMENT, "LFS segment", "Segment for LFS");

extern int count_lock_queue(void);
extern struct simplelock vnode_free_list_slock;		/* XXX */
extern struct simplelock bqueue_slock;			/* XXX */

static void lfs_generic_callback(struct buf *, void (*)(struct buf *));
static void lfs_free_aiodone(struct buf *);
static void lfs_super_aiodone(struct buf *);
static void lfs_cluster_aiodone(struct buf *);
static void lfs_cluster_callback(struct buf *);

/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	((fs)->lfs_fsbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
	fragstofsb((fs), (fs)->lfs_frag))

/*
 * Figure out whether we should do a checkpoint write or go ahead with
 * an ordinary write.
 */
#define LFS_SHOULD_CHECKPOINT(fs, flags) \
	((flags & SEGM_CLEAN) == 0 &&					\
	  ((fs->lfs_nactive > LFS_MAX_ACTIVE ||				\
	    (flags & SEGM_CKP) ||					\
	    fs->lfs_nclean < LFS_MAX_ACTIVE)))
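/*
 * Illustrative sketch (not part of the original source): assuming
 * LFS_MAX_ACTIVE is 10, a call such as
 *
 *	if (LFS_SHOULD_CHECKPOINT(fs, SEGM_CKP | SEGM_SYNC))
 *		error = lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);
 *
 * always takes the checkpoint path, while a cleaner-initiated write
 * (SEGM_CLEAN) never does; without SEGM_CKP, an ordinary write is
 * promoted to a checkpoint only when more than 10 segments are active
 * or fewer than 10 are clean.
 */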

int	 lfs_match_fake(struct lfs *, struct buf *);
void	 lfs_newseg(struct lfs *);
/* XXX ondisk32 */
void	 lfs_shellsort(struct buf **, int32_t *, int, int);
void	 lfs_supercallback(struct buf *);
void	 lfs_updatemeta(struct segment *);
void	 lfs_writesuper(struct lfs *, daddr_t);
int	 lfs_writevnodes(struct lfs *fs, struct mount *mp,
	    struct segment *sp, int dirops);

int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	lfs_writeindir = 1;		/* whether to flush indir on non-ckp */
int	lfs_clean_vnhead = 0;		/* Allow freeing to head of vn list */
int	lfs_dirvcount = 0;		/* # active dirops */

/* Statistics Counters */
int lfs_dostats = 1;
struct lfs_stats lfs_stats;

/* op values to lfs_writevnodes */
#define	VN_REG		0
#define	VN_DIROP	1
#define	VN_EMPTY	2
#define	VN_CLEAN	3
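/*
 * Illustrative note (not part of the original source): lfs_segwrite()
 * below passes VN_CLEAN when writing on behalf of the cleaner, and
 * otherwise VN_REG followed by VN_DIROP; lfs_vflush() passes VN_EMPTY
 * for a vnode with no dirty blocks, e.g.
 *
 *	lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
 */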

/*
 * XXX KS - Set modification time on the Ifile, so the cleaner can
 * read the fs mod time off of it.  We don't set IN_UPDATE here,
 * since we don't really need this to be flushed to disk (and in any
 * case that wouldn't happen to the Ifile until we checkpoint).
 */
void
lfs_imtime(struct lfs *fs)
{
	struct timespec ts;
	struct inode *ip;

	ASSERT_MAYBE_SEGLOCK(fs);
	vfs_timestamp(&ts);
	ip = VTOI(fs->lfs_ivnode);
	ip->i_ffs1_mtime = ts.tv_sec;
	ip->i_ffs1_mtimensec = ts.tv_nsec;
}

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */

#define IS_FLUSHING(fs,vp)  ((fs)->lfs_flushvp == (vp))

int
lfs_vflush(struct vnode *vp)
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct buf *bp, *nbp, *tbp, *tnbp;
	int error, s;
	int flushed;
	int relock;
	int loopcount;

	ip = VTOI(vp);
	fs = VFSTOUFS(vp->v_mount)->um_lfs;
	relock = 0;

top:
	ASSERT_NO_SEGLOCK(fs);
	if (ip->i_flag & IN_CLEANING) {
		ivndebug(vp,"vflush/in_cleaning");
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_SET_UINO(ip, IN_MODIFIED);

		/*
		 * Toss any cleaning buffers that have real counterparts
		 * to avoid losing new data.
		 */
		s = splbio();
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (!LFS_IS_MALLOC_BUF(bp))
				continue;
			/*
			 * Look for pages matching the range covered
			 * by cleaning blocks.  It's okay if more dirty
			 * pages appear, so long as none disappear out
			 * from under us.
			 */
			if (bp->b_lblkno > 0 && vp->v_type == VREG &&
			    vp != fs->lfs_ivnode) {
				struct vm_page *pg;
				voff_t off;

				simple_lock(&vp->v_interlock);
				for (off = lblktosize(fs, bp->b_lblkno);
				     off < lblktosize(fs, bp->b_lblkno + 1);
				     off += PAGE_SIZE) {
					pg = uvm_pagelookup(&vp->v_uobj, off);
					if (pg == NULL)
						continue;
					if ((pg->flags & PG_CLEAN) == 0 ||
					    pmap_is_modified(pg)) {
						fs->lfs_avail += btofsb(fs,
							bp->b_bcount);
						wakeup(&fs->lfs_avail);
						lfs_freebuf(fs, bp);
						bp = NULL;
						simple_unlock(&vp->v_interlock);
						goto nextbp;
					}
				}
				simple_unlock(&vp->v_interlock);
			}
			for (tbp = LIST_FIRST(&vp->v_dirtyblkhd); tbp;
			    tbp = tnbp)
			{
				tnbp = LIST_NEXT(tbp, b_vnbufs);
				if (tbp->b_vp == bp->b_vp
				   && tbp->b_lblkno == bp->b_lblkno
				   && tbp != bp)
				{
					fs->lfs_avail += btofsb(fs,
						bp->b_bcount);
					wakeup(&fs->lfs_avail);
					lfs_freebuf(fs, bp);
					bp = NULL;
					break;
				}
			}
		    nextbp:
			;
		}
		splx(s);
	}
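
	/*
	 * Illustrative note (not part of the original source): the loop
	 * above keeps only the newest copy of each block.  If the cleaner
	 * handed us a malloced copy of, say, lbn 3 and the page backing
	 * lbn 3 has since been redirtied, the copy is freed and its space
	 * credited back to lfs_avail so the block is not counted twice.
	 */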

	/* If the node is being written, wait until that is done */
	simple_lock(&vp->v_interlock);
	s = splbio();
	if (WRITEINPROG(vp)) {
		ivndebug(vp,"vflush/writeinprog");
		ltsleep(vp, (PRIBIO+1), "lfs_vw", 0, &vp->v_interlock);
	}
	splx(s);
	simple_unlock(&vp->v_interlock);

	/* Protect against VXLOCK deadlock in vinvalbuf() */
	lfs_seglock(fs, SEGM_SYNC);

	/* If we're supposed to flush a freed inode, just toss it */
	if (ip->i_lfs_iflags & LFSI_DELETED) {
		DLOG((DLOG_VNODE, "lfs_vflush: ino %d freed, not flushing\n",
		      ip->i_number));
		s = splbio();
		/* Drain v_numoutput */
		simple_lock(&global_v_numoutput_slock);
		while (vp->v_numoutput > 0) {
			vp->v_flag |= VBWAIT;
			ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vf4", 0,
				&global_v_numoutput_slock);
		}
		simple_unlock(&global_v_numoutput_slock);
		KASSERT(vp->v_numoutput == 0);

		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);

			KASSERT((bp->b_flags & B_GATHERED) == 0);
			if (bp->b_flags & B_DELWRI) { /* XXX always true? */
				fs->lfs_avail += btofsb(fs, bp->b_bcount);
				wakeup(&fs->lfs_avail);
			}
			/* Copied from lfs_writeseg */
			if (bp->b_flags & B_CALL) {
				biodone(bp);
			} else {
				bremfree(bp);
				LFS_UNLOCK_BUF(bp);
				bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
						 B_GATHERED);
				bp->b_flags |= B_DONE;
				reassignbuf(bp, vp);
				brelse(bp);
			}
		}
		splx(s);
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_CLR_UINO(ip, IN_MODIFIED | IN_ACCESSED);
		ip->i_flag &= ~IN_ALLMOD;
		DLOG((DLOG_VNODE, "lfs_vflush: done not flushing ino %d\n",
		      ip->i_number));
		lfs_segunlock(fs);

		KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);

		return 0;
	}
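
	/*
	 * Illustrative note (not part of the original source): in the
	 * deleted-inode path above, each tossed B_DELWRI buffer credits
	 * btofsb(fs, bp->b_bcount) back to lfs_avail, since those blocks
	 * will never reach the log.
	 */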

	fs->lfs_flushvp = vp;
	if (LFS_SHOULD_CHECKPOINT(fs, fs->lfs_sp->seg_flags)) {
		error = lfs_segwrite(vp->v_mount, SEGM_CKP | SEGM_SYNC);
		fs->lfs_flushvp = NULL;
		KASSERT(fs->lfs_flushvp_fakevref == 0);
		lfs_segunlock(fs);

		/* Make sure that any pending buffers get written */
		s = splbio();
		simple_lock(&global_v_numoutput_slock);
		while (vp->v_numoutput > 0) {
			vp->v_flag |= VBWAIT;
			ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vf3", 0,
				&global_v_numoutput_slock);
		}
		simple_unlock(&global_v_numoutput_slock);
		splx(s);

		KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);
		KASSERT(vp->v_numoutput == 0);

		return error;
	}
	sp = fs->lfs_sp;

	flushed = 0;
	if (VPISEMPTY(vp)) {
		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
		++flushed;
	} else if ((ip->i_flag & IN_CLEANING) &&
		  (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
		ivndebug(vp,"vflush/clean");
		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
		++flushed;
	} else if (lfs_dostats) {
		if (!VPISEMPTY(vp) || (VTOI(vp)->i_flag & IN_ALLMOD))
			++lfs_stats.vflush_invoked;
		ivndebug(vp,"vflush");
	}

#ifdef DIAGNOSTIC
	if (vp->v_flag & VDIROP) {
		DLOG((DLOG_VNODE, "lfs_vflush: flushing VDIROP\n"));
		/* panic("lfs_vflush: VDIROP being flushed...this can\'t happen"); */
	}
	if (vp->v_usecount < 0) {
		printf("usecount=%ld\n", (long)vp->v_usecount);
		panic("lfs_vflush: usecount<0");
	}
#endif

	do {
		loopcount = 0;
		do {
			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL) {
				relock = lfs_writefile(fs, sp, vp);
				if (relock) {
					/*
					 * Might have to wait for the
					 * cleaner to run; but we're
					 * still not done with this vnode.
					 */
					KDASSERT(ip->i_number != LFS_IFILE_INUM);
					lfs_writeinode(fs, sp, ip);
					LFS_SET_UINO(ip, IN_MODIFIED);
					lfs_writeseg(fs, sp);
					lfs_segunlock(fs);
					lfs_segunlock_relock(fs);
					goto top;
				}
			}
			/*
			 * If we begin a new segment in the middle of writing
			 * the Ifile, it creates an inconsistent checkpoint,
			 * since the Ifile information for the new segment
			 * is not up-to-date.  Take care of this here by
			 * sending the Ifile through again in case there
			 * are newly dirtied blocks.  But wait, there's more!
			 * This second Ifile write could *also* cross a segment
			 * boundary, if the first one was large.  The second
			 * one is guaranteed to be no more than 8 blocks,
			 * though (two segment blocks and supporting indirects)
			 * so the third write *will not* cross the boundary.
			 */
			if (vp == fs->lfs_ivnode) {
				lfs_writefile(fs, sp, vp);
				lfs_writefile(fs, sp, vp);
			}
#ifdef DEBUG
			if (++loopcount > 2)
				log(LOG_NOTICE, "lfs_vflush: looping count=%d\n", loopcount);
#endif
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	/*
	 * If we were called from somewhere that has already held the seglock
	 * (e.g., lfs_markv()), the lfs_segunlock will not wait for
	 * the write to complete because we are still locked.
	 * Since lfs_vflush() must return the vnode with no dirty buffers,
	 * we must explicitly wait, if that is the case.
	 *
	 * We compare the iocount against 1, not 0, because it is
	 * artificially incremented by lfs_seglock().
	 */
	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_seglock > 1) {
		while (fs->lfs_iocount > 1)
			(void)ltsleep(&fs->lfs_iocount, PRIBIO + 1,
				     "lfs_vflush", 0, &fs->lfs_interlock);
	}
	simple_unlock(&fs->lfs_interlock);

	lfs_segunlock(fs);

	/* Wait for these buffers to be recovered by aiodoned */
	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vf2", 0,
			&global_v_numoutput_slock);
	}
	simple_unlock(&global_v_numoutput_slock);
	splx(s);

	KASSERT(LIST_FIRST(&vp->v_dirtyblkhd) == NULL);
	KASSERT(vp->v_numoutput == 0);

	fs->lfs_flushvp = NULL;
	KASSERT(fs->lfs_flushvp_fakevref == 0);

	return (0);
}

int
lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
{
	struct inode *ip;
	struct vnode *vp;
	int inodes_written = 0, only_cleaning;
	int error = 0;

	ASSERT_SEGLOCK(fs);
#if 0
	/* start at last (newest) vnode. */
loop:
	TAILQ_FOREACH_REVERSE(vp, &mp->mnt_vnodelist, vnodelst, v_mntvnodes) {
#else
	/* start at oldest accessed vnode */
loop:
	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
#endif
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp) {
			DLOG((DLOG_VNODE, "lfs_writevnodes: starting over\n"));
			/*
			 * After this, pages might be busy
			 * due to our own previous putpages.
			 * Start actual segment write here to avoid deadlock.
			 */
			(void)lfs_writeseg(fs, sp);
			goto loop;
		}

		if (vp->v_type == VNON) {
			continue;
		}

		ip = VTOI(vp);
		if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
		   (op != VN_DIROP && op != VN_CLEAN &&
		    (vp->v_flag & VDIROP))) {
			vndebug(vp,"dirop");
			continue;
		}

		if (op == VN_EMPTY && !VPISEMPTY(vp)) {
			vndebug(vp,"empty");
			continue;
		}

		if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM
		   && vp != fs->lfs_flushvp
		   && !(ip->i_flag & IN_CLEANING)) {
			vndebug(vp,"cleaning");
			continue;
		}

		if (lfs_vref(vp)) {
			vndebug(vp,"vref");
			continue;
		}

		only_cleaning = 0;
		/*
		 * Write the inode/file if dirty and it's not the IFILE.
		 */
		if ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp)) {
			only_cleaning =
			    ((ip->i_flag & IN_ALLMOD) == IN_CLEANING);

			if (ip->i_number != LFS_IFILE_INUM) {
				error = lfs_writefile(fs, sp, vp);
				if (error) {
					lfs_vunref(vp);
					if (error == EAGAIN) {
						/*
						 * This error from lfs_putpages
						 * indicates we need to drop
						 * the segment lock and start
						 * over after the cleaner has
						 * had a chance to run.
						 */
						lfs_writeinode(fs, sp, ip);
						lfs_writeseg(fs, sp);
						if (!VPISEMPTY(vp) &&
						    !WRITEINPROG(vp) &&
						    !(ip->i_flag & IN_ALLMOD))
							LFS_SET_UINO(ip, IN_MODIFIED);
						break;
					}
					error = 0; /* XXX not quite right */
					continue;
				}

				if (!VPISEMPTY(vp)) {
					if (WRITEINPROG(vp)) {
						ivndebug(vp,"writevnodes/write2");
					} else if (!(ip->i_flag & IN_ALLMOD)) {
						LFS_SET_UINO(ip, IN_MODIFIED);
					}
				}
				(void) lfs_writeinode(fs, sp, ip);
				inodes_written++;
			}
		}

		if (lfs_clean_vnhead && only_cleaning)
			lfs_vunref_head(vp);
		else
			lfs_vunref(vp);
	}
	return error;
}

/*
 * Do a checkpoint.
 */
int
lfs_segwrite(struct mount *mp, int flags)
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	int do_ckp, did_ckp, error, s;
	unsigned n, segleft, maxseg, sn, i, curseg;
	int writer_set = 0;
	int dirty;
	int redo;
	int um_error;
	int loopcount;

	fs = VFSTOUFS(mp)->um_lfs;
	ASSERT_MAYBE_SEGLOCK(fs);

	if (fs->lfs_ronly)
		return EROFS;

	lfs_imtime(fs);

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = LFS_SHOULD_CHECKPOINT(fs, flags);

	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
	sp = fs->lfs_sp;
	if (sp->seg_flags & (SEGM_CLEAN | SEGM_CKP))
		do_ckp = 1;

	/*
	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
	 * in which case we have to flush *all* buffers off of this vnode.
	 * We don't care about other nodes, but write any non-dirop nodes
	 * anyway in anticipation of another getnewvnode().
	 *
	 * If we're cleaning we only write cleaning and ifile blocks, and
	 * no dirops, since otherwise we'd risk corruption in a crash.
	 */
	if (sp->seg_flags & SEGM_CLEAN)
		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
	else if (!(sp->seg_flags & SEGM_FORCE_CKP)) {
		do {
			um_error = lfs_writevnodes(fs, mp, sp, VN_REG);
			if (!fs->lfs_dirops || !fs->lfs_flushvp) {
				if (!writer_set) {
					lfs_writer_enter(fs, "lfs writer");
					writer_set = 1;
				}
				error = lfs_writevnodes(fs, mp, sp, VN_DIROP);
				if (um_error == 0)
					um_error = error;
				/* In case writevnodes errored out */
				lfs_flush_dirops(fs);
				((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
				lfs_finalize_fs_seguse(fs);
			}
			if (do_ckp && um_error) {
				lfs_segunlock_relock(fs);
				sp = fs->lfs_sp;
			}
		} while (do_ckp && um_error != 0);
	}

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp || fs->lfs_doifile) {
		segleft = fs->lfs_nseg;
		curseg = 0;
		for (n = 0; n < fs->lfs_segtabsz; n++) {
			dirty = 0;
			if (bread(fs->lfs_ivnode,
			    fs->lfs_cleansz + n, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_segwrite: ifile read");
			segusep = (SEGUSE *)bp->b_data;
			maxseg = min(segleft, fs->lfs_sepb);
			for (i = 0; i < maxseg; i++) {
				sn = curseg + i;
				if (sn != dtosn(fs, fs->lfs_curseg) &&
				    segusep->su_flags & SEGUSE_ACTIVE) {
					segusep->su_flags &= ~SEGUSE_ACTIVE;
					--fs->lfs_nactive;
					++dirty;
				}
				fs->lfs_suflags[fs->lfs_activesb][sn] =
					segusep->su_flags;
				if (fs->lfs_version > 1)
					++segusep;
				else
					segusep = (SEGUSE *)
						((SEGUSE_V1 *)segusep + 1);
			}

			if (dirty)
				error = LFS_BWRITE_LOG(bp); /* Ifile */
			else
				brelse(bp);
			segleft -= fs->lfs_sepb;
			curseg += fs->lfs_sepb;
		}
	}

	LOCK_ASSERT(LFS_SEGLOCK_HELD(fs));

	did_ckp = 0;
	if (do_ckp || fs->lfs_doifile) {
		vp = fs->lfs_ivnode;
		vn_lock(vp, LK_EXCLUSIVE);
		loopcount = 0;
		do {
#ifdef DEBUG
			LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0, curproc->p_pid);
#endif
			simple_lock(&fs->lfs_interlock);
			fs->lfs_flags &= ~LFS_IFDIRTY;
			simple_unlock(&fs->lfs_interlock);

			ip = VTOI(vp);

			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL) {
				/*
				 * Ifile has no pages, so we don't need
				 * to check error return here.
				 */
				lfs_writefile(fs, sp, vp);
				/*
				 * Ensure the Ifile takes the current segment
				 * into account.  See comment in lfs_vflush.
				 */
				lfs_writefile(fs, sp, vp);
				lfs_writefile(fs, sp, vp);
			}

			if (ip->i_flag & IN_ALLMOD)
				++did_ckp;
#if 0
			redo = (do_ckp ? lfs_writeinode(fs, sp, ip) : 0);
#else
			redo = lfs_writeinode(fs, sp, ip);
#endif
			redo += lfs_writeseg(fs, sp);
			simple_lock(&fs->lfs_interlock);
			redo += (fs->lfs_flags & LFS_IFDIRTY);
			simple_unlock(&fs->lfs_interlock);
#ifdef DEBUG
			if (++loopcount > 2)
				log(LOG_NOTICE, "lfs_segwrite: looping count=%d\n",
				    loopcount);
#endif
		} while (redo && do_ckp);

		/*
		 * Unless we are unmounting, the Ifile may continue to have
		 * dirty blocks even after a checkpoint, due to changes to
		 * inodes' atime.  If we're checkpointing, it's "impossible"
		 * for other parts of the Ifile to be dirty after the loop
		 * above, since we hold the segment lock.
		 */
		s = splbio();
		if (LIST_EMPTY(&vp->v_dirtyblkhd)) {
			LFS_CLR_UINO(ip, IN_ALLMOD);
		}
#ifdef DIAGNOSTIC
		else if (do_ckp) {
			int do_panic = 0;
			LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
				if (bp->b_lblkno < fs->lfs_cleansz +
				    fs->lfs_segtabsz &&
				    !(bp->b_flags & B_GATHERED)) {
					printf("ifile lbn %ld still dirty (flags %lx)\n",
						(long)bp->b_lblkno,
						(long)bp->b_flags);
					++do_panic;
				}
			}
			if (do_panic)
				panic("dirty blocks");
		}
#endif
		splx(s);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) lfs_writeseg(fs, sp);
	}

	/* Note Ifile no longer needs to be written */
	fs->lfs_doifile = 0;
	if (writer_set)
		lfs_writer_leave(fs);

	/*
	 * If we didn't write the Ifile, we didn't really do anything.
	 * That means that (1) there is a checkpoint on disk and (2)
	 * nothing has changed since it was written.
	 *
	 * Take the flags off of the segment so that lfs_segunlock
	 * doesn't have to write the superblock either.
	 */
	if (do_ckp && !did_ckp) {
		sp->seg_flags &= ~SEGM_CKP;
	}

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);
	return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
int
lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
{
	struct finfo *fip;
	struct inode *ip;
	int i, frag;
	int error;

	ASSERT_SEGLOCK(fs);
	error = 0;
	ip = VTOI(vp);

	fip = sp->fip;
	lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);

	if (vp->v_flag & VDIROP)
		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

	if (sp->seg_flags & SEGM_CLEAN) {
		lfs_gather(fs, sp, vp, lfs_match_fake);
		/*
		 * For a file being flushed, we need to write *all* blocks.
		 * This means writing the cleaning blocks first, and then
		 * immediately following with any non-cleaning blocks.
		 * The same is true of the Ifile since checkpoints assume
		 * that all valid Ifile blocks are written.
		 */
		if (IS_FLUSHING(fs, vp) || vp == fs->lfs_ivnode) {
			lfs_gather(fs, sp, vp, lfs_match_data);
			/*
			 * Don't call VOP_PUTPAGES: if we're flushing,
			 * we've already done it, and the Ifile doesn't
			 * use the page cache.
			 */
		}
	} else {
		lfs_gather(fs, sp, vp, lfs_match_data);
		/*
		 * If we're flushing, we've already called VOP_PUTPAGES
		 * so don't do it again.  Otherwise, we want to write
		 * everything we've got.
		 */
		if (!IS_FLUSHING(fs, vp)) {
			simple_lock(&vp->v_interlock);
			error = VOP_PUTPAGES(vp, 0, 0,
				PGO_CLEANIT | PGO_ALLPAGES | PGO_LOCKED);
		}
	}

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 *
	 * We have to write them anyway, though, under two conditions: (1) the
	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
	 * checkpointing.
	 *
	 * BUT if we are cleaning, we might have indirect blocks that refer to
	 * new blocks not being written yet, in addition to fragments being
	 * moved out of a cleaned segment.  If that is the case, don't
	 * write the indirect blocks, or the finfo will have a small block
	 * in the middle of it!
	 * XXX in this case isn't the inode size wrong too?
	 */
	frag = 0;
	if (sp->seg_flags & SEGM_CLEAN) {
		for (i = 0; i < NDADDR; i++)
			if (ip->i_lfs_fragsize[i] > 0 &&
			    ip->i_lfs_fragsize[i] < fs->lfs_bsize)
				++frag;
	}
#ifdef DIAGNOSTIC
	if (frag > 1)
		panic("lfs_writefile: more than one fragment!");
#endif
	if (IS_FLUSHING(fs, vp) ||
	    (frag == 0 && (lfs_writeindir || (sp->seg_flags & SEGM_CKP)))) {
		lfs_gather(fs, sp, vp, lfs_match_indir);
		lfs_gather(fs, sp, vp, lfs_match_dindir);
		lfs_gather(fs, sp, vp, lfs_match_tindir);
	}
	fip = sp->fip;
	lfs_release_finfo(fs);

	return error;
}

/*
 * Update segment accounting to reflect this inode's change of address.
 */
static int
lfs_update_iaddr(struct lfs *fs, struct segment *sp, struct inode *ip, daddr_t ndaddr)
{
	struct buf *bp;
	daddr_t daddr;
	IFILE *ifp;
	SEGUSE *sup;
	ino_t ino;
	int redo_ifile, error;
	u_int32_t sn;

	redo_ifile = 0;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = fs->lfs_idaddr;
		fs->lfs_idaddr = dbtofsb(fs, ndaddr);
	} else {
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = ifp->if_daddr;
		ifp->if_daddr = dbtofsb(fs, ndaddr);
		error = LFS_BWRITE_LOG(bp); /* Ifile */
	}

	/*
	 * If this is the Ifile and lfs_offset is set to the first block
	 * in the segment, dirty the new segment's accounting block
	 * (XXX should already be dirty?) and tell the caller to do it again.
	 */
	if (ip->i_number == LFS_IFILE_INUM) {
		sn = dtosn(fs, fs->lfs_offset);
		if (sntod(fs, sn) + btofsb(fs, fs->lfs_sumsize) ==
		    fs->lfs_offset) {
			LFS_SEGENTRY(sup, fs, sn, bp);
			KASSERT(bp->b_flags & B_DELWRI);
			LFS_WRITESEGENTRY(sup, fs, sn, bp);
			/* fs->lfs_flags |= LFS_IFDIRTY; */
			redo_ifile |= 1;
		}
	}

	/*
	 * The inode's last address should not be in the current partial
	 * segment, except under exceptional circumstances (lfs_writevnodes
	 * had to start over, and in the meantime more blocks were written
	 * to a vnode).  Both inodes will be accounted to this segment
	 * in lfs_writeseg so we need to subtract the earlier version
	 * here anyway.  The segment count can temporarily dip below
	 * zero here; keep track of how many duplicates we have in
	 * "dupino" so we don't panic below.
	 */
	if (daddr >= fs->lfs_lastpseg && daddr <= fs->lfs_offset) {
		++sp->ndupino;
		DLOG((DLOG_SEG, "lfs_writeinode: last inode addr in current pseg "
		      "(ino %d daddr 0x%llx) ndupino=%d\n", ino,
		      (long long)daddr, sp->ndupino));
	}
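
	/*
	 * Illustrative example (not part of the original source): if
	 * lfs_writevnodes had to start over and ino 5's inode therefore
	 * went out twice in this partial segment, the stale copy's address
	 * falls in [lfs_lastpseg, lfs_offset], ndupino becomes 1, and the
	 * diagnostic check below tolerates su_nbytes being short by one
	 * on-disk dinode.
	 */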
	/*
	 * Account the inode: it no longer belongs to its former segment,
	 * though it will not belong to the new segment until that segment
	 * is actually written.
	 */
	if (daddr != LFS_UNUSED_DADDR) {
		u_int32_t oldsn = dtosn(fs, daddr);
#ifdef DIAGNOSTIC
		int ndupino = (sp->seg_number == oldsn) ? sp->ndupino : 0;
#endif
		LFS_SEGENTRY(sup, fs, oldsn, bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes +
		    sizeof (struct ufs1_dinode) * ndupino
		      < sizeof (struct ufs1_dinode)) {
			printf("lfs_writeinode: negative bytes "
			       "(segment %" PRIu32 " short by %d, "
			       "oldsn=%" PRIu32 ", cursn=%" PRIu32
			       ", daddr=%" PRId64 ", su_nbytes=%u, "
			       "ndupino=%d)\n",
			       dtosn(fs, daddr),
			       (int)sizeof (struct ufs1_dinode) *
				   (1 - sp->ndupino) - sup->su_nbytes,
			       oldsn, sp->seg_number, daddr,
			       (unsigned int)sup->su_nbytes,
			       sp->ndupino);
			panic("lfs_writeinode: negative bytes");
			sup->su_nbytes = sizeof (struct ufs1_dinode);
		}
#endif
		DLOG((DLOG_SU, "seg %d -= %d for ino %d inode\n",
		      dtosn(fs, daddr), sizeof (struct ufs1_dinode), ino));
		sup->su_nbytes -= sizeof (struct ufs1_dinode);
		redo_ifile |=
			(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		if (redo_ifile) {
			simple_lock(&fs->lfs_interlock);
			fs->lfs_flags |= LFS_IFDIRTY;
			simple_unlock(&fs->lfs_interlock);
			/* Don't double-account */
			fs->lfs_idaddr = 0x0;
		}
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp); /* Ifile */
	}

	return redo_ifile;
}

int
lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
{
	struct buf *bp;
	struct ufs1_dinode *cdp;
	daddr_t daddr;
	int32_t *daddrp;	/* XXX ondisk32 */
	int i, ndx;
	int redo_ifile = 0;
	int gotblk = 0;
	int count;

	ASSERT_SEGLOCK(fs);
	if (!(ip->i_flag & IN_ALLMOD))
		return (0);

	/* Can't write ifile when writer is not set */
	KASSERT(ip->i_number != LFS_IFILE_INUM || fs->lfs_writer > 0 ||
		(sp->seg_flags & SEGM_CLEAN));

	/*
	 * If this is the Ifile, see if writing it here will generate a
	 * temporary misaccounting.  If it will, do the accounting and write
	 * the blocks, postponing the inode write until the accounting is
	 * solid.
	 */
	count = 0;
	while (ip->i_number == LFS_IFILE_INUM) {
		int redo = 0;

		if (sp->idp == NULL && sp->ibp == NULL &&
		    (sp->seg_bytes_left < fs->lfs_ibsize ||
		     sp->sum_bytes_left < sizeof(int32_t))) {
			(void) lfs_writeseg(fs, sp);
			continue;
		}

		/* Look for dirty Ifile blocks */
		LIST_FOREACH(bp, &fs->lfs_ivnode->v_dirtyblkhd, b_vnbufs) {
			if (!(bp->b_flags & B_GATHERED)) {
				redo = 1;
				break;
			}
		}

		if (redo == 0)
			redo = lfs_update_iaddr(fs, sp, ip, 0x0);
		if (redo == 0)
			break;

		if (sp->idp) {
			sp->idp->di_inumber = 0;
			sp->idp = NULL;
		}
		++count;
		if (count > 2)
			log(LOG_NOTICE, "lfs_writeinode: looping count=%d\n", count);
		lfs_writefile(fs, sp, fs->lfs_ivnode);
	}

	/* Allocate a new inode block if necessary. */
	if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) &&
	    sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < fs->lfs_ibsize ||
		    sp->sum_bytes_left < sizeof(int32_t))
			(void) lfs_writeseg(fs, sp);

		/* Get next inode block. */
		daddr = fs->lfs_offset;
		fs->lfs_offset += btofsb(fs, fs->lfs_ibsize);
		sp->ibp = *sp->cbpp++ =
			getblk(VTOI(fs->lfs_ivnode)->i_devvp,
			    fsbtodb(fs, daddr), fs->lfs_ibsize, 0, 0);
		gotblk++;

		/* Zero out inode numbers */
		for (i = 0; i < INOPB(fs); ++i)
			((struct ufs1_dinode *)sp->ibp->b_data)[i].di_inumber =
			    0;

		++sp->start_bpp;
		fs->lfs_avail -= btofsb(fs, fs->lfs_ibsize);
		/* Set remaining space counters. */
		sp->seg_bytes_left -= fs->lfs_ibsize;
		sp->sum_bytes_left -= sizeof(int32_t);
		ndx = fs->lfs_sumsize / sizeof(int32_t) -
			sp->ninodes / INOPB(fs) - 1;
		((int32_t *)(sp->segsum))[ndx] = daddr;
	}
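
	/*
	 * Illustrative example (not part of the original source): inode
	 * block addresses fill the summary block from the end backwards.
	 * Assuming lfs_sumsize == 8192 there are 2048 int32_t slots, so
	 * the first inode block's daddr lands in slot 2047, the second in
	 * slot 2046, and so on.
	 */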

	/* Check VDIROP in case there is a new file with no data blocks */
	if (ITOV(ip)->v_flag & VDIROP)
		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

	/* Update the inode times and copy the inode onto the inode page. */
	/* XXX kludge --- don't redirty the ifile just to put times on it */
	if (ip->i_number != LFS_IFILE_INUM)
		LFS_ITIMES(ip, NULL, NULL, NULL);

	/*
	 * If this is the Ifile, and we've already written the Ifile in this
	 * partial segment, just overwrite it (it's not on disk yet) and
	 * continue.
	 *
	 * XXX we know that the bp that we get the second time around has
	 * already been gathered.
	 */
	if (ip->i_number == LFS_IFILE_INUM && sp->idp) {
		*(sp->idp) = *ip->i_din.ffs1_din;
		ip->i_lfs_osize = ip->i_size;
		return 0;
	}

	bp = sp->ibp;
	cdp = ((struct ufs1_dinode *)bp->b_data) + (sp->ninodes % INOPB(fs));
	*cdp = *ip->i_din.ffs1_din;

	/*
	 * If cleaning, link counts and directory file sizes cannot change,
	 * since those would be directory operations---even if the file
	 * we are writing is marked VDIROP we should write the old values.
	 * If we're not cleaning, of course, update the values so we get
	 * current values the next time we clean.
	 */
	if (sp->seg_flags & SEGM_CLEAN) {
		if (ITOV(ip)->v_flag & VDIROP) {
			cdp->di_nlink = ip->i_lfs_odnlink;
			/* if (ITOV(ip)->v_type == VDIR) */
			cdp->di_size = ip->i_lfs_osize;
		}
	} else {
		ip->i_lfs_odnlink = cdp->di_nlink;
		ip->i_lfs_osize = ip->i_size;
	}


	/* We can finish the segment accounting for truncations now */
	lfs_finalize_ino_seguse(fs, ip);

	/*
	 * If we are cleaning, ensure that we don't write UNWRITTEN disk
	 * addresses to disk; possibly change the on-disk record of
	 * the inode size, either by reverting to the previous size
	 * (in the case of cleaning) or by verifying the inode's block
	 * holdings (in the case of files being allocated as they are being
	 * written).
	 * XXX By not writing UNWRITTEN blocks, we are making the lfs_avail
	 * XXX count on disk wrong by the same amount.  We should be
	 * XXX able to "borrow" from lfs_avail and return it after the
	 * XXX Ifile is written.  See also in lfs_writeseg.
	 */

	/* Check file size based on highest allocated block */
	if (((ip->i_ffs1_mode & IFMT) == IFREG ||
	     (ip->i_ffs1_mode & IFMT) == IFDIR) &&
	    ip->i_size > ((ip->i_lfs_hiblk + 1) << fs->lfs_bshift)) {
		cdp->di_size = (ip->i_lfs_hiblk + 1) << fs->lfs_bshift;
		DLOG((DLOG_SEG, "lfs_writeinode: ino %d size %" PRId64 " -> %"
		      PRId64 "\n", (int)ip->i_number, ip->i_size, cdp->di_size));
	}
	if (ip->i_lfs_effnblks != ip->i_ffs1_blocks) {
		DLOG((DLOG_SEG, "lfs_writeinode: cleansing ino %d eff %d != nblk %d)"
		      " at %x\n", ip->i_number, ip->i_lfs_effnblks,
		      ip->i_ffs1_blocks, fs->lfs_offset));
		for (daddrp = cdp->di_db; daddrp < cdp->di_ib + NIADDR;
		     daddrp++) {
			if (*daddrp == UNWRITTEN) {
				DLOG((DLOG_SEG, "lfs_writeinode: wiping UNWRITTEN\n"));
				*daddrp = 0;
			}
		}
	}

#ifdef DIAGNOSTIC
	/*
	 * Check dinode held blocks against dinode size.
	 * This should be identical to the check in lfs_vget().
	 */
	for (i = (cdp->di_size + fs->lfs_bsize - 1) >> fs->lfs_bshift;
	     i < NDADDR; i++) {
		KASSERT(i >= 0);
		if ((cdp->di_mode & IFMT) == IFLNK)
			continue;
		if (((cdp->di_mode & IFMT) == IFBLK ||
		     (cdp->di_mode & IFMT) == IFCHR) && i == 0)
			continue;
		if (cdp->di_db[i] != 0) {
# ifdef DEBUG
			lfs_dump_dinode(cdp);
# endif
			panic("writing inconsistent inode");
		}
	}
#endif /* DIAGNOSTIC */

	if (ip->i_flag & IN_CLEANING)
		LFS_CLR_UINO(ip, IN_CLEANING);
	else {
		/* XXX IN_ALLMOD */
		LFS_CLR_UINO(ip, IN_ACCESSED | IN_ACCESS | IN_CHANGE |
			     IN_UPDATE | IN_MODIFY);
		if (ip->i_lfs_effnblks == ip->i_ffs1_blocks)
			LFS_CLR_UINO(ip, IN_MODIFIED);
		else {
			DLOG((DLOG_VNODE, "lfs_writeinode: ino %d: real "
			    "blks=%d, eff=%d\n", ip->i_number,
			    ip->i_ffs1_blocks, ip->i_lfs_effnblks));
		}
	}

	if (ip->i_number == LFS_IFILE_INUM) {
		/* We know sp->idp == NULL */
		sp->idp = ((struct ufs1_dinode *)bp->b_data) +
			(sp->ninodes % INOPB(fs));

		/* Not dirty any more */
		simple_lock(&fs->lfs_interlock);
		fs->lfs_flags &= ~LFS_IFDIRTY;
		simple_unlock(&fs->lfs_interlock);
	}

	if (gotblk) {
		LFS_LOCK_BUF(bp);
		brelse(bp);
	}

	/* Increment inode count in segment summary block. */
	++((SEGSUM *)(sp->segsum))->ss_ninos;

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % INOPB(fs) == 0)
		sp->ibp = NULL;

	redo_ifile = lfs_update_iaddr(fs, sp, ip, bp->b_blkno);

	KASSERT(redo_ifile == 0);
	return (redo_ifile);
}

int
lfs_gatherblock(struct segment *sp, struct buf *bp, int *sptr)
{
	struct lfs *fs;
	int vers;
	int j, blksinblk;

	ASSERT_SEGLOCK(sp->fs);
	/*
	 * If full, finish this segment.  We may be doing I/O, so
	 * release and reacquire the splbio().
	 */
#ifdef DIAGNOSTIC
	if (sp->vp == NULL)
		panic ("lfs_gatherblock: Null vp in segment");
#endif
	fs = sp->fs;
	blksinblk = howmany(bp->b_bcount, fs->lfs_bsize);
	if (sp->sum_bytes_left < sizeof(int32_t) * blksinblk ||
	    sp->seg_bytes_left < bp->b_bcount) {
		if (sptr)
			splx(*sptr);
		lfs_updatemeta(sp);

		vers = sp->fip->fi_version;
		(void) lfs_writeseg(fs, sp);

		/* Add the current file to the segment summary. */
		lfs_acquire_finfo(fs, VTOI(sp->vp)->i_number, vers);

		if (sptr)
			*sptr = splbio();
		return (1);
	}

	if (bp->b_flags & B_GATHERED) {
		DLOG((DLOG_SEG, "lfs_gatherblock: already gathered! Ino %d,"
		      " lbn %" PRId64 "\n",
		      sp->fip->fi_ino, bp->b_lblkno));
		return (0);
	}

	/* Insert into the buffer list, update the FINFO block. */
	bp->b_flags |= B_GATHERED;

	*sp->cbpp++ = bp;
	for (j = 0; j < blksinblk; j++) {
		sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno + j;
		/* This block's accounting moves from lfs_favail to lfs_avail */
		lfs_deregister_block(sp->vp, bp->b_lblkno + j);
	}

	sp->sum_bytes_left -= sizeof(int32_t) * blksinblk;
	sp->seg_bytes_left -= bp->b_bcount;
	return (0);
}
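
/*
 * Illustrative example (not part of the original source): a 64KB cluster
 * built by lfs_gop_write on a file system with 8KB blocks spans
 * howmany(65536, 8192) == 8 logical blocks, so gathering it records 8
 * FINFO entries (32 bytes of sum_bytes_left) and consumes 64KB of
 * seg_bytes_left.
 */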

int
lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp,
    int (*match)(struct lfs *, struct buf *))
{
	struct buf *bp, *nbp;
	int s, count = 0;

	ASSERT_SEGLOCK(fs);
	if (vp->v_type == VBLK)
		return 0;
	KASSERT(sp->vp == NULL);
	sp->vp = vp;
	s = splbio();

#ifndef LFS_NO_BACKBUF_HACK
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define	BUF_OFFSET	\
	(((caddr_t)&LIST_NEXT(bp, b_vnbufs)) - (caddr_t)bp)
# define	BACK_BUF(BP)	\
	((struct buf *)(((caddr_t)(BP)->b_vnbufs.le_prev) - BUF_OFFSET))
# define	BEG_OF_LIST	\
	((struct buf *)(((caddr_t)&LIST_FIRST(&vp->v_dirtyblkhd)) - BUF_OFFSET))

loop:
	/* Find last buffer. */
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	     bp && LIST_NEXT(bp, b_vnbufs) != NULL;
	     bp = LIST_NEXT(bp, b_vnbufs))
		/* nothing */;
	for (; bp && bp != BEG_OF_LIST; bp = nbp) {
		nbp = BACK_BUF(bp);
#else /* LFS_NO_BACKBUF_HACK */
loop:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
#endif /* LFS_NO_BACKBUF_HACK */
		if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp)) {
#ifdef DEBUG
			if (vp == fs->lfs_ivnode &&
			    (bp->b_flags & (B_BUSY|B_GATHERED)) == B_BUSY)
				log(LOG_NOTICE, "lfs_gather: ifile lbn %"
				    PRId64 " busy (%x) at 0x%x",
				    bp->b_lblkno, bp->b_flags,
				    (unsigned)fs->lfs_offset);
#endif
			continue;
		}
#ifdef DIAGNOSTIC
# ifdef LFS_USE_B_INVAL
		if ((bp->b_flags & (B_CALL|B_INVAL)) == B_INVAL) {
			DLOG((DLOG_SEG, "lfs_gather: lbn %" PRId64
			      " is B_INVAL\n", bp->b_lblkno));
			VOP_PRINT(bp->b_vp);
		}
# endif /* LFS_USE_B_INVAL */
		if (!(bp->b_flags & B_DELWRI))
			panic("lfs_gather: bp not B_DELWRI");
		if (!(bp->b_flags & B_LOCKED)) {
			DLOG((DLOG_SEG, "lfs_gather: lbn %" PRId64
			      " blk %" PRId64 " not B_LOCKED\n",
			      bp->b_lblkno,
			      dbtofsb(fs, bp->b_blkno)));
			VOP_PRINT(bp->b_vp);
			panic("lfs_gather: bp not B_LOCKED");
		}
#endif
		if (lfs_gatherblock(sp, bp, &s)) {
			goto loop;
		}
		count++;
	}
	splx(s);
	lfs_updatemeta(sp);
	KASSERT(sp->vp == vp);
	sp->vp = NULL;
	return count;
}

#if DEBUG
# define DEBUG_OOFF(n) do {						\
	if (ooff == 0) {						\
		DLOG((DLOG_SEG, "lfs_updatemeta[%d]: warning: writing " \
			"ino %d lbn %" PRId64 " at 0x%" PRIx32		\
			", was 0x0 (or %" PRId64 ")\n",			\
			(n), ip->i_number, lbn, ndaddr, daddr));	\
	}								\
} while (0)
#else
# define DEBUG_OOFF(n)
#endif

/*
 * Change the given block's address to ndaddr, finding its previous
 * location using ufs_bmaparray().
 *
 * Account for this change in the segment table.
 *
 * called with sp == NULL by roll-forwarding code.
 */
void
lfs_update_single(struct lfs *fs, struct segment *sp,
    struct vnode *vp, daddr_t lbn, int32_t ndaddr, int size)
{
	SEGUSE *sup;
	struct buf *bp;
	struct indir a[NIADDR + 2], *ap;
	struct inode *ip;
	daddr_t daddr, ooff;
	int num, error;
	int bb, osize, obb;

	ASSERT_SEGLOCK(fs);
	KASSERT(sp == NULL || sp->vp == vp);
	ip = VTOI(vp);

	error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL, NULL);
	if (error)
		panic("lfs_updatemeta: ufs_bmaparray returned %d", error);

	daddr = (daddr_t)((int32_t)daddr); /* XXX ondisk32 */
	KASSERT(daddr <= LFS_MAX_DADDR);
	if (daddr > 0)
		daddr = dbtofsb(fs, daddr);

	bb = fragstofsb(fs, numfrags(fs, size));
	switch (num) {
	    case 0:
		ooff = ip->i_ffs1_db[lbn];
		DEBUG_OOFF(0);
		if (ooff == UNWRITTEN)
			ip->i_ffs1_blocks += bb;
		else {
			/* possible fragment truncation or extension */
			obb = btofsb(fs, ip->i_lfs_fragsize[lbn]);
			ip->i_ffs1_blocks += (bb - obb);
		}
		ip->i_ffs1_db[lbn] = ndaddr;
		break;
	    case 1:
		ooff = ip->i_ffs1_ib[a[0].in_off];
		DEBUG_OOFF(1);
		if (ooff == UNWRITTEN)
			ip->i_ffs1_blocks += bb;
		ip->i_ffs1_ib[a[0].in_off] = ndaddr;
		break;
	    default:
		ap = &a[num - 1];
		if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
			panic("lfs_updatemeta: bread bno %" PRId64,
			      ap->in_lbn);

		/* XXX ondisk32 */
		ooff = ((int32_t *)bp->b_data)[ap->in_off];
		DEBUG_OOFF(num);
		if (ooff == UNWRITTEN)
			ip->i_ffs1_blocks += bb;
		/* XXX ondisk32 */
		((int32_t *)bp->b_data)[ap->in_off] = ndaddr;
		(void) VOP_BWRITE(bp);
	}

	KASSERT(ooff == 0 || ooff == UNWRITTEN || ooff == daddr);

	/* Update hiblk when extending the file */
	if (lbn > ip->i_lfs_hiblk)
		ip->i_lfs_hiblk = lbn;

	/*
	 * Though we'd rather it couldn't, this *can* happen right now
	 * if cleaning blocks and regular blocks coexist.
	 */
	/* KASSERT(daddr < fs->lfs_lastpseg || daddr > ndaddr); */

	/*
	 * Update segment usage information, based on old size
	 * and location.
	 */
	if (daddr > 0) {
		u_int32_t oldsn = dtosn(fs, daddr);
#ifdef DIAGNOSTIC
		int ndupino;

		if (sp && sp->seg_number == oldsn) {
			ndupino = sp->ndupino;
		} else {
			ndupino = 0;
		}
#endif
		KASSERT(oldsn < fs->lfs_nseg);
		if (lbn >= 0 && lbn < NDADDR)
			osize = ip->i_lfs_fragsize[lbn];
		else
			osize = fs->lfs_bsize;
		LFS_SEGENTRY(sup, fs, oldsn, bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes + sizeof (struct ufs1_dinode) * ndupino
		    < osize) {
			printf("lfs_updatemeta: negative bytes "
			       "(segment %" PRIu32 " short by %" PRId64
			       ")\n", dtosn(fs, daddr),
			       (int64_t)osize -
			       (sizeof (struct ufs1_dinode) * ndupino +
				sup->su_nbytes));
			printf("lfs_updatemeta: ino %llu, lbn %" PRId64
			       ", addr = 0x%" PRIx64 "\n",
			       (unsigned long long)ip->i_number, lbn, daddr);
			printf("lfs_updatemeta: ndupino=%d\n", ndupino);
			panic("lfs_updatemeta: negative bytes");
			sup->su_nbytes = osize -
			    sizeof (struct ufs1_dinode) * ndupino;
		}
#endif
		DLOG((DLOG_SU, "seg %" PRIu32 " -= %d for ino %d lbn %" PRId64
		      " db 0x%" PRIx64 "\n",
		      dtosn(fs, daddr), osize,
		      ip->i_number, lbn, daddr));
		sup->su_nbytes -= osize;
		if (!(bp->b_flags & B_GATHERED)) {
			simple_lock(&fs->lfs_interlock);
			fs->lfs_flags |= LFS_IFDIRTY;
			simple_unlock(&fs->lfs_interlock);
		}
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp);
	}
	/*
	 * Now that this block has a new address, and its old
	 * segment no longer owns it, we can forget about its
	 * old size.
	 */
	if (lbn >= 0 && lbn < NDADDR)
		ip->i_lfs_fragsize[lbn] = size;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(struct segment *sp)
{
	struct buf *sbp;
	struct lfs *fs;
	struct vnode *vp;
	daddr_t lbn;
	int i, nblocks, num;
	int bb;
	int bytesleft, size;

	ASSERT_SEGLOCK(sp->fs);
	vp = sp->vp;
	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	KASSERT(nblocks >= 0);
	KASSERT(vp != NULL);
	if (nblocks == 0)
		return;

	/*
	 * This count may be high due to oversize blocks from lfs_gop_write.
	 * Correct for this. (XXX we should be able to keep track of these.)
	 */
	fs = sp->fs;
	for (i = 0; i < nblocks; i++) {
		if (sp->start_bpp[i] == NULL) {
			DLOG((DLOG_SEG, "lfs_updatemeta: nblocks = %d, not %d\n", i, nblocks));
			nblocks = i;
			break;
		}
		num = howmany(sp->start_bpp[i]->b_bcount, fs->lfs_bsize);
		KASSERT(sp->start_bpp[i]->b_lblkno >= 0 || num == 1);
		nblocks -= num - 1;
	}

	KASSERT(vp->v_type == VREG ||
	   nblocks == &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp);
	KASSERT(nblocks == sp->cbpp - sp->start_bpp);

	/*
	 * Sort the blocks.
	 *
	 * We have to sort even if the blocks come from the
	 * cleaner, because there might be other pending blocks on the
	 * same inode...and if we don't sort, and there are fragments
	 * present, blocks may be written in the wrong place.
	 */
	lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks, fs->lfs_bsize);

	/*
	 * Record the length of the last block in case it's a fragment.
	 * If there are indirect blocks present, they sort last.  An
	 * indirect block will be lfs_bsize and its presence indicates
	 * that you cannot have fragments.
	 *
	 * XXX This last is a lie.  A cleaned fragment can coexist with
	 * XXX a later indirect block.  This will continue to be
	 * XXX true until lfs_markv is fixed to do everything with
	 * XXX fake blocks (including fake inodes and fake indirect blocks).
	 */
	sp->fip->fi_lastlength = ((sp->start_bpp[nblocks - 1]->b_bcount - 1) &
		fs->lfs_bmask) + 1;

	/*
	 * Assign disk addresses, and update references to the logical
	 * block and the segment usage information.
	 */
	for (i = nblocks; i--; ++sp->start_bpp) {
		sbp = *sp->start_bpp;
		lbn = *sp->start_lbp;
		KASSERT(sbp->b_lblkno == lbn);

		sbp->b_blkno = fsbtodb(fs, fs->lfs_offset);

		/*
		 * If we write a frag in the wrong place, the cleaner won't
		 * be able to correctly identify its size later, and the
		 * segment will be uncleanable.  (Even worse, it will assume
		 * that the indirect block that actually ends the list
		 * is of a smaller size!)
		 */
		if ((sbp->b_bcount & fs->lfs_bmask) && i != 0)
			panic("lfs_updatemeta: fragment is not last block");

		/*
		 * For each subblock in this possibly oversized block,
		 * update its address on disk.
		 */
		KASSERT(lbn >= 0 || sbp->b_bcount == fs->lfs_bsize);
		KASSERT(vp == sbp->b_vp);
		for (bytesleft = sbp->b_bcount; bytesleft > 0;
		     bytesleft -= fs->lfs_bsize) {
			size = MIN(bytesleft, fs->lfs_bsize);
			bb = fragstofsb(fs, numfrags(fs, size));
			lbn = *sp->start_lbp++;
			lfs_update_single(fs, sp, sp->vp, lbn, fs->lfs_offset,
			    size);
			fs->lfs_offset += bb;
		}

	}
}

/*
 * Move lfs_offset to a segment earlier than newsn.
 */
int
lfs_rewind(struct lfs *fs, int newsn)
{
	int sn, osn, isdirty;
	struct buf *bp;
	SEGUSE *sup;

	ASSERT_SEGLOCK(fs);

	osn = dtosn(fs, fs->lfs_offset);
	if (osn < newsn)
		return 0;

	/* lfs_avail eats the remaining space in this segment */
	fs->lfs_avail -= fs->lfs_fsbpseg - (fs->lfs_offset - fs->lfs_curseg);

	/* Find a low-numbered segment */
	for (sn = 0; sn < fs->lfs_nseg; ++sn) {
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & SEGUSE_DIRTY;
		brelse(bp);

		if (!isdirty)
			break;
	}
	if (sn == fs->lfs_nseg)
		panic("lfs_rewind: no clean segments");
	if (newsn >= 0 && sn >= newsn)
		return ENOENT;
	fs->lfs_nextseg = sn;
	lfs_newseg(fs);
	fs->lfs_offset = fs->lfs_curseg;

	return 0;
}

/*
 * Start a new partial segment.
 *
 * Return 1 when we entered a new segment.
 * Otherwise, return 0.
 */
int
lfs_initseg(struct lfs *fs)
{
	struct segment *sp = fs->lfs_sp;
	SEGSUM *ssp;
	struct buf *sbp;	/* buffer for SEGSUM */
	int repeat = 0;		/* return value */

	ASSERT_SEGLOCK(fs);
	/* Advance to the next segment. */
	if (!LFS_PARTIAL_FITS(fs)) {
		SEGUSE *sup;
		struct buf *bp;

		/* lfs_avail eats the remaining space */
		fs->lfs_avail -= fs->lfs_fsbpseg - (fs->lfs_offset -
						   fs->lfs_curseg);
		/* Wake up any cleaning procs waiting on this file system. */
		lfs_wakeup_cleaner(fs);
		lfs_newseg(fs);
		repeat = 1;
		fs->lfs_offset = fs->lfs_curseg;

		sp->seg_number = dtosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg);

		/*
		 * If the segment contains a superblock, update the offset
		 * and summary address to skip over it.
		 */
		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
			fs->lfs_offset += btofsb(fs, LFS_SBPAD);
			sp->seg_bytes_left -= LFS_SBPAD;
		}
		brelse(bp);
		/* Segment zero could also contain the labelpad */
		if (fs->lfs_version > 1 && sp->seg_number == 0 &&
		    fs->lfs_start < btofsb(fs, LFS_LABELPAD)) {
			fs->lfs_offset +=
			    btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
			sp->seg_bytes_left -=
			    LFS_LABELPAD - fsbtob(fs, fs->lfs_start);
		}
	} else {
		sp->seg_number = dtosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg -
				      (fs->lfs_offset - fs->lfs_curseg));
	}
	fs->lfs_lastpseg = fs->lfs_offset;

	/* Record first address of this partial segment */
	if (sp->seg_flags & SEGM_CLEAN) {
		fs->lfs_cleanint[fs->lfs_cleanind] = fs->lfs_offset;
		if (++fs->lfs_cleanind >= LFS_MAX_CLEANIND) {
			/* "1" is the artificial inc in lfs_seglock */
			simple_lock(&fs->lfs_interlock);
			while (fs->lfs_iocount > 1) {
				ltsleep(&fs->lfs_iocount, PRIBIO + 1,
				    "lfs_initseg", 0, &fs->lfs_interlock);
			}
			simple_unlock(&fs->lfs_interlock);
			fs->lfs_cleanind = 0;
		}
	}

	sp->fs = fs;
	sp->ibp = NULL;
	sp->idp = NULL;
	sp->ninodes = 0;
	sp->ndupino = 0;

	sp->cbpp = sp->bpp;

	/* Get a new buffer for SEGSUM */
	sbp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
	    fsbtodb(fs, fs->lfs_offset), fs->lfs_sumsize, LFS_NB_SUMMARY);

	/* ... and enter it into the buffer list. */
	*sp->cbpp = sbp;
	sp->cbpp++;
	fs->lfs_offset += btofsb(fs, fs->lfs_sumsize);

	sp->start_bpp = sp->cbpp;

	/* Set pointer to SEGSUM, initialize it. */
	ssp = sp->segsum = sbp->b_data;
	memset(ssp, 0, fs->lfs_sumsize);
	ssp->ss_next = fs->lfs_nextseg;
	ssp->ss_nfinfo = ssp->ss_ninos = 0;
	ssp->ss_magic = SS_MAGIC;

	/* Set pointer to first FINFO, initialize it. */
	sp->fip = (struct finfo *)((caddr_t)sp->segsum + SEGSUM_SIZE(fs));
	sp->fip->fi_nblocks = 0;
	sp->start_lbp = &sp->fip->fi_blocks[0];
	sp->fip->fi_lastlength = 0;

	sp->seg_bytes_left -= fs->lfs_sumsize;
	sp->sum_bytes_left = fs->lfs_sumsize - SEGSUM_SIZE(fs);

	return (repeat);
}

/*
 * Remove SEGUSE_INVAL from all segments.
 */
void
lfs_unset_inval_all(struct lfs *fs)
{
	SEGUSE *sup;
	struct buf *bp;
	int i;

	for (i = 0; i < fs->lfs_nseg; i++) {
		LFS_SEGENTRY(sup, fs, i, bp);
		if (sup->su_flags & SEGUSE_INVAL) {
			sup->su_flags &= ~SEGUSE_INVAL;
			LFS_WRITESEGENTRY(sup, fs, i, bp);
		} else
			brelse(bp);
	}
}

/*
 * Return the next segment to write.
 */
void
lfs_newseg(struct lfs *fs)
{
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	int curseg, isdirty, sn, skip_inval;

	ASSERT_SEGLOCK(fs);

	/* Honor LFCNWRAPSTOP */
	simple_lock(&fs->lfs_interlock);
	while (fs->lfs_nextseg < fs->lfs_curseg && fs->lfs_nowrap) {
		if (fs->lfs_wrappass) {
			log(LOG_NOTICE, "%s: wrappass=%d\n",
				fs->lfs_fsmnt, fs->lfs_wrappass);
			fs->lfs_wrappass = 0;
			break;
		}
		fs->lfs_wrapstatus = LFS_WRAP_WAITING;
		wakeup(&fs->lfs_nowrap);
		log(LOG_NOTICE, "%s: waiting at log wrap\n", fs->lfs_fsmnt);
		ltsleep(&fs->lfs_wrappass, PVFS, "newseg", 10 * hz,
			&fs->lfs_interlock);
	}
	fs->lfs_wrapstatus = LFS_WRAP_GOING;
	simple_unlock(&fs->lfs_interlock);

	LFS_SEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
	DLOG((DLOG_SU, "lfs_newseg: seg %d := 0 in newseg\n",
	      dtosn(fs, fs->lfs_nextseg)));
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	sup->su_nbytes = 0;
	sup->su_nsums = 0;
	sup->su_ninos = 0;
	LFS_WRITESEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);

	LFS_CLEANERINFO(cip, fs, bp);
	--cip->clean;
	++cip->dirty;
	fs->lfs_nclean = cip->clean;
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	fs->lfs_lastseg = fs->lfs_curseg;
	fs->lfs_curseg = fs->lfs_nextseg;
	skip_inval = 1;
	for (sn = curseg = dtosn(fs, fs->lfs_curseg) + fs->lfs_interleave;;) {
		sn = (sn + 1) % fs->lfs_nseg;

		if (sn == curseg) {
			if (skip_inval)
				skip_inval = 0;
			else
				panic("lfs_nextseg: no clean segments");
		}
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & (SEGUSE_DIRTY | (skip_inval ? SEGUSE_INVAL : 0));
		/* Check SEGUSE_EMPTY as we go along */
		if (isdirty && sup->su_nbytes == 0 &&
		    !(sup->su_flags & SEGUSE_EMPTY))
			LFS_WRITESEGENTRY(sup, fs, sn, bp);
		else
			brelse(bp);

		if (!isdirty)
			break;
	}
	if (skip_inval == 0)
		lfs_unset_inval_all(fs);

	++fs->lfs_nactive;
	fs->lfs_nextseg = sntod(fs, sn);
	if (lfs_dostats) {
		++lfs_stats.segsused;
	}
}

static struct buf *
lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr,
    int n)
{
	struct lfs_cluster *cl;
	struct buf **bpp, *bp;

	ASSERT_SEGLOCK(fs);
	cl = (struct lfs_cluster *)pool_get(&fs->lfs_clpool, PR_WAITOK);
	bpp = (struct buf **)pool_get(&fs->lfs_bpppool, PR_WAITOK);
	memset(cl, 0, sizeof(*cl));
	cl->fs = fs;
	cl->bpp = bpp;
	cl->bufcount = 0;
	cl->bufsize = 0;

	/* If this segment is being written synchronously, note that */
	if (fs->lfs_sp->seg_flags & SEGM_SYNC) {
		cl->flags |= LFS_CL_SYNC;
		cl->seg = fs->lfs_sp;
		++cl->seg->seg_iocount;
	}

	/* Get an empty buffer header, or maybe one with something on it */
	bp = getiobuf();
	bp->b_flags = B_BUSY | B_CALL;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = addr;
	bp->b_iodone = lfs_cluster_callback;
	bp->b_private = cl;
	bp->b_vp = vp;

	return bp;
}

int
lfs_writeseg(struct lfs *fs, struct segment *sp)
{
	struct buf **bpp, *bp, *cbp, *newbp;
	SEGUSE *sup;
	SEGSUM *ssp;
	int i, s;
	int do_again, nblocks, byteoffset;
	size_t el_size;
	struct lfs_cluster *cl;
	u_short ninos;
	struct vnode *devvp;
	char *p = NULL;
	struct vnode *vp;
static struct buf *
lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr,
    int n)
{
	struct lfs_cluster *cl;
	struct buf **bpp, *bp;

	ASSERT_SEGLOCK(fs);
	cl = (struct lfs_cluster *)pool_get(&fs->lfs_clpool, PR_WAITOK);
	bpp = (struct buf **)pool_get(&fs->lfs_bpppool, PR_WAITOK);
	memset(cl, 0, sizeof(*cl));
	cl->fs = fs;
	cl->bpp = bpp;
	cl->bufcount = 0;
	cl->bufsize = 0;

	/* If this segment is being written synchronously, note that */
	if (fs->lfs_sp->seg_flags & SEGM_SYNC) {
		cl->flags |= LFS_CL_SYNC;
		cl->seg = fs->lfs_sp;
		++cl->seg->seg_iocount;
	}

	/* Get an empty buffer header, or maybe one with something on it */
	bp = getiobuf();
	bp->b_flags = B_BUSY | B_CALL;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = addr;
	bp->b_iodone = lfs_cluster_callback;
	bp->b_private = cl;
	bp->b_vp = vp;

	return bp;
}

int
lfs_writeseg(struct lfs *fs, struct segment *sp)
{
	struct buf **bpp, *bp, *cbp, *newbp;
	SEGUSE *sup;
	SEGSUM *ssp;
	int i, s;
	int do_again, nblocks, byteoffset;
	size_t el_size;
	struct lfs_cluster *cl;
	u_short ninos;
	struct vnode *devvp;
	char *p = NULL;
	struct vnode *vp;
	int32_t *daddrp;	/* XXX ondisk32 */
	int changed;
	u_int32_t sum;
#ifdef DEBUG
	FINFO *fip;
	int findex;
#endif

	ASSERT_SEGLOCK(fs);

	ssp = (SEGSUM *)sp->segsum;

	/*
	 * If there are no buffers other than the segment summary to write,
	 * don't do anything.  If we are at the end of a dirop sequence,
	 * however, write the empty segment summary anyway, to help out the
	 * roll-forward agent.
	 */
	if ((nblocks = sp->cbpp - sp->bpp) == 1) {
		if ((ssp->ss_flags & (SS_DIROP | SS_CONT)) != SS_DIROP)
			return 0;
	}

	/* Note if partial segment is being written by the cleaner */
	if (sp->seg_flags & SEGM_CLEAN)
		ssp->ss_flags |= SS_CLEAN;

	devvp = VTOI(fs->lfs_ivnode)->i_devvp;

	/* Update the segment usage information. */
	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);

	/* Loop through all blocks, except the segment summary. */
	for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
		if ((*bpp)->b_vp != devvp) {
			sup->su_nbytes += (*bpp)->b_bcount;
			DLOG((DLOG_SU, "seg %" PRIu32 " += %ld for ino %d"
			    " lbn %" PRId64 " db 0x%" PRIx64 "\n",
			    sp->seg_number, (*bpp)->b_bcount,
			    VTOI((*bpp)->b_vp)->i_number, (*bpp)->b_lblkno,
			    (*bpp)->b_blkno));
		}
	}

#ifdef DEBUG
	/* Check for zero-length and zero-version FINFO entries. */
	fip = (struct finfo *)((caddr_t)ssp + SEGSUM_SIZE(fs));
	for (findex = 0; findex < ssp->ss_nfinfo; findex++) {
		KDASSERT(fip->fi_nblocks > 0);
		KDASSERT(fip->fi_version > 0);
		fip = (FINFO *)((caddr_t)fip + FINFOSIZE +
			sizeof(int32_t) * fip->fi_nblocks);
	}
#endif /* DEBUG */

	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
	DLOG((DLOG_SU, "seg %d += %d for %d inodes\n",
	    sp->seg_number, ssp->ss_ninos * sizeof (struct ufs1_dinode),
	    ssp->ss_ninos));
	sup->su_nbytes += ssp->ss_ninos * sizeof (struct ufs1_dinode);
	/* sup->su_nbytes += fs->lfs_sumsize; */
	if (fs->lfs_version == 1)
		sup->su_olastmod = time_second;
	else
		sup->su_lastmod = time_second;
	sup->su_ninos += ninos;
	++sup->su_nsums;
	fs->lfs_avail -= btofsb(fs, fs->lfs_sumsize);

	do_again = !(bp->b_flags & B_GATHERED);
	LFS_WRITESEGENTRY(sup, fs, sp->seg_number, bp);	/* Ifile */

	/*
	 * Mark blocks B_BUSY, to prevent them from being changed between
	 * the checksum computation and the actual write.
	 *
	 * If we are cleaning, check indirect blocks for UNWRITTEN, and if
	 * there are any, replace them with copies that have UNASSIGNED
	 * instead.
	 */
	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
		++bpp;
		bp = *bpp;
		if (bp->b_flags & B_CALL) {	/* UBC or malloced buffer */
			bp->b_flags |= B_BUSY;
			continue;
		}

		simple_lock(&bp->b_interlock);
		s = splbio();
		while (bp->b_flags & B_BUSY) {
			DLOG((DLOG_SEG, "lfs_writeseg: avoiding potential"
			    " data summary corruption for ino %d, lbn %"
			    PRId64 "\n",
			    VTOI(bp->b_vp)->i_number, bp->b_lblkno));
			bp->b_flags |= B_WANTED;
			ltsleep(bp, (PRIBIO + 1), "lfs_writeseg", 0,
				&bp->b_interlock);
			splx(s);
			s = splbio();
		}
		bp->b_flags |= B_BUSY;
		splx(s);
		simple_unlock(&bp->b_interlock);

		/*
		 * Check and replace indirect block UNWRITTEN bogosity.
		 * XXX See comment in lfs_writefile.
		 */
		if (bp->b_lblkno < 0 && bp->b_vp != devvp && bp->b_vp &&
		    VTOI(bp->b_vp)->i_ffs1_blocks !=
		    VTOI(bp->b_vp)->i_lfs_effnblks) {
			DLOG((DLOG_VNODE, "lfs_writeseg: cleansing ino %d (%d != %d)\n",
			    VTOI(bp->b_vp)->i_number,
			    VTOI(bp->b_vp)->i_lfs_effnblks,
			    VTOI(bp->b_vp)->i_ffs1_blocks));
			/* Make a copy we'll make changes to */
			newbp = lfs_newbuf(fs, bp->b_vp, bp->b_lblkno,
			    bp->b_bcount, LFS_NB_IBLOCK);
			newbp->b_blkno = bp->b_blkno;
			memcpy(newbp->b_data, bp->b_data,
			    newbp->b_bcount);

			changed = 0;
			/* XXX ondisk32 */
			for (daddrp = (int32_t *)(newbp->b_data);
			     daddrp < (int32_t *)(newbp->b_data +
						  newbp->b_bcount); daddrp++) {
				if (*daddrp == UNWRITTEN) {
					++changed;
					*daddrp = 0;
				}
			}
			/*
			 * Get rid of the old buffer.  Don't mark it clean,
			 * though, if it still has dirty data on it.
			 */
			if (changed) {
				DLOG((DLOG_SEG, "lfs_writeseg: replacing UNWRITTEN(%d):"
				    " bp = %p newbp = %p\n", changed, bp,
				    newbp));
				*bpp = newbp;
				bp->b_flags &= ~(B_ERROR | B_GATHERED);
				if (bp->b_flags & B_CALL) {
					DLOG((DLOG_SEG, "lfs_writeseg: "
					    "indir bp should not be B_CALL\n"));
					s = splbio();
					biodone(bp);
					splx(s);
					bp = NULL;
				} else {
					/* Still on free list, leave it there */
					s = splbio();
					bp->b_flags &= ~B_BUSY;
					if (bp->b_flags & B_WANTED)
						wakeup(bp);
					splx(s);
					/*
					 * We have to re-decrement lfs_avail
					 * since this block is going to come
					 * back around to us in the next
					 * segment.
					 */
					fs->lfs_avail -=
					    btofsb(fs, bp->b_bcount);
				}
			} else {
				lfs_freebuf(fs, newbp);
			}
		}
	}
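
	/*
	 * Editor's note (added): the scrub above keeps the in-core
	 * UNWRITTEN placeholder from ever reaching the disk, where the
	 * roll-forward agent and the cleaner would misread it as a real
	 * disk address.  The transformation on the copied indirect block
	 * amounts to:
	 *
	 *	for each 32-bit entry e in the copy:
	 *		if (e == UNWRITTEN)
	 *			e = 0, changed++;
	 *
	 * and the copy is substituted only when changed != 0, so clean
	 * indirect blocks are written in place without an extra copy.
	 */
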
	/*
	 * Compute checksum across data and then across summary; the first
	 * block (the summary block) is skipped.  Set the create time here
	 * so that it's guaranteed to be later than the inode mod times.
	 */
	sum = 0;
	if (fs->lfs_version == 1)
		el_size = sizeof(u_long);
	else
		el_size = sizeof(u_int32_t);
	for (bpp = sp->bpp, i = nblocks - 1; i--; ) {
		++bpp;
		/* Loop through gop_write cluster blocks */
		for (byteoffset = 0; byteoffset < (*bpp)->b_bcount;
		     byteoffset += fs->lfs_bsize) {
#ifdef LFS_USE_B_INVAL
			if (((*bpp)->b_flags & (B_CALL | B_INVAL)) ==
			    (B_CALL | B_INVAL)) {
				if (copyin((caddr_t)(*bpp)->b_saveaddr +
					   byteoffset, dp, el_size)) {
					panic("lfs_writeseg: copyin failed [1]:"
					    " ino %d blk %" PRId64,
					    VTOI((*bpp)->b_vp)->i_number,
					    (*bpp)->b_lblkno);
				}
			} else
#endif /* LFS_USE_B_INVAL */
			{
				sum = lfs_cksum_part(
				    (*bpp)->b_data + byteoffset, el_size, sum);
			}
		}
	}
	if (fs->lfs_version == 1)
		ssp->ss_ocreate = time_second;
	else {
		ssp->ss_create = time_second;
		ssp->ss_serial = ++fs->lfs_serial;
		ssp->ss_ident = fs->lfs_ident;
	}
	ssp->ss_datasum = lfs_cksum_fold(sum);
	ssp->ss_sumsum = cksum(&ssp->ss_datasum,
	    fs->lfs_sumsize - sizeof(ssp->ss_sumsum));

	simple_lock(&fs->lfs_interlock);
	fs->lfs_bfree -= (btofsb(fs, ninos * fs->lfs_ibsize) +
			  btofsb(fs, fs->lfs_sumsize));
	fs->lfs_dmeta += (btofsb(fs, ninos * fs->lfs_ibsize) +
			  btofsb(fs, fs->lfs_sumsize));
	simple_unlock(&fs->lfs_interlock);
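
	/*
	 * Editor's note (added): the data checksum computed above samples
	 * only the first el_size bytes (one word) of each file-system
	 * block in the partial segment, rather than checksumming whole
	 * blocks.  That keeps segment writes cheap, at the price of a
	 * weaker integrity check: a torn write that corrupts the interior
	 * of a block, but not its first word, can still pass ss_datasum
	 * verification during roll-forward.  The summary checksum
	 * (ss_sumsum), by contrast, covers the rest of the summary block
	 * starting at ss_datasum.
	 */
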

	/*
	 * When we simply write the blocks we lose a rotation for every block
	 * written.  To avoid this problem, we cluster the buffers into a
	 * chunk and write the chunk.  MAXPHYS is the largest size I/O
	 * devices can handle, so use that for the size of the chunks.
	 *
	 * Blocks that are already clusters (from GOP_WRITE), however, we
	 * don't bother to copy into other clusters.
	 */

#define CHUNKSIZE MAXPHYS

	if (devvp == NULL)
		panic("devvp is NULL");
	for (bpp = sp->bpp, i = nblocks; i;) {
		cbp = lfs_newclusterbuf(fs, devvp, (*bpp)->b_blkno, i);
		cl = cbp->b_private;

		cbp->b_flags |= B_ASYNC | B_BUSY;
		cbp->b_bcount = 0;

#if defined(DEBUG) && defined(DIAGNOSTIC)
		if (bpp - sp->bpp > (fs->lfs_sumsize - SEGSUM_SIZE(fs))
		    / sizeof(int32_t)) {
			panic("lfs_writeseg: real bpp overwrite");
		}
		if (bpp - sp->bpp > segsize(fs) / fs->lfs_fsize) {
			panic("lfs_writeseg: theoretical bpp overwrite");
		}
#endif

		/*
		 * Construct the cluster.
		 */
		simple_lock(&fs->lfs_interlock);
		++fs->lfs_iocount;
		simple_unlock(&fs->lfs_interlock);
		while (i && cbp->b_bcount < CHUNKSIZE) {
			bp = *bpp;

			if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
				break;
			if (cbp->b_bcount > 0 && !(cl->flags & LFS_CL_MALLOC))
				break;

			/* Clusters from GOP_WRITE are expedited */
			if (bp->b_bcount > fs->lfs_bsize) {
				if (cbp->b_bcount > 0)
					/* Put in its own buffer */
					break;
				else {
					cbp->b_data = bp->b_data;
				}
			} else if (cbp->b_bcount == 0) {
				p = cbp->b_data = lfs_malloc(fs, CHUNKSIZE,
							     LFS_NB_CLUSTER);
				cl->flags |= LFS_CL_MALLOC;
			}
#ifdef DIAGNOSTIC
			if (dtosn(fs, dbtofsb(fs, bp->b_blkno +
					      btodb(bp->b_bcount - 1))) !=
			    sp->seg_number) {
				printf("blk size %ld daddr %" PRIx64
				    " not in seg %d\n",
				    bp->b_bcount, bp->b_blkno,
				    sp->seg_number);
				panic("segment overwrite");
			}
#endif

#ifdef LFS_USE_B_INVAL
			/*
			 * Fake buffers from the cleaner are marked as B_INVAL.
			 * We need to copy the data from user space rather than
			 * from the buffer indicated.
			 * XXX == what do I do on an error?
			 */
			if ((bp->b_flags & (B_CALL|B_INVAL)) ==
			    (B_CALL|B_INVAL)) {
				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
					panic("lfs_writeseg: "
					    "copyin failed [2]");
			} else
#endif /* LFS_USE_B_INVAL */
			if (cl->flags & LFS_CL_MALLOC) {
				/* copy data into our cluster. */
				memcpy(p, bp->b_data, bp->b_bcount);
				p += bp->b_bcount;
			}

			cbp->b_bcount += bp->b_bcount;
			cl->bufsize += bp->b_bcount;

			bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI | B_DONE);
			cl->bpp[cl->bufcount++] = bp;
			vp = bp->b_vp;
			s = splbio();
			reassignbuf(bp, vp);
			V_INCR_NUMOUTPUT(vp);
			splx(s);

			bpp++;
			i--;
		}
		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
			BIO_SETPRIO(cbp, BPRIO_TIMECRITICAL);
		else
			BIO_SETPRIO(cbp, BPRIO_TIMELIMITED);
		s = splbio();
		V_INCR_NUMOUTPUT(devvp);
		splx(s);
		VOP_STRATEGY(devvp, cbp);
		curproc->p_stats->p_ru.ru_oublock++;
	}

	if (lfs_dostats) {
		++lfs_stats.psegwrites;
		lfs_stats.blocktot += nblocks - 1;
		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
			++lfs_stats.psyncwrites;
		if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
			++lfs_stats.pcleanwrites;
			lfs_stats.cleanblocks += nblocks - 1;
		}
	}
	return (lfs_initseg(fs) || do_again);
}
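
/*
 * Editor's sketch (hypothetical, not compiled; the constants are
 * examples only): clustering trades one device I/O per block for one
 * per CHUNKSIZE of data.  On a port where MAXPHYS is 64KB and the
 * file system uses 8KB blocks, each malloc'ed cluster carries
 * CHUNKSIZE / lfs_bsize == 8 blocks, so a full partial segment is
 * written with roughly 1/8 as many strategy calls.
 */
#if 0
static int
model_blocks_per_cluster(const struct lfs *fs)
{
	return CHUNKSIZE / fs->lfs_bsize;	/* e.g. 65536 / 8192 == 8 */
}
#endif
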
void
lfs_writesuper(struct lfs *fs, daddr_t daddr)
{
	struct buf *bp;
	int s;
	struct vnode *devvp = VTOI(fs->lfs_ivnode)->i_devvp;

	ASSERT_MAYBE_SEGLOCK(fs);
#ifdef DIAGNOSTIC
	KASSERT(fs->lfs_magic == LFS_MAGIC);
#endif
	/*
	 * If we allowed one superblock write to start while another is
	 * still in progress, we would risk not having a complete
	 * checkpoint if we crash.  So, block here if a superblock write
	 * is in progress.
	 */
	simple_lock(&fs->lfs_interlock);
	s = splbio();
	while (fs->lfs_sbactive) {
		ltsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0,
			&fs->lfs_interlock);
	}
	fs->lfs_sbactive = daddr;
	splx(s);
	simple_unlock(&fs->lfs_interlock);

	/* Set timestamp of this version of the superblock */
	if (fs->lfs_version == 1)
		fs->lfs_otstamp = time_second;
	fs->lfs_tstamp = time_second;

	/* Checksum the superblock and copy it into a buffer. */
	fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
	bp = lfs_newbuf(fs, devvp,
	    fsbtodb(fs, daddr), LFS_SBPAD, LFS_NB_SBLOCK);
	memset(bp->b_data + sizeof(struct dlfs), 0,
	    LFS_SBPAD - sizeof(struct dlfs));
	*(struct dlfs *)bp->b_data = fs->lfs_dlfs;

	bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
	bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
	bp->b_iodone = lfs_supercallback;

	if (fs->lfs_sp != NULL && fs->lfs_sp->seg_flags & SEGM_SYNC)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
	curproc->p_stats->p_ru.ru_oublock++;
	s = splbio();
	V_INCR_NUMOUTPUT(bp->b_vp);
	splx(s);
	simple_lock(&fs->lfs_interlock);
	++fs->lfs_iocount;
	simple_unlock(&fs->lfs_interlock);
	VOP_STRATEGY(devvp, bp);
}

/*
 * Logical block number match routines used when traversing the dirty block
 * chain.
 */
int
lfs_match_fake(struct lfs *fs, struct buf *bp)
{

	ASSERT_SEGLOCK(fs);
	return LFS_IS_MALLOC_BUF(bp);
}

#if 0
int
lfs_match_real(struct lfs *fs, struct buf *bp)
{

	ASSERT_SEGLOCK(fs);
	return (lfs_match_data(fs, bp) && !lfs_match_fake(fs, bp));
}
#endif

int
lfs_match_data(struct lfs *fs, struct buf *bp)
{

	ASSERT_SEGLOCK(fs);
	return (bp->b_lblkno >= 0);
}

int
lfs_match_indir(struct lfs *fs, struct buf *bp)
{
	daddr_t lbn;

	ASSERT_SEGLOCK(fs);
	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
}

int
lfs_match_dindir(struct lfs *fs, struct buf *bp)
{
	daddr_t lbn;

	ASSERT_SEGLOCK(fs);
	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
}

int
lfs_match_tindir(struct lfs *fs, struct buf *bp)
{
	daddr_t lbn;

	ASSERT_SEGLOCK(fs);
	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
}
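
/*
 * Editor's note (added): under the UFS/LFS convention, indirect blocks
 * live at negative logical block numbers, and the modulus tests above
 * classify the indirection level by the residue of (-lbn - NDADDR)
 * modulo NINDIR(fs):
 *
 *	residue 0	single indirect
 *	residue 1	double indirect
 *	residue 2	triple indirect
 *
 * For example, with NDADDR == 12, lbn == -12 gives residue 0 (a single
 * indirect block) and lbn == -13 gives residue 1 (a double indirect).
 */
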
static void
lfs_free_aiodone(struct buf *bp)
{
	struct lfs *fs;

	fs = bp->b_private;
	ASSERT_NO_SEGLOCK(fs);
	lfs_freebuf(fs, bp);
}

static void
lfs_super_aiodone(struct buf *bp)
{
	struct lfs *fs;

	fs = bp->b_private;
	ASSERT_NO_SEGLOCK(fs);
	simple_lock(&fs->lfs_interlock);
	fs->lfs_sbactive = 0;
	if (--fs->lfs_iocount <= 1)
		wakeup(&fs->lfs_iocount);
	simple_unlock(&fs->lfs_interlock);
	wakeup(&fs->lfs_sbactive);
	lfs_freebuf(fs, bp);
}

static void
lfs_cluster_aiodone(struct buf *bp)
{
	struct lfs_cluster *cl;
	struct lfs *fs;
	struct buf *tbp, *fbp;
	struct vnode *vp, *devvp;
	struct inode *ip;
	int s, error = 0;

	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	cl = bp->b_private;
	fs = cl->fs;
	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
	ASSERT_NO_SEGLOCK(fs);

	/* Put the pages back, and release the buffer */
	while (cl->bufcount--) {
		tbp = cl->bpp[cl->bufcount];
		KASSERT(tbp->b_flags & B_BUSY);
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}

		/*
		 * We're done with tbp.  If it has not been re-dirtied since
		 * the cluster was written, free it.  Otherwise, keep it on
		 * the locked list to be written again.
		 */
		vp = tbp->b_vp;

		tbp->b_flags &= ~B_GATHERED;

		LFS_BCLEAN_LOG(fs, tbp);

		if (!(tbp->b_flags & B_CALL)) {
			KASSERT(tbp->b_flags & B_LOCKED);
			s = splbio();
			simple_lock(&bqueue_slock);
			bremfree(tbp);
			simple_unlock(&bqueue_slock);
			if (vp)
				reassignbuf(tbp, vp);
			splx(s);
			tbp->b_flags |= B_ASYNC;	/* for biodone */
		}

		if ((tbp->b_flags & (B_LOCKED | B_DELWRI)) == B_LOCKED)
			LFS_UNLOCK_BUF(tbp);

		if (tbp->b_flags & B_DONE) {
			DLOG((DLOG_SEG, "blk %d biodone already (flags %lx)\n",
			    cl->bufcount, (long)tbp->b_flags));
		}

		if ((tbp->b_flags & B_CALL) && !LFS_IS_MALLOC_BUF(tbp)) {
			/*
			 * A buffer from the page daemon.
			 * We use the same iodone as it does,
			 * so we must manually disassociate its
			 * buffers from the vp.
			 */
			if (tbp->b_vp) {
				/* This is just silly */
				s = splbio();
				brelvp(tbp);
				tbp->b_vp = vp;
				splx(s);
			}
			/* Put it back the way it was */
			tbp->b_flags |= B_ASYNC;
			/* Master buffers have B_AGE */
			if (tbp->b_private == tbp)
				tbp->b_flags |= B_AGE;
		}
		s = splbio();
		biodone(tbp);

		/*
		 * If this is the last block for this vnode, but
		 * there are other blocks on its dirty list,
		 * set IN_MODIFIED/IN_CLEANING depending on what
		 * sort of block.  Only do this for our mount point,
		 * not for, e.g., inode blocks that are attached to
		 * the devvp.
		 * XXX KS - Shouldn't we set *both* if both types
		 * of blocks are present (traverse the dirty list?)
		 */
		simple_lock(&global_v_numoutput_slock);
		if (vp != devvp && vp->v_numoutput == 0 &&
		    (fbp = LIST_FIRST(&vp->v_dirtyblkhd)) != NULL) {
			ip = VTOI(vp);
			DLOG((DLOG_SEG, "lfs_cluster_aiodone: mark ino %d\n",
			    ip->i_number));
			if (LFS_IS_MALLOC_BUF(fbp))
				LFS_SET_UINO(ip, IN_CLEANING);
			else
				LFS_SET_UINO(ip, IN_MODIFIED);
		}
		simple_unlock(&global_v_numoutput_slock);
		splx(s);
		wakeup(vp);
	}

	/* Fix up the cluster buffer, and release it */
	if (cl->flags & LFS_CL_MALLOC)
		lfs_free(fs, bp->b_data, LFS_NB_CLUSTER);
	putiobuf(bp);

	/* Note i/o done */
	if (cl->flags & LFS_CL_SYNC) {
		if (--cl->seg->seg_iocount == 0)
			wakeup(&cl->seg->seg_iocount);
	}
	simple_lock(&fs->lfs_interlock);
#ifdef DIAGNOSTIC
	if (fs->lfs_iocount == 0)
		panic("lfs_cluster_aiodone: zero iocount");
#endif
	if (--fs->lfs_iocount <= 1)
		wakeup(&fs->lfs_iocount);
	simple_unlock(&fs->lfs_interlock);

	pool_put(&fs->lfs_bpppool, cl->bpp);
	cl->bpp = NULL;
	pool_put(&fs->lfs_clpool, cl);
}

static void
lfs_generic_callback(struct buf *bp, void (*aiodone)(struct buf *))
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}

static void
lfs_cluster_callback(struct buf *bp)
{

	lfs_generic_callback(bp, lfs_cluster_aiodone);
}

void
lfs_supercallback(struct buf *bp)
{

	lfs_generic_callback(bp, lfs_super_aiodone);
}

/*
 * The only buffers that are going to hit these functions are the
 * segment write blocks, or the segment summaries, or the superblocks.
 *
 * All of the above are created by lfs_newbuf, and so do not need to be
 * released via brelse.
 */
void
lfs_callback(struct buf *bp)
{

	lfs_generic_callback(bp, lfs_free_aiodone);
}
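
/*
 * Editor's note (added): lfs_generic_callback runs at biodone() time,
 * typically from interrupt context at splbio, so it does no real work
 * itself.  It queues the buffer on the UVM aio_done list and wakes the
 * aiodone daemon, which later invokes the real handler
 * (lfs_cluster_aiodone, lfs_super_aiodone or lfs_free_aiodone) in
 * thread context, where the heavier cleanup (buffer-queue
 * manipulation, pool frees, wakeups) can be done without holding up
 * interrupt processing.
 */
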
/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 */
/*
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to unsigned in this routine so that the
 * negative block numbers (metadata blocks) sort AFTER the data blocks.
 */

void
lfs_shellsort(struct buf **bp_array, int32_t *lb_array, int nmemb, int size)
{
	static int __rsshell_increments[] = { 4, 1, 0 };
	int incr, *incrp, t1, t2;
	struct buf *bp_temp;

#ifdef DEBUG
	incr = 0;
	for (t1 = 0; t1 < nmemb; t1++) {
		for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
			if (lb_array[incr++] != bp_array[t1]->b_lblkno + t2) {
				/* dump before panic */
				printf("lfs_shellsort: nmemb=%d, size=%d\n",
				    nmemb, size);
				incr = 0;
				for (t1 = 0; t1 < nmemb; t1++) {
					const struct buf *bp = bp_array[t1];

					printf("bp[%d]: lbn=%" PRIu64 ", size=%"
					    PRIu64 "\n", t1,
					    (uint64_t)bp->b_lblkno,
					    (uint64_t)bp->b_bcount);
					printf("lbns:");
					for (t2 = 0; t2 * size < bp->b_bcount;
					     t2++) {
						printf(" %" PRId32,
						    lb_array[incr++]);
					}
					printf("\n");
				}
				panic("lfs_shellsort: inconsistent input");
			}
		}
	}
#endif

	for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
		for (t1 = incr; t1 < nmemb; ++t1)
			for (t2 = t1 - incr; t2 >= 0;)
				if ((u_int32_t)bp_array[t2]->b_lblkno >
				    (u_int32_t)bp_array[t2 + incr]->b_lblkno) {
					bp_temp = bp_array[t2];
					bp_array[t2] = bp_array[t2 + incr];
					bp_array[t2 + incr] = bp_temp;
					t2 -= incr;
				} else
					break;

	/* Reform the list of logical blocks */
	incr = 0;
	for (t1 = 0; t1 < nmemb; t1++) {
		for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
			lb_array[incr++] = bp_array[t1]->b_lblkno + t2;
		}
	}
}

/*
 * Call vget with LK_NOWAIT.  If we are the one who holds VXLOCK/VFREEING,
 * however, we must press on.  Just fake success in that case.
 */
int
lfs_vref(struct vnode *vp)
{
	int error;
	struct lfs *fs;

	fs = VTOI(vp)->i_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * If we return 1 here during a flush, we risk vinvalbuf() not
	 * being able to flush all of the pages from this vnode, which
	 * will cause it to panic.  So, return 0 if a flush is in progress.
	 */
	error = vget(vp, LK_NOWAIT);
	if (error == EBUSY && IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
		++fs->lfs_flushvp_fakevref;
		return 0;
	}
	return error;
}

/*
 * This is vrele except that we do not want to VOP_INACTIVE this vnode.  We
 * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
 */
void
lfs_vunref(struct vnode *vp)
{
	struct lfs *fs;

	fs = VTOI(vp)->i_lfs;
	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Analogous to lfs_vref, if the node is flushing, fake it.
	 */
	if (IS_FLUSHING(fs, vp) && fs->lfs_flushvp_fakevref) {
		--fs->lfs_flushvp_fakevref;
		return;
	}

	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount <= 0) {
		printf("lfs_vunref: inum is %llu\n", (unsigned long long)
		    VTOI(vp)->i_number);
		printf("lfs_vunref: flags are 0x%lx\n", (u_long)vp->v_flag);
		printf("lfs_vunref: usecount = %ld\n", (long)vp->v_usecount);
		panic("lfs_vunref: v_usecount <= 0");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
	/*
	 * insert at tail of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * We use this when we have vnodes that were loaded in solely for cleaning.
 * There is no reason to believe that these vnodes will be referenced again
 * soon, since the cleaning process is unrelated to normal filesystem
 * activity.  Putting cleaned vnodes at the tail of the list has the effect
 * of flushing the vnode LRU.  So, put vnodes that were loaded only for
 * cleaning at the head of the list, instead.
 */
void
lfs_vunref_head(struct vnode *vp)
{

	ASSERT_SEGLOCK(VTOI(vp)->i_lfs);
	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		panic("lfs_vunref_head: v_usecount == 0");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
	/*
	 * insert at head of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}


/*
 * Set up an FINFO entry for a new file.  The fip pointer is assumed to
 * point at uninitialized space.
 */
void
lfs_acquire_finfo(struct lfs *fs, ino_t ino, int vers)
{
	struct segment *sp = fs->lfs_sp;

	KASSERT(vers > 0);

	if (sp->seg_bytes_left < fs->lfs_bsize ||
	    sp->sum_bytes_left < sizeof(struct finfo))
		(void) lfs_writeseg(fs, fs->lfs_sp);

	sp->sum_bytes_left -= FINFOSIZE;
	++((SEGSUM *)(sp->segsum))->ss_nfinfo;
	sp->fip->fi_nblocks = 0;
	sp->fip->fi_ino = ino;
	sp->fip->fi_version = vers;
}

/*
 * Release the FINFO entry, either clearing out an unused entry or
 * advancing us to the next available entry.
 */
void
lfs_release_finfo(struct lfs *fs)
{
	struct segment *sp = fs->lfs_sp;

	if (sp->fip->fi_nblocks != 0) {
		sp->fip = (FINFO*)((caddr_t)sp->fip + FINFOSIZE +
			sizeof(int32_t) * sp->fip->fi_nblocks);
		sp->start_lbp = &sp->fip->fi_blocks[0];
	} else {
		sp->sum_bytes_left += FINFOSIZE;
		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
	}
}