/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)lfs_segment.c	7.32 (Berkeley) 08/27/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#define	MAX_ACTIVE	10
/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	((fs)->lfs_dbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
	1 << (fs)->lfs_fsbtodb)

void	 lfs_callback __P((struct buf *));
void	 lfs_gather __P((struct lfs *, struct segment *,
	     struct vnode *, int (*) __P((struct lfs *, struct buf *))));
int	 lfs_gatherblock __P((struct segment *, struct buf *, int *));
void	 lfs_initseg __P((struct lfs *, struct segment *));
void	 lfs_iset __P((struct inode *, daddr_t, time_t));
int	 lfs_match_data __P((struct lfs *, struct buf *));
int	 lfs_match_dindir __P((struct lfs *, struct buf *));
int	 lfs_match_indir __P((struct lfs *, struct buf *));
int	 lfs_match_tindir __P((struct lfs *, struct buf *));
void	 lfs_newseg __P((struct lfs *));
void	 lfs_shellsort __P((struct buf **, daddr_t *, register int));
void	 lfs_supercallback __P((struct buf *));
void	 lfs_updatemeta __P((struct segment *));
void	 lfs_writefile __P((struct lfs *, struct segment *, struct vnode *));
int	 lfs_writeinode __P((struct lfs *, struct segment *, struct inode *));
int	 lfs_writeseg __P((struct lfs *, struct segment *));
void	 lfs_writesuper __P((struct lfs *, struct segment *));
void	 lfs_writevnodes __P((struct lfs *fs, struct mount *mp,
	     struct segment *sp, int dirops));

int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */

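/*
 * Write all the dirty data blocks and the inode for a single vnode to a
 * new partial segment, looping until the write does not redirty the ifile.
 * (Descriptive comment added; behavior is as implemented below.)
 */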
int
lfs_vflush(vp)
	struct vnode *vp;
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	int error, s;

	fs = VFSTOUFS(vp->v_mount)->um_lfs;
	lfs_seglock(fs);

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	sp = malloc(sizeof(struct segment), M_SEGMENT, M_WAITOK);
	sp->bpp = malloc(((LFS_SUMMARY_SIZE - sizeof(SEGSUM)) /
	    sizeof(daddr_t) + 1) * sizeof(struct buf *), M_SEGMENT, M_WAITOK);
	sp->seg_flags = SEGM_CKP;
	sp->vp = NULL;

	/*
	 * Keep a cumulative count of the outstanding I/O operations.  If the
	 * disk drive catches up with us it could go to zero before we finish,
	 * so we artificially increment it by one until we've scheduled all of
	 * the writes we intend to do.
	 */
	s = splbio();
	++fs->lfs_iocount;
	splx(s);

	ip = VTOI(vp);
	do {
		lfs_initseg(fs, sp);
		do {
			if (vp->v_dirtyblkhd != NULL)
				lfs_writefile(fs, sp, vp);
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);

	/*
	 * If the I/O count is non-zero, sleep until it reaches zero.  At the
	 * moment, the user's process hangs around so we can sleep.
	 */
	s = splbio();
	if (--fs->lfs_iocount && (error =
	    tsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs vflush", 0))) {
		free(sp->bpp, M_SEGMENT);
		free(sp, M_SEGMENT);
		return (error);
	}
	splx(s);
	lfs_segunlock(fs);

	/*
	 * XXX
	 * Should be writing a checkpoint?
	 */
	free(sp->bpp, M_SEGMENT);
	free(sp, M_SEGMENT);

	return (0);
}

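/*
 * Write the dirty blocks and inodes for the vnodes on a mount point.  The
 * dirops flag selects which pass this is: vnodes involved in directory
 * operations (VDIROP) or all other vnodes.  The ifile is skipped here and
 * handled separately by the caller.
 */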
void
lfs_writevnodes(fs, mp, sp, dirops)
	struct lfs *fs;
	struct mount *mp;
	struct segment *sp;
	int dirops;
{
	struct inode *ip;
	struct vnode *vp;
	int error, s;

loop:	for (vp = mp->mnt_mounth; vp; vp = vp->v_mountf) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;

		if (dirops && !(vp->v_flag & VDIROP) ||
		    !dirops && (vp->v_flag & VDIROP))
			continue;
		/*
		 * XXX
		 * Up the ref count so we don't get tossed out of
		 * memory.
		 */
		VREF(vp);

		/*
		 * Write the inode/file if dirty and it's not the
		 * IFILE.
		 */
		ip = VTOI(vp);
		if ((ip->i_flag & (IMOD | IACC | IUPD | ICHG) ||
		    vp->v_dirtyblkhd != NULL) &&
		    ip->i_number != LFS_IFILE_INUM) {
			if (vp->v_dirtyblkhd != NULL)
				lfs_writefile(fs, sp, vp);
			(void) lfs_writeinode(fs, sp, ip);
		}
		vp->v_flag &= ~VDIROP;
		vrele(vp);
	}
}

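/*
 * Write dirty blocks and inodes for the entire file system.  If do_ckp is
 * set, or too many segments have been written since the last checkpoint,
 * write a checkpoint: flush the ifile, clear the ACTIVE segment flags, and
 * rewrite the superblock.
 */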
int
lfs_segwrite(mp, do_ckp)
	struct mount *mp;
	int do_ckp;			/* Do a checkpoint. */
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	daddr_t ibno;
	CLEANERINFO *cip;
	int clean, error, i, s;

	fs = VFSTOUFS(mp)->um_lfs;

	/*
	 * If we have fewer than 2 clean segments, wait until cleaner
	 * writes.
	 */
	do {
		LFS_CLEANERINFO(cip, fs, bp);
		clean = cip->clean;
		brelse(bp);
		if (clean <= 2) {
			printf("segs clean: %d\n", clean);
			wakeup(&lfs_allclean_wakeup);
			if (error = tsleep(&fs->lfs_avail, PRIBIO + 1,
			    "lfs writer", 0))
				return (error);
		}
	} while (clean <= 2);
	lfs_seglock(fs);

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = do_ckp || fs->lfs_nactive > MAX_ACTIVE;
	sp = malloc(sizeof(struct segment), M_SEGMENT, M_WAITOK);
	sp->bpp = malloc(((LFS_SUMMARY_SIZE - sizeof(SEGSUM)) /
	    sizeof(daddr_t) + 1) * sizeof(struct buf *), M_SEGMENT, M_WAITOK);
	sp->seg_flags = do_ckp ? SEGM_CKP : 0;
	sp->vp = NULL;
	lfs_initseg(fs, sp);

	/*
	 * Keep a cumulative count of the outstanding I/O operations.  If the
	 * disk drive catches up with us it could go to zero before we finish,
	 * so we artificially increment it by one until we've scheduled all of
	 * the writes we intend to do.  If not a checkpoint, we never do the
	 * final decrement, avoiding the wakeup in the callback routine.
	 */
	s = splbio();
	++fs->lfs_iocount;
	splx(s);

	lfs_writevnodes(fs, mp, sp, 0);
	fs->lfs_writer = 1;
	if (fs->lfs_dirops && (error =
	    tsleep(&fs->lfs_writer, PRIBIO + 1, "lfs writer", 0))) {
		free(sp->bpp, M_SEGMENT);
		free(sp, M_SEGMENT);
		fs->lfs_writer = 0;
		return (error);
	}

	lfs_writevnodes(fs, mp, sp, 1);

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp)
		for (ibno = fs->lfs_cleansz + fs->lfs_segtabsz;
		    --ibno >= fs->lfs_cleansz; ) {
			if (bread(fs->lfs_ivnode, ibno, fs->lfs_bsize,
			    NOCRED, &bp))
				panic("lfs: ifile read");
			segusep = (SEGUSE *)bp->b_un.b_addr;
			for (i = fs->lfs_sepb; i--; segusep++)
				segusep->su_flags &= ~SEGUSE_ACTIVE;

			error = VOP_BWRITE(bp);
		}

	if (do_ckp || fs->lfs_doifile) {
		vp = fs->lfs_ivnode;
		while (vget(vp));
		ip = VTOI(vp);
		if (vp->v_dirtyblkhd != NULL)
			lfs_writefile(fs, sp, vp);
		(void)lfs_writeinode(fs, sp, ip);
		vput(vp);
		/*
		 * This should never happen because we just guaranteed
		 * that all the segment usage table blocks are dirty, so
		 * no new ones should get written.
		 */
		if (lfs_writeseg(fs, sp) && do_ckp)
			panic("lfs_segwrite: created dirty blocks on ckp");
	} else
		(void) lfs_writeseg(fs, sp);

	/*
	 * If the I/O count is non-zero, sleep until it reaches zero.  At the
	 * moment, the user's process hangs around so we can sleep.
	 */
	fs->lfs_writer = 0;
	fs->lfs_doifile = 0;
	wakeup(&fs->lfs_dirops);

	s = splbio();
	--fs->lfs_iocount;
	if (do_ckp) {
		if (fs->lfs_iocount && (error =
		    tsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs sync", 0))) {
			free(sp->bpp, M_SEGMENT);
			free(sp, M_SEGMENT);
			return (error);
		}
		splx(s);
		fs->lfs_nactive = 0;
		lfs_writesuper(fs, sp);
	} else
		splx(s);

	lfs_segunlock(fs);

	free(sp->bpp, M_SEGMENT);
	free(sp, M_SEGMENT);

	return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
void
lfs_writefile(fs, sp, vp)
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
{
	struct buf *bp;
	struct finfo *fip;
	IFILE *ifp;

	if (sp->seg_bytes_left < fs->lfs_bsize ||
	    sp->sum_bytes_left < sizeof(struct finfo)) {
		(void) lfs_writeseg(fs, sp);
		lfs_initseg(fs, sp);
	}
	sp->sum_bytes_left -= sizeof(struct finfo) - sizeof(daddr_t);

	fip = sp->fip;
	fip->fi_nblocks = 0;
	fip->fi_ino = VTOI(vp)->i_number;
	LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
	fip->fi_version = ifp->if_version;
	brelse(bp);

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 */
	lfs_gather(fs, sp, vp, lfs_match_data);
	lfs_gather(fs, sp, vp, lfs_match_indir);
	lfs_gather(fs, sp, vp, lfs_match_dindir);
#ifdef TRIPLE
	lfs_gather(fs, sp, vp, lfs_match_tindir);
#endif

	fip = sp->fip;
#ifdef META
	printf("lfs_writefile: adding %d blocks\n", fip->fi_nblocks);
#endif
	if (fip->fi_nblocks != 0) {
		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
		sp->fip =
		    (struct finfo *)((caddr_t)fip + sizeof(struct finfo) +
		    sizeof(daddr_t) * (fip->fi_nblocks - 1));
		sp->start_lbp = &sp->fip->fi_blocks[0];
	} else
		sp->sum_bytes_left += sizeof(struct finfo) - sizeof(daddr_t);
}

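/*
 * Copy a dirty inode into the current inode block, allocating a new inode
 * block (and, if needed, a new partial segment) first, and record the new
 * disk address in the ifile.  The return value (redo_ifile) tells the
 * caller whether this dirtied the ifile so that it must be written again.
 */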
int
lfs_writeinode(fs, sp, ip)
	struct lfs *fs;
	struct segment *sp;
	struct inode *ip;
{
	struct buf *bp, *ibp;
	IFILE *ifp;
	SEGUSE *sup;
	daddr_t daddr;
	ino_t ino;
	int error, ndx;
	int redo_ifile = 0;

	if (!(ip->i_flag & (IMOD | IACC | IUPD | ICHG)))
		return (0);

	/* Allocate a new inode block if necessary. */
	if (sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < fs->lfs_bsize ||
		    sp->sum_bytes_left < sizeof(daddr_t)) {
			(void) lfs_writeseg(fs, sp);
			lfs_initseg(fs, sp);
		}

		/* Get next inode block. */
		daddr = fs->lfs_offset;
		fs->lfs_offset += fsbtodb(fs, 1);
		sp->ibp = *sp->cbpp++ =
		    lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, daddr,
		    fs->lfs_bsize);
		++sp->start_bpp;
		fs->lfs_avail -= fsbtodb(fs, 1);

		/* Set remaining space counters. */
		sp->seg_bytes_left -= fs->lfs_bsize;
		sp->sum_bytes_left -= sizeof(daddr_t);
		ndx = LFS_SUMMARY_SIZE / sizeof(daddr_t) -
		    sp->ninodes / INOPB(fs) - 1;
		((daddr_t *)(sp->segsum))[ndx] = daddr;
	}

	/* Update the inode times and copy the inode onto the inode page. */
	if (ip->i_flag & IMOD)
		--fs->lfs_uinodes;
	ITIMES(ip, &time, &time);
	ip->i_flag &= ~(IMOD | IACC | IUPD | ICHG);
	bp = sp->ibp;
	bp->b_un.b_dino[sp->ninodes % INOPB(fs)] = ip->i_din;
	/* Increment inode count in segment summary block. */
	++((SEGSUM *)(sp->segsum))->ss_ninos;

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % INOPB(fs) == 0)
		sp->ibp = NULL;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = fs->lfs_idaddr;
		fs->lfs_idaddr = bp->b_blkno;
	} else {
		LFS_IENTRY(ifp, fs, ino, ibp);
		daddr = ifp->if_daddr;
		ifp->if_daddr = bp->b_blkno;
		error = VOP_BWRITE(ibp);
	}

	/*
	 * No need to update segment usage if there was no former inode address
	 * or if the last inode address is in the current partial segment.
	 */
	if (daddr != LFS_UNUSED_DADDR &&
	    !(daddr >= fs->lfs_lastpseg && daddr <= bp->b_blkno)) {
		LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes < sizeof(struct dinode)) {
			/* XXX -- Change to a panic. */
			printf("lfs: negative bytes (segment %d)\n",
			    datosn(fs, daddr));
			panic("negative bytes");
		}
#endif
		sup->su_nbytes -= sizeof(struct dinode);
		redo_ifile =
		    (ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		error = VOP_BWRITE(bp);
	}
	return (redo_ifile);
}

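/*
 * Add a single block to the partial segment being built.  If the summary
 * block or the segment is full, first flush the blocks gathered so far,
 * start a new partial segment, and return nonzero so the caller restarts
 * its scan of the vnode's dirty buffer list.
 */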
int
lfs_gatherblock(sp, bp, sptr)
	struct segment *sp;
	struct buf *bp;
	int *sptr;
{
	struct lfs *fs;
	int version;

	/*
	 * If full, finish this segment.  We may be doing I/O, so
	 * release and reacquire the splbio().
	 */
#ifdef DIAGNOSTIC
	if (sp->vp == NULL)
		panic("lfs_gatherblock: Null vp in segment");
#endif
	fs = sp->fs;
	if (sp->sum_bytes_left < sizeof(daddr_t) ||
	    sp->seg_bytes_left < fs->lfs_bsize) {
		if (sptr)
			splx(*sptr);
		lfs_updatemeta(sp);

		/* Add the current file to the segment summary. */
		++((SEGSUM *)(sp->segsum))->ss_nfinfo;

		version = sp->fip->fi_version;
		(void) lfs_writeseg(fs, sp);
		lfs_initseg(fs, sp);

		sp->fip->fi_version = version;
		sp->fip->fi_ino = VTOI(sp->vp)->i_number;

		sp->sum_bytes_left -=
		    sizeof(struct finfo) - sizeof(daddr_t);

		if (sptr)
			*sptr = splbio();
		return (1);
	}

	/* Insert into the buffer list, update the FINFO block. */
	if (bp->b_vp == sp->fs->lfs_ivnode &&
	    ((bp->b_lblkno == 0 &&
	    (bp->b_un.b_daddr[0] > 26 || bp->b_un.b_daddr[1] > 26)) ||
	    (bp->b_lblkno > 2)))
		printf("Bad ifile block\n");
	bp->b_flags |= B_GATHERED;
	*sp->cbpp++ = bp;
	sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno;

	sp->sum_bytes_left -= sizeof(daddr_t);
	sp->seg_bytes_left -= bp->b_bufsize;
	return (0);
}

void
lfs_gather(fs, sp, vp, match)
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	int (*match) __P((struct lfs *, struct buf *));
{
	struct buf *bp;
	int s;

	sp->vp = vp;
	s = splbio();
loop:	for (bp = vp->v_dirtyblkhd; bp; bp = bp->b_blockf) {
		if (bp->b_flags & B_BUSY || !match(fs, bp) ||
		    bp->b_flags & B_GATHERED)
			continue;
#ifdef DIAGNOSTIC
		if (!(bp->b_flags & B_DELWRI))
			panic("lfs_gather: bp not B_DELWRI");
		if (!(bp->b_flags & B_LOCKED))
			panic("lfs_gather: bp not B_LOCKED");
#endif
		if (lfs_gatherblock(sp, bp, &s))
			goto loop;
	}
	splx(s);
	lfs_updatemeta(sp);
	sp->vp = NULL;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(sp)
	struct segment *sp;
{
	SEGUSE *sup;
	struct buf *bp;
	struct lfs *fs;
	struct vnode *vp;
	INDIR a[NIADDR], *ap;
	struct inode *ip;
	daddr_t daddr, lbn, off;
	int db_per_fsb, error, i, nblocks, num;

	vp = sp->vp;
	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	if (vp == NULL || nblocks == 0)
		return;

	/* Sort the blocks. */
	if (!(sp->seg_flags & SEGM_CLEAN))
		lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks);

	/*
	 * Assign disk addresses, and update references to the logical
	 * block and the segment usage information.
	 */
	fs = sp->fs;
	db_per_fsb = fsbtodb(fs, 1);
	for (i = nblocks; i--; ++sp->start_bpp) {
		lbn = *sp->start_lbp++;
		(*sp->start_bpp)->b_blkno = off = fs->lfs_offset;
		fs->lfs_offset += db_per_fsb;

		if (error = lfs_bmaparray(vp, lbn, &daddr, a, &num))
			panic("lfs_updatemeta: lfs_bmaparray %d", error);
		ip = VTOI(vp);
		switch (num) {
		case 0:
			ip->i_db[lbn] = off;
			break;
		case 1:
			ip->i_ib[a[0].in_off] = off;
			break;
		default:
			ap = &a[num - 1];
			if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_updatemeta: bread bno %d",
				    ap->in_lbn);
			/*
			 * Bread may create a new indirect block which needs
			 * to get counted for the inode.
			 */
			if (bp->b_blkno == -1 && !(bp->b_flags & B_CACHE)) {
				printf("Updatemeta allocating indirect block: shouldn't happen\n");
				ip->i_blocks += btodb(fs->lfs_bsize);
				fs->lfs_bfree -= btodb(fs->lfs_bsize);
			}
			bp->b_un.b_daddr[ap->in_off] = off;
			VOP_BWRITE(bp);
		}

		/* Update segment usage information. */
		if (daddr != UNASSIGNED) {
			LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
			if (sup->su_nbytes < fs->lfs_bsize) {
				/* XXX -- Change to a panic. */
				printf("lfs: negative bytes (segment %d)\n",
				    datosn(fs, daddr));
				panic("Negative Bytes");
			}
#endif
			sup->su_nbytes -= fs->lfs_bsize;
			error = VOP_BWRITE(bp);
		}
	}
}

/*
 * Start a new segment.
 */
void
lfs_initseg(fs, sp)
	struct lfs *fs;
	struct segment *sp;
{
	SEGUSE *sup;
	SEGSUM *ssp;
	struct buf *bp;
	daddr_t lbn, *lbnp;

	/* Advance to the next segment. */
	if (!LFS_PARTIAL_FITS(fs)) {
		/* Wake up any cleaning procs waiting on this file system. */
		wakeup(&fs->lfs_nextseg);
		wakeup(&lfs_allclean_wakeup);

		lfs_newseg(fs);
		fs->lfs_offset = fs->lfs_curseg;
		sp->seg_number = datosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fs->lfs_dbpseg * DEV_BSIZE;

		/*
		 * If the segment contains a superblock, update the offset
		 * and summary address to skip over it.
		 */
		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
			fs->lfs_offset += LFS_SBPAD / DEV_BSIZE;
			sp->seg_bytes_left -= LFS_SBPAD;
		}
		brelse(bp);
	} else {
		sp->seg_number = datosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = (fs->lfs_dbpseg -
		    (fs->lfs_offset - fs->lfs_curseg)) * DEV_BSIZE;
	}
	fs->lfs_lastpseg = fs->lfs_offset;

	sp->fs = fs;
	sp->ibp = NULL;
	sp->ninodes = 0;

	/* Get a new buffer for SEGSUM and enter it into the buffer list. */
	sp->cbpp = sp->bpp;
	*sp->cbpp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, fs->lfs_offset,
	    LFS_SUMMARY_SIZE);
	sp->segsum = (*sp->cbpp)->b_un.b_addr;
	sp->start_bpp = ++sp->cbpp;
	fs->lfs_offset += LFS_SUMMARY_SIZE / DEV_BSIZE;

	/* Set point to SEGSUM, initialize it. */
	ssp = sp->segsum;
	ssp->ss_next = fs->lfs_nextseg;
	ssp->ss_nfinfo = ssp->ss_ninos = 0;

	/* Set pointer to first FINFO, initialize it. */
	sp->fip = (struct finfo *)(sp->segsum + sizeof(SEGSUM));
	sp->fip->fi_nblocks = 0;
	sp->start_lbp = &sp->fip->fi_blocks[0];

	sp->seg_bytes_left -= LFS_SUMMARY_SIZE;
	sp->sum_bytes_left = LFS_SUMMARY_SIZE - sizeof(SEGSUM);
}

/*
 * Return the next segment to write.
 */
void
lfs_newseg(fs)
	struct lfs *fs;
{
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	int curseg, error, isdirty, sn;

	LFS_SEGENTRY(sup, fs, datosn(fs, fs->lfs_nextseg), bp);
	sup->su_flags |= SEGUSE_DIRTY;
	sup->su_nbytes = 0;
	sup->su_nsums = 0;
	sup->su_ninos = 0;
	(void) VOP_BWRITE(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	--cip->clean;
	++cip->dirty;
	(void) VOP_BWRITE(bp);

	fs->lfs_lastseg = fs->lfs_curseg;
	fs->lfs_curseg = fs->lfs_nextseg;
	for (sn = curseg = datosn(fs, fs->lfs_curseg);;) {
		sn = (sn + 1) % fs->lfs_nseg;
		if (sn == curseg)
			panic("lfs_nextseg: no clean segments");
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & SEGUSE_DIRTY;
		brelse(bp);
		if (!isdirty)
			break;
	}

	++fs->lfs_nactive;
	fs->lfs_nextseg = sntoda(fs, sn);
}

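/*
 * Write the current partial segment to disk: checksum the summary and data
 * blocks, update the segment usage table, then copy the gathered buffers
 * into large contiguous chunks and issue the writes.
 */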
int
lfs_writeseg(fs, sp)
	struct lfs *fs;
	struct segment *sp;
{
	extern int locked_queue_count;
	struct buf **bpp, *bp, *cbp;
	SEGUSE *sup;
	SEGSUM *ssp;
	dev_t i_dev;
	size_t size;
	u_long *datap, *dp;
	int ch_per_blk, do_again, error, i, nblocks, num, s;
	int (*strategy)__P((struct vop_strategy_args *));
	struct vop_strategy_args vop_strategy_a;
	u_short ninos;
	char *p;

	/*
	 * If there are no buffers other than the segment summary to write
	 * and it is not a checkpoint, don't do anything.  On a checkpoint,
	 * even if there aren't any buffers, you need to write the superblock.
	 */
	if ((nblocks = sp->cbpp - sp->bpp) == 1 && !(sp->seg_flags & SEGM_CKP))
		return (0);

	/*
	 * Compute checksum across data and then across summary; the first
	 * block (the summary block) is skipped.  Set the create time here
	 * so that it's guaranteed to be later than the inode mod times.
	 *
	 * XXX
	 * Fix this to do it inline, instead of malloc/copy.
	 */
	datap = dp = malloc(nblocks * sizeof(u_long), M_SEGMENT, M_WAITOK);
	for (bpp = sp->bpp, i = nblocks - 1; i--;)
		*dp++ = (*++bpp)->b_un.b_words[0];
	ssp = (SEGSUM *)sp->segsum;
	ssp->ss_create = time.tv_sec;
	ssp->ss_datasum = cksum(datap, (nblocks - 1) * sizeof(u_long));
	ssp->ss_sumsum =
	    cksum(&ssp->ss_datasum, LFS_SUMMARY_SIZE - sizeof(ssp->ss_sumsum));
	free(datap, M_SEGMENT);

	/* Update the segment usage information. */
	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
	sup->su_nbytes += (nblocks - 1 - ninos) << fs->lfs_bshift;
	sup->su_nbytes += ssp->ss_ninos * sizeof(struct dinode);
	sup->su_nbytes += LFS_SUMMARY_SIZE;
	sup->su_lastmod = time.tv_sec;
	sup->su_flags |= SEGUSE_ACTIVE;
	sup->su_ninos += ninos;
	++sup->su_nsums;
	do_again = !(bp->b_flags & B_GATHERED);
	(void)VOP_BWRITE(bp);
	fs->lfs_bfree -= (fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE);

	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];

	/*
	 * When we simply write the blocks we lose a rotation for every block
	 * written.  To avoid this problem, we allocate memory in chunks, copy
	 * the buffers into the chunk and write the chunk.  56K was chosen as
	 * some driver/controllers can't handle unsigned 16 bit transfers.
	 * When the data is copied to the chunk, turn off the B_LOCKED bit
	 * and brelse the buffer (which will move them to the LRU list).  Add
	 * the B_CALL flag to the buffer header so we can count I/O's for the
	 * checkpoints and so we can release the allocated memory.
	 *
	 * XXX
	 * This should be removed if the new virtual memory system allows us to
	 * easily make the buffers contiguous in kernel memory and if that's
	 * fast enough.
	 */
#define	LFS_CHUNKSIZE	(56 * 1024)
	ch_per_blk = LFS_CHUNKSIZE / fs->lfs_bsize;
	for (bpp = sp->bpp, i = nblocks; i;) {
		num = ch_per_blk;
		if (num > i)
			num = i;
		i -= num;
		size = num * fs->lfs_bsize;

		cbp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp,
		    (*bpp)->b_blkno, size);
		cbp->b_dev = i_dev;
		cbp->b_flags |= B_ASYNC | B_BUSY;

		s = splbio();
		++fs->lfs_iocount;
		for (p = cbp->b_un.b_addr; num--;) {
			bp = *bpp++;
			/*
			 * Fake buffers from the cleaner are marked as B_INVAL.
			 * We need to copy the data from user space rather than
			 * from the buffer indicated.
			 * XXX == what do I do on an error?
			 */
			if (bp->b_flags & B_INVAL) {
				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
					panic("lfs_writeseg: copyin failed");
			} else
				bcopy(bp->b_un.b_addr, p, bp->b_bcount);
			p += bp->b_bcount;
			if (bp->b_flags & B_LOCKED)
				--locked_queue_count;
			bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
			    B_LOCKED | B_GATHERED);
			if (bp->b_flags & B_CALL) {
				/* if B_CALL, it was created with newbuf */
				brelvp(bp);
				free(bp, M_SEGMENT);
			} else {
				bremfree(bp);
				reassignbuf(bp, bp->b_vp);
				brelse(bp);
			}
		}
		++cbp->b_vp->v_numoutput;
		splx(s);
		cbp->b_bcount = p - cbp->b_un.b_addr;
		/*
		 * XXXX This is a gross and disgusting hack.  Since these
		 * buffers are physically addressed, they hang off the
		 * device vnode (devvp).  As a result, they have no way
		 * of getting to the LFS superblock or lfs structure to
		 * keep track of the number of I/O's pending.  So, I am
		 * going to stuff the fs into the saveaddr field of
		 * the buffer (yuk).
		 */
		cbp->b_saveaddr = (caddr_t)fs;
		vop_strategy_a.a_desc = VDESC(vop_strategy);
		vop_strategy_a.a_bp = cbp;
		(strategy)(&vop_strategy_a);
	}
	return (do_again);
}

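/*
 * Checksum the superblock and write it to the first two superblock
 * locations on disk: the first copy synchronously, the second
 * asynchronously.
 */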
void
lfs_writesuper(fs, sp)
	struct lfs *fs;
	struct segment *sp;
{
	struct buf *bp;
	dev_t i_dev;
	int (*strategy) __P((struct vop_strategy_args *));
	int s;
	struct vop_strategy_args vop_strategy_a;

	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];

	/* Checksum the superblock and copy it into a buffer. */
	fs->lfs_cksum = cksum(fs, sizeof(struct lfs) - sizeof(fs->lfs_cksum));
	bp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, fs->lfs_sboffs[0],
	    LFS_SBPAD);
	*bp->b_un.b_lfs = *fs;

	/* Write the first superblock (wait). */
	bp->b_dev = i_dev;
	bp->b_flags |= B_BUSY;
	bp->b_flags &= ~(B_DONE | B_CALL | B_ERROR | B_READ | B_DELWRI);
	vop_strategy_a.a_desc = VDESC(vop_strategy);
	vop_strategy_a.a_bp = bp;
	s = splbio();
	bp->b_vp->v_numoutput += 2;
	splx(s);
	(strategy)(&vop_strategy_a);
	biowait(bp);

	/* Write the second superblock (don't wait). */
	bp->b_blkno = bp->b_lblkno = fs->lfs_sboffs[1];
	bp->b_flags |= B_CALL | B_ASYNC | B_BUSY;
	bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
	bp->b_iodone = lfs_supercallback;
	(strategy)(&vop_strategy_a);
}

/*
 * Logical block number match routines used when traversing the dirty block
 * chain.
 */
int
lfs_match_data(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	return (bp->b_lblkno >= 0);
}

int
lfs_match_indir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
}

int
lfs_match_dindir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
}

int
lfs_match_tindir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
}

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(vp, daddr, size)
	struct vnode *vp;
	daddr_t daddr;
	size_t size;
{
	struct buf *bp;
	size_t nbytes;

	nbytes = roundup(size, DEV_BSIZE);
	bp = malloc(sizeof(struct buf) + nbytes, M_SEGMENT, M_WAITOK);
	bzero(bp, sizeof(struct buf) + nbytes);
	bgetvp(vp, bp);
	bp->b_un.b_addr = (caddr_t)(bp + 1);
	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_flags |= B_BUSY | B_CALL | B_NOCACHE;
	return (bp);
}

void
lfs_callback(bp)
	struct buf *bp;
{
	struct lfs *fs;

	fs = (struct lfs *)bp->b_saveaddr;
#ifdef DIAGNOSTIC
	if (fs->lfs_iocount == 0)
		panic("lfs_callback: zero iocount\n");
#endif
	if (--fs->lfs_iocount == 0)
		wakeup(&fs->lfs_iocount);

	brelvp(bp);
	free(bp, M_SEGMENT);
}

void
lfs_supercallback(bp)
	struct buf *bp;
{
	brelvp(bp);
	free(bp, M_SEGMENT);
}

/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 */
/*
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to an unsigned in this routine so that the
 * negative block numbers (meta data blocks) sort AFTER the data blocks.
 */
void
lfs_shellsort(bp_array, lb_array, nmemb)
	struct buf **bp_array;
	daddr_t *lb_array;
	register int nmemb;
{
	static int __rsshell_increments[] = { 4, 1, 0 };
	register int incr, *incrp, t1, t2;
	struct buf *bp_temp;
	u_long lb_temp;

	for (incrp = __rsshell_increments; incr = *incrp++;)
		for (t1 = incr; t1 < nmemb; ++t1)
			for (t2 = t1 - incr; t2 >= 0;)
				if (lb_array[t2] > lb_array[t2 + incr]) {
					lb_temp = lb_array[t2];
					lb_array[t2] = lb_array[t2 + incr];
					lb_array[t2 + incr] = lb_temp;
					bp_temp = bp_array[t2];
					bp_array[t2] = bp_array[t2 + incr];
					bp_array[t2 + incr] = bp_temp;
					t2 -= incr;
				} else
					break;
}