/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ffs_alloc.c	8.15 (Berkeley) 03/21/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern u_long nextgennumber;

/* Forward declarations for the cylinder-group level allocators below. */
static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, ufs_daddr_t));
static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t,
	    int));
static ino_t ffs_dirpref __P((struct fs *));
static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static void ffs_fserr __P((struct fs *, u_int, char *));
static u_long ffs_hashalloc
	    __P((struct inode *, int, long, int, u_int32_t (*)()));
static ino_t ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t,
	    int));

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadradically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following heirarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadradically rehash into other cylinder groups, until an
 *      available block is located.
 *
 * Returns 0 on success with the allocated block number stored in *bnp;
 * otherwise an error number (ENOSPC, or a quota error from chkdq).
 */
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	ufs_daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	ufs_daddr_t *bnp;
{
	register struct fs *fs;
	ufs_daddr_t bno;
	int cg, error;

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential\n");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	/* Only the superuser may dip into the minfree reserve. */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), cred, 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 *
 * On success returns 0 with the (possibly relocated) buffer in *bpp;
 * otherwise an error number, with *bpp left zeroed.
 */
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	ufs_daddr_t lbprev;
	ufs_daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential\n");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		/* Zero the newly grown tail so stale data never leaks out. */
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		(void) vnode_pager_uncache(ITOV(ip));
		ffs_blkfree(ip, bprev, (long)osize);
		/* If we over-allocated (FS_OPTTIME), free the unused tail. */
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceeding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
#ifdef DEBUG
#include <sys/sysctl.h>
/* Debug knobs exported through the ctldebug sysctl table. */
int doasyncfree = 1;
struct ctldebug debug14 = { "doasyncfree", &doasyncfree };
int prtrealloc = 0;
struct ctldebug debug15 = { "prtrealloc", &prtrealloc };
#else
#define doasyncfree	1
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	ufs_daddr_t start_lbn, end_lbn, soff, eoff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	/* Clustering requires cluster summary tables in the superblock. */
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-cluster");
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs_daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, (u_int32_t (*)())ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number,
		    start_lbn, end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		/* Cross over to the second block map at the split point. */
		if (i == ssize)
			bap = ebap;
#ifdef DIAGNOSTIC
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practise
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			VOP_UPDATE(vp, &time, &time, MNT_WAIT);
	}
	if (ssize < len)
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		ffs_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate
 * an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadradically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following heirarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadradically rehash into other cylinder groups, until an
 *      available inode is located.
 *
 * Returns 0 with the new vnode in *ap->a_vpp, or an error number.
 */
ffs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{
	register struct vnode *pvp = ap->a_pvp;
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		/* Release the on-disk inode we just claimed. */
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 *
 * Returns the first inode number of the chosen cylinder group.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
ufs_daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_daddr_t lbn;
	int indx;
	ufs_daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		/* Sweep from startcg to the end, then wrap to the start. */
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	    ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadradically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Returns the allocator's result, or NULL (0) if every cylinder
 * group fails.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_int32_t (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 *
 * Returns bprev on success, NULL (0) if the extension is not possible.
 */
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	/* All frags between the old and new size must be free. */
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;			/* cylinder group to allocate from */
	ufs_daddr_t bpref;	/* preferred fragment address, or 0 */
	int size;		/* bytes to allocate; multiple of fs_fsize */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	/* a full-block request needs a free block in this group */
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		/* free the unused tail of the block as fragments */
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	/* claim the fragments and update frag-count summaries */
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 * specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;	/* in-core cg map; caller writes it back */
	ufs_daddr_t bpref;		/* preferred fragment address, or 0 */
{
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		/* no usable preference; fall back to the group's rotor */
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		/* walk the rotational-layout chain until a free block */
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	/* mark the block allocated and update all the summaries */
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
98765999Smckusick */ 988*68554Smckusick static ufs_daddr_t 98965999Smckusick ffs_clusteralloc(ip, cg, bpref, len) 99065999Smckusick struct inode *ip; 99165999Smckusick int cg; 992*68554Smckusick ufs_daddr_t bpref; 99365999Smckusick int len; 99465999Smckusick { 99565999Smckusick register struct fs *fs; 99665999Smckusick register struct cg *cgp; 99765999Smckusick struct buf *bp; 99868336Smckusick int i, got, run, bno, bit, map; 99965999Smckusick u_char *mapp; 100067869Smckusick int32_t *lp; 100165999Smckusick 100265999Smckusick fs = ip->i_fs; 100367869Smckusick if (fs->fs_maxcluster[cg] < len) 100465999Smckusick return (NULL); 100565999Smckusick if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, 100665999Smckusick NOCRED, &bp)) 100765999Smckusick goto fail; 100865999Smckusick cgp = (struct cg *)bp->b_data; 100965999Smckusick if (!cg_chkmagic(cgp)) 101065999Smckusick goto fail; 101165999Smckusick /* 101265999Smckusick * Check to see if a cluster of the needed size (or bigger) is 101365999Smckusick * available in this cylinder group. 101465999Smckusick */ 101567869Smckusick lp = &cg_clustersum(cgp)[len]; 101665999Smckusick for (i = len; i <= fs->fs_contigsumsize; i++) 101767869Smckusick if (*lp++ > 0) 101865999Smckusick break; 101967869Smckusick if (i > fs->fs_contigsumsize) { 102067869Smckusick /* 102167869Smckusick * This is the first time looking for a cluster in this 102267869Smckusick * cylinder group. Update the cluster summary information 102367869Smckusick * to reflect the true maximum sized cluster so that 102467869Smckusick * future cluster allocation requests can avoid reading 102567869Smckusick * the cylinder group map only to find no clusters. 
102667869Smckusick */ 102767869Smckusick lp = &cg_clustersum(cgp)[len - 1]; 102867869Smckusick for (i = len - 1; i > 0; i--) 102967869Smckusick if (*lp-- > 0) 103067869Smckusick break; 103167869Smckusick fs->fs_maxcluster[cg] = i; 103265999Smckusick goto fail; 103367869Smckusick } 103465999Smckusick /* 103565999Smckusick * Search the cluster map to find a big enough cluster. 103665999Smckusick * We take the first one that we find, even if it is larger 103765999Smckusick * than we need as we prefer to get one close to the previous 103865999Smckusick * block allocation. We do not search before the current 103965999Smckusick * preference point as we do not want to allocate a block 104065999Smckusick * that is allocated before the previous one (as we will 104165999Smckusick * then have to wait for another pass of the elevator 104265999Smckusick * algorithm before it will be read). We prefer to fail and 104365999Smckusick * be recalled to try an allocation in the next cylinder group. 104465999Smckusick */ 104565999Smckusick if (dtog(fs, bpref) != cg) 104665999Smckusick bpref = 0; 104765999Smckusick else 104865999Smckusick bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref))); 104965999Smckusick mapp = &cg_clustersfree(cgp)[bpref / NBBY]; 105065999Smckusick map = *mapp++; 105165999Smckusick bit = 1 << (bpref % NBBY); 105268336Smckusick for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) { 105365999Smckusick if ((map & bit) == 0) { 105465999Smckusick run = 0; 105565999Smckusick } else { 105665999Smckusick run++; 105765999Smckusick if (run == len) 105865999Smckusick break; 105965999Smckusick } 106068336Smckusick if ((got & (NBBY - 1)) != (NBBY - 1)) { 106165999Smckusick bit <<= 1; 106265999Smckusick } else { 106365999Smckusick map = *mapp++; 106465999Smckusick bit = 1; 106565999Smckusick } 106665999Smckusick } 106768336Smckusick if (got == cgp->cg_nclusterblks) 106865999Smckusick goto fail; 106965999Smckusick /* 107065999Smckusick * Allocate the cluster that we 
have found. 107165999Smckusick */ 107268336Smckusick for (i = 1; i <= len; i++) 107368336Smckusick if (!ffs_isblock(fs, cg_blksfree(cgp), got - run + i)) 107468336Smckusick panic("ffs_clusteralloc: map mismatch"); 107568336Smckusick bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1); 107665999Smckusick len = blkstofrags(fs, len); 107765999Smckusick for (i = 0; i < len; i += fs->fs_frag) 107868336Smckusick if ((got = ffs_alloccgblk(fs, cgp, bno + i)) != bno + i) 107965999Smckusick panic("ffs_clusteralloc: lost block"); 108065999Smckusick brelse(bp); 108165999Smckusick return (bno); 108265999Smckusick 108365999Smckusick fail: 108465999Smckusick brelse(bp); 108565999Smckusick return (0); 108665999Smckusick } 108765999Smckusick 108865999Smckusick /* 10895375Smckusic * Determine whether an inode can be allocated. 10905375Smckusic * 10915375Smckusic * Check to see if an inode is available, and if it is, 10925375Smckusic * allocate it using the following policy: 10935375Smckusic * 1) allocate the requested inode. 10945375Smckusic * 2) allocate the next available inode after the requested 10955375Smckusic * inode in the specified cylinder group. 
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;			/* cylinder group to allocate from */
	ufs_daddr_t ipref;	/* preferred inode number, or 0 */
	int mode;		/* mode of the new inode (IFDIR adjusts ndir) */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	/* scan the used-inode map from the rotor for a byte with a 0 bit */
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		/* wrap around and search the part before the rotor */
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	/* sift through the byte to find the free bit */
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	ufs_daddr_t bno;	/* fragment address being freed */
	long size;		/* bytes being freed; multiple of fs_fsize */
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		/* out-of-range block: log it rather than panic */
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ffs_vfree(ap)
	struct vop_vfree_args /* {
		struct vnode *a_pvp;
		ino_t a_ino;
		int a_mode;
	} */ *ap;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	ino_t ino = ap->a_ino;
	struct buf *bp;
	int error, cg;

	pip = VTOI(ap->a_pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
		/* read-only mounts only complain; writable mounts panic */
		if (fs->fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((ap->a_mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;	/* preferred fragment address, or 0 */
	int allocsiz;		/* run length wanted, in fragments */
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
		(u_char *)fragtbl[fs->fs_frag],
		(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		/* wrap around and scan the part before the start point */
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
			(u_char *)fragtbl[fs->fs_frag],
			(u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;		/* in-core cg map; caller writes it back */
	ufs_daddr_t blkno;	/* block (not fragment) number within the cg */
	int cnt;		/* +1 = free the block, -1 = allocate it */
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	/* clustering disabled on this file system */
	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;	/* uid of the process that triggered the error */
	char *cp;	/* short diagnostic string */
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}