/* $NetBSD: ffs_alloc.c,v 1.6 1994/12/16 05:55:15 mycroft Exp $ */

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.11 (Berkeley) 10/27/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <vm/vm.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern u_long nextgennumber;

static daddr_t	ffs_alloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_alloccgblk __P((struct fs *, struct cg *, daddr_t));
static daddr_t	ffs_clusteralloc __P((struct inode *, int, daddr_t, int));
static ino_t	ffs_dirpref __P((struct fs *));
static daddr_t	ffs_fragextend __P((struct inode *, int, long, int, int));
static void	ffs_fserr __P((struct fs *, u_int, char *));
static u_long	ffs_hashalloc
		    __P((struct inode *, int, long, int, u_int32_t (*)()));
static ino_t	ffs_nodealloccg __P((struct inode *, int, daddr_t, int));
static daddr_t	ffs_mapsearch __P((struct fs *, struct cg *, daddr_t, int));

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
int
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	daddr_t *bnp;
{
	register struct fs *fs;
	daddr_t bno;
	int cg, error;

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential\n");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(size), cred, 0))
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
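
/*
 * Illustrative note (not part of the original source; the figures are
 * made up): on a file system with fs_fpg = 16384 fragments per cylinder
 * group and fs_ipg = 2048 inodes per group, a call with bpref = 40000
 * starts the search in cg = dtog(fs, 40000) = 40000 / 16384 = group 2,
 * while a call with bpref = 0 for inode 5000 starts in
 * cg = ino_to_cg(fs, 5000) = 5000 / 2048 = group 2 as well.
 * ffs_hashalloc() then fans out from that group as described above.
 */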

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	daddr_t lbprev;
	daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential\n");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
#ifdef QUOTA
	if (error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		(void) vnode_pager_uncache(ITOV(ip));
		ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}
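
/*
 * Illustrative arithmetic (not in the original source): assume a file
 * system with fs_dsize = 200000 fragments and the default fs_minfree of
 * 5%. Under FS_OPTSPACE the switch above flips to FS_OPTTIME once
 * cs_nffree drops to fs_dsize * fs_minfree / (2 * 100) = 5000 fragments
 * or fewer, i.e. once free fragments fall to half the 5% reserve. Under
 * FS_OPTTIME it flips back to FS_OPTSPACE once cs_nffree reaches
 * fs_dsize * (fs_minfree - 2) / 100 = 6000 fragments. The 5000..6000
 * gap provides hysteresis so the policy does not oscillate.
 */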

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
#ifdef DEBUG
#include <sys/sysctl.h>
int doasyncfree = 1;
struct ctldebug debug14 = { "doasyncfree", &doasyncfree };
int prtrealloc = 0;
struct ctldebug debug15 = { "prtrealloc", &prtrealloc };
#else
#define doasyncfree 1
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	daddr_t start_lbn, end_lbn, soff, eoff, newblk, blkno;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, start_lvl, end_lvl, pref, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-cluster");
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (daddr_t *)ebp->b_data;
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, (u_int32_t (*)())ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number,
		    start_lbn, end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize)
			bap = ebap;
#ifdef DIAGNOSTIC
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			VOP_UPDATE(vp, &time, &time, MNT_WAIT);
	}
	if (ssize < len)
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		ffs_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}
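
/*
 * Illustrative walk-through (not in the original source): suppose a
 * cluster of len = 4 buffers covers lbns 10..13 and ffs_clusteralloc()
 * returns newblk. The first loop above rewrites the four block pointers
 * in the map(s) to newblk, newblk + fs_frag, newblk + 2*fs_frag and
 * newblk + 3*fs_frag; the final loop then frees each old block and
 * points each buffer's b_blkno at fsbtodb(fs, newblk + i * fs_frag),
 * so the dirty data is written to the new, contiguous location.
 */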

/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 * 1) allocate the preferred inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 */
int
ffs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{
	register struct vnode *pvp = ap->a_pvp;
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
static ino_t
ffs_dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
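
/*
 * Worked example (not in the original source): with four cylinder
 * groups whose (cs_nifree, cs_ndir) summaries are (100, 8), (60, 2),
 * (90, 3) and (30, 1), the average free inode count is 70, so only
 * groups 0 and 2 qualify; of those, group 2 has fewer directories,
 * so the new directory's inode is preferred from the start of group 2.
 */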

/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	    ((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
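
/*
 * Worked example of the ms-to-fragments conversion above (the drive
 * parameters are assumed, not from the original source): with
 * fs_rotdelay = 4 ms, fs_rps = 60 rev/sec, fs_nsect = 32 sectors/track,
 * NSPF(fs) = 2 sectors/frag and fs_frag = 8, the skip is
 * 4 * 60 * 32 / (2 * 1000) = 3 fragments (integer division), which
 * roundup() then raises to one full block of 8 fragments.
 */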

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_int32_t (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}
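
/*
 * Illustrative probe order (not in the original source): starting from
 * cylinder group 5 on a file system with fs_ncg = 16, the quadratic
 * rehash visits groups 5+1 = 6, 6+2 = 8, 8+4 = 12 and 12+8 = 20 mod
 * 16 = 4 (the offset doubles each step, wrapping modulo fs_ncg); if
 * all of those fail, the brute force pass then probes groups 7 through
 * 15 and 0 through 4 in order.
 */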

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
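
/*
 * Worked example of the frsum bookkeeping above (not in the original
 * source): a request for frags = 2 fragments in a group whose summary
 * has cg_frsum[2] == 0 but cg_frsum[5] == 1 settles on allocsiz = 5.
 * ffs_mapsearch() locates the 5-fragment piece, the first 2 fragments
 * are taken, and the accounting becomes cg_frsum[5]--, cg_frsum[3]++:
 * one 5-fragment piece has been replaced by a 3-fragment remainder.
 */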

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_cpc == 0 || fs->fs_nrpos <= 1) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block; it is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (NULL);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, i = bpref; i < cgp->cg_nclusterblks; i++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (i == cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	bno = cg * fs->fs_fpg + blkstofrags(fs, i - run + 1);
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag)
		if (ffs_alloccgblk(fs, cgp, bno + i) != bno + i)
			panic("ffs_clusteralloc: lost block");
	brelse(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}
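
/*
 * Illustrative trace of the run scan above (not in the original
 * source): searching for len = 3 with bpref = 0, a free-cluster map
 * whose low bits are 1,1,0,1,1,1,... yields run = 2 at i = 1, run = 0
 * at i = 2, and run = 3 at i = 5, so the cluster starts at block
 * i - run + 1 = 3 and bno = cg * fs_fpg + blkstofrags(fs, 3).
 */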

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 * 1) allocate the requested inode.
 * 2) allocate the next available inode after the requested
 *    inode in the specified cylinder group.
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
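
/*
 * Worked example of the bitmap scan above (not in the original
 * source): skpc(0xff, len, p) returns the number of bytes remaining,
 * counting from the first byte of p[0..len-1] that is not all ones,
 * or 0 if every byte is 0xff. With start = 2, len = 5 and inode-map
 * bytes { 0xff, 0xff, 0x3f, ... } from index 2 on, the first non-0xff
 * byte is at scan offset 2, so loc = 3 and i = start + len - loc = 4.
 * In map = 0x3f the lowest clear bit is bit 6, giving
 * ipref = 4 * NBBY + 6 = 38 within this cylinder group.
 */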

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	daddr_t blkno;
	int i, error, cg, blk, frags, bbase;

	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		blkno = fragstoblks(fs, bno);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
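
/*
 * Worked example of block reassembly (not in the original source):
 * with fs_frag = 8, freeing a 2-fragment piece whose 6 sibling
 * fragments in the same block are already free first credits
 * cs_nffree by 2; ffs_isblock() then sees all 8 bits of the block
 * set, so the code above deducts the full 8 from cs_nffree and
 * credits cs_nbfree instead: the fragments have been reassembled
 * into a free block.
 */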

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
int
ffs_vfree(ap)
	struct vop_vfree_args /* {
		struct vnode *a_pvp;
		ino_t a_ino;
		int a_mode;
	} */ *ap;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct inode *pip;
	ino_t ino = ap->a_ino;
	struct buf *bp;
	int error, cg;

	pip = VTOI(ap->a_pvp);
	fs = pip->i_fs;
	if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cg_inosused(cgp), ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    pip->i_dev, ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ifree: freeing free inode");
	}
	clrbit(cg_inosused(cgp), ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((ap->a_mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static daddr_t
ffs_mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start],
	    (u_char *)fragtbl[fs->fs_frag],
	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
void
ffs_clusteracct(fs, cgp, blkno, cnt)
	struct fs *fs;
	struct cg *cgp;
	daddr_t blkno;
	int cnt;
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}
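
/*
 * Worked example of the summary update above (not in the original
 * source): freeing block b (cnt == 1) when the two blocks before it
 * and one block after it are already free gives back = 2 and
 * forw = 1. The three pieces merge into one cluster of
 * back + forw + 1 = 4 blocks, so sump[4]++ records the new cluster
 * while sump[2]-- and sump[1]-- retire the two clusters it absorbed.
 */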

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(fs, uid, cp)
	struct fs *fs;
	u_int uid;
	char *cp;
{

	log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp);
}