/* Copyright (c) 1981 Regents of the University of California */

static char vers[] = "@(#)ffs_alloc.c 1.17 02/21/82";

/* alloc.c 4.8 81/03/08 */

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/mount.h"
#include "../h/fs.h"
#include "../h/conf.h"
#include "../h/buf.h"
#include "../h/inode.h"
#include "../h/user.h"

extern u_long hashalloc();
extern ino_t ialloccg();
extern daddr_t alloccg();
extern daddr_t alloccgblk();
extern daddr_t fragextend();
extern daddr_t blkpref();
extern daddr_t mapsearch();
extern int inside[], around[];
extern unsigned char *fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
struct buf *
alloc(dev, ip, bpref, size)
	dev_t dev;
	register struct inode *ip;
	daddr_t bpref;
	int size;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	fs = getfs(dev);
	if ((unsigned)size > fs->fs_bsize || size % fs->fs_fsize != 0)
		panic("alloc: bad size");
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, size, alloccg);
	if (bno == 0)
		goto nospace;
	bp = getblk(dev, fsbtodb(fs, bno), size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
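/*
 * A worked example of the free-space reserve check in alloc() above
 * (the numbers are illustrative, not from any particular file system):
 * with fs_dsize = 100000 fragments and fs_minfree = 10, ordinary users
 * are refused once fewer than 10000 fragments remain free, where the
 * free total counts each free block as fs_frag fragments plus the
 * loose free fragments. The super-user (uid 0) bypasses the reserve
 * so the system can still recover a nearly full disk.
 */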
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
struct buf *
realloccg(dev, bprev, bpref, osize, nsize)
	dev_t dev;
	daddr_t bprev, bpref;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	caddr_t cp;
	int cg;

	fs = getfs(dev);
	if ((unsigned)osize > fs->fs_bsize || osize % fs->fs_fsize != 0 ||
	    (unsigned)nsize > fs->fs_bsize || nsize % fs->fs_fsize != 0)
		panic("realloccg: bad size");
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bprev != 0)
		cg = dtog(fs, bprev);
	else
		panic("realloccg: bad bprev");
	bno = fragextend(dev, fs, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		bp = bread(dev, fsbtodb(fs, bno), osize);
		if (bp->b_flags & B_ERROR)
			return (0);
		bp->b_bcount = nsize;
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
	if (bpref >= fs->fs_size)
		bpref = 0;
	bno = (daddr_t)hashalloc(dev, fs, cg, (long)bpref, nsize, alloccg);
	if (bno != 0) {
		/*
		 * make a new copy
		 */
		obp = bread(dev, fsbtodb(fs, bprev), osize);
		if (obp->b_flags & B_ERROR)
			return (0);
		bp = getblk(dev, fsbtodb(fs, bno), nsize);
		cp = bp->b_un.b_addr;
		bp->b_un.b_addr = obp->b_un.b_addr;
		obp->b_un.b_addr = cp;
		obp->b_flags |= B_INVAL;
		brelse(obp);
		fre(dev, bprev, (off_t)osize);
		blkclr(bp->b_un.b_addr + osize, nsize - osize);
		return (bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 * 1) allocate the requested inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 */
struct inode *
ialloc(dev, ipref, mode)
	dev_t dev;
	ino_t ipref;
	int mode;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg;

	fs = getfs(dev);
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(dev, fs, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	ip = iget(dev, ino);
	if (ip == NULL) {
		ifree(dev, ino, 0);
		return (NULL);
	}
	if (ip->i_mode)
		panic("ialloc: dup alloc");
	return (ip);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create failed, no inodes free\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
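/*
 * Note on the copy in realloccg() above: rather than copying osize
 * bytes from the old buffer into the new one, the data pointers of
 * the two buffers are exchanged, so the old contents simply become
 * the contents of the buffer for the new block. The old buffer is
 * then marked B_INVAL and released, and the old fragments are freed.
 */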
/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
dirpref(dev)
	dev_t dev;
{
	register struct fs *fs;
	int cg, minndir, mincg, avgifree;

	fs = getfs(dev);
	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return (fs->fs_ipg * mincg);
}

/*
 * Select a cylinder to place a large block of data.
 *
 * The policy implemented by this algorithm is to maintain a
 * rotor that sweeps the cylinder groups. When a block is
 * needed, the rotor is advanced until a cylinder group with
 * greater than the average number of free blocks is found.
 */
daddr_t
blkpref(dev)
	dev_t dev;
{
	register struct fs *fs;
	int cg, avgbfree;

	fs = getfs(dev);
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
			fs->fs_cgrotor = cg;
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
	for (cg = 0; cg <= fs->fs_cgrotor; cg++)
		if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
			fs->fs_cgrotor = cg;
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
	return (0);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 */
/*VARARGS5*/
u_long
hashalloc(dev, fs, cg, pref, size, allocator)
	dev_t dev;
	register struct fs *fs;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	long result;
	int i, icg = cg;

	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(dev, fs, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 */
	cg = icg;
	for (i = 0; i < fs->fs_ncg; i++) {
		result = (*allocator)(dev, fs, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
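/*
 * Example of the rehash in hashalloc() above (illustrative numbers):
 * with fs_ncg = 16 and a starting group of 5, the quadratic pass adds
 * offsets 1, 2, 4, and 8, probing groups 6, 8, 12, and 4 (20 wraps
 * around modulo 16). Only if all of those fail does the final pass
 * try every group in order, starting from group 5 again.
 */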
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(dev, fs, cg, bprev, osize, nsize)
	dev_t dev;
	register struct fs *fs;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct buf *bp;
	register struct cg *cgp;
	long bno;
	int frags, bbase;
	int i;

	frags = nsize / fs->fs_fsize;
	bbase = bprev % fs->fs_frag;
	if (bbase > (bprev + frags - 1) % fs->fs_frag) {
		/* cannot extend across a block boundary */
		return (0);
	}
	bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR)
		return (0);
	cgp = bp->b_un.b_cg;
	bno = dtogd(fs, bprev);
	for (i = osize / fs->fs_fsize; i < frags; i++)
		if (isclr(cgp->cg_free, bno + i)) {
			brelse(bp);
			return (0);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on the fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cgp->cg_free, bno + i))
			break;
	cgp->cg_frsum[i - osize / fs->fs_fsize]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = osize / fs->fs_fsize; i < frags; i++) {
		clrbit(cgp->cg_free, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
daddr_t
alloccg(dev, fs, cg, bpref, size)
	dev_t dev;
	register struct fs *fs;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct buf *bp;
	register struct cg *cgp;
	int bno, frags;
	int allocsiz;
	register int i;

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR)
		return (0);
	cgp = bp->b_un.b_cg;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = size / fs->fs_fsize;
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cgp->cg_free, bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno == 0)
		return (0);
	for (i = 0; i < frags; i++)
		clrbit(cgp->cg_free, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
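/*
 * Example of the frsum bookkeeping in alloccg() above (illustrative
 * numbers): with fs_frag = 8, a request for 3 fragments when no free
 * run of 3 or more exists breaks up a whole block; the trailing 5
 * fragments are marked free, the free fragment counts rise by 5, and
 * cg_frsum[5] is incremented to record one run of length 5. If instead
 * a free run of 4 had been found, cg_frsum[4] would be decremented and
 * cg_frsum[1] incremented for the single fragment left over.
 */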
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref &= ~(fs->fs_frag - 1);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cgp->cg_btot[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * find a block that is rotationally optimal
	 */
	cylbp = cgp->cg_b[cylno];
	if (fs->fs_rotdelay == 0) {
		pos = cbtorpos(fs, bpref);
	} else {
		/*
		 * here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next rotational position
		 */
		bpref += fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000);
		pos = cbtorpos(fs, bpref);
		pos = (pos + 1) % NRPOS;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * optimal rotational position and proceeding around.
	 */
	for (i = pos; i < NRPOS; i++)
		if (cylbp[i] > 0)
			break;
	if (i == NRPOS)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. It is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs->fs_postbl[pos][i] == -1)
			panic("alloccgblk: cyl groups corrupted");
		for (i = fs->fs_postbl[pos][i]; ; i += fs->fs_rotbl[i]) {
			if (isblock(fs, cgp->cg_free, bno + i)) {
				bno = (bno + i) * fs->fs_frag;
				goto gotit;
			}
			if (fs->fs_rotbl[i] == 0)
				break;
		}
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, fs->fs_frag);
	if (bno == 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	clrblock(fs, cgp->cg_free, bno/fs->fs_frag);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cgp->cg_b[cylno][cbtorpos(fs, bno)]--;
	cgp->cg_btot[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}
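/*
 * Example of the rotational delay conversion in alloccgblk() above
 * (illustrative numbers): with fs_rotdelay = 4 ms, fs_rps = 60
 * revolutions per second, fs_nsect = 32 sectors per track, and
 * NSPF(fs) = 2 sectors per fragment, the preference is advanced by
 * 4 * 60 * 32 / (2 * 1000) = 3 fragments (integer division), i.e.
 * the distance the disk rotates during the expected software delay
 * between transfers.
 */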
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 * 1) allocate the requested inode.
 * 2) allocate the next available inode after the requested
 *    inode in the specified cylinder group.
 */
ino_t
ialloccg(dev, fs, cg, ipref, mode)
	dev_t dev;
	register struct fs *fs;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct buf *bp;
	register struct cg *cgp;
	int i;

	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR)
		return (0);
	cgp = bp->b_un.b_cg;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cgp->cg_iused, ipref))
			goto gotit;
	} else
		ipref = cgp->cg_irotor;
	for (i = 0; i < fs->fs_ipg; i++) {
		ipref++;
		if (ipref >= fs->fs_ipg)
			ipref = 0;
		if (isclr(cgp->cg_iused, ipref)) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	brelse(bp);
	return (0);
gotit:
	setbit(cgp->cg_iused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
fre(dev, bno, size)
	dev_t dev;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg, blk, frags, bbase;
	register int i;

	fs = getfs(dev);
	if ((unsigned)size > fs->fs_bsize || size % fs->fs_fsize != 0)
		panic("free: bad size");
	cg = dtog(fs, bno);
	if (badblock(fs, bno))
		return;
	bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR)
		return;
	cgp = bp->b_un.b_cg;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (isblock(fs, cgp->cg_free, bno/fs->fs_frag))
			panic("free: freeing free block");
		setblock(fs, cgp->cg_free, bno/fs->fs_frag);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cgp->cg_b[i][cbtorpos(fs, bno)]++;
		cgp->cg_btot[i]++;
	} else {
		bbase = bno - (bno % fs->fs_frag);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
		    (0xff >> (NBBY - fs->fs_frag)));
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = size / fs->fs_fsize;
		for (i = 0; i < frags; i++) {
			if (isset(cgp->cg_free, bno + i))
				panic("free: freeing free frag");
			setbit(cgp->cg_free, bno + i);
			cgp->cg_cs.cs_nffree++;
			fs->fs_cstotal.cs_nffree++;
			fs->fs_cs(fs, cg).cs_nffree++;
		}
		/*
		 * add back in counts associated with the new frags
		 */
		blk = ((cgp->cg_free[bbase / NBBY] >> (bbase % NBBY)) &
		    (0xff >> (NBBY - fs->fs_frag)));
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cgp->cg_b[i][cbtorpos(fs, bbase)]++;
			cgp->cg_btot[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}
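/*
 * Example of block reassembly in fre() above (illustrative numbers):
 * with fs_frag = 8, suppose 6 of a block's fragments are already free
 * and the last 2 are now released. The loop raises the free fragment
 * counts to 8; the free map then shows a complete block, so the
 * fragment counts are dropped by 8 and the free block counts rise by
 * one: eight loose fragments have become one whole free block.
 */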
/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(dev, ino, mode)
	dev_t dev;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg;

	fs = getfs(dev);
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg)
		panic("ifree: range");
	cg = itog(fs, ino);
	bp = bread(dev, fsbtodb(fs, cgtod(fs, cg)), fs->fs_bsize);
	if (bp->b_flags & B_ERROR)
		return;
	cgp = bp->b_un.b_cg;
	ino %= fs->fs_ipg;
	if (isclr(cgp->cg_iused, ino))
		panic("ifree: freeing free inode");
	clrbit(cgp->cg_iused, ino);
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
	    1 << (allocsiz - 1));
	if (loc == 0) {
		loc = fs->fs_dblkno / NBBY;
		len = start - loc + 1;
		start = loc;
		loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
		    1 << (allocsiz - 1));
		if (loc == 0) {
			panic("alloccg: map corrupted");
			return (0);
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = 0; i < NBBY; i += fs->fs_frag) {
		blk = (cgp->cg_free[bno / NBBY] >> i) &
		    (0xff >> (NBBY - fs->fs_frag));
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield) {
				return (bno + i + pos);
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
	panic("alloccg: block not in map");
	return (0);
}

/*
 * Update the frsum fields to reflect addition or deletion
 * of some frags.
 */
fragacct(fs, fragmap, fraglist, cnt)
	struct fs *fs;
	int fragmap;
	long fraglist[];
	int cnt;
{
	int inblk;
	register int field, subfield;
	register int siz, pos;

	inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1;
	fragmap <<= 1;
	for (siz = 1; siz < fs->fs_frag; siz++) {
		if (((1 << siz) & inblk) == 0)
			continue;
		field = around[siz];
		subfield = inside[siz];
		for (pos = siz; pos <= fs->fs_frag; pos++) {
			if ((fragmap & field) == subfield) {
				fraglist[siz] += cnt;
				pos += siz;
				field <<= siz;
				subfield <<= siz;
			}
			field <<= 1;
			subfield <<= 1;
		}
	}
}
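/*
 * Example of the pattern match used in mapsearch() and fragacct()
 * above, assuming the companion tables define around[siz] as a mask
 * of siz+2 one bits and inside[siz] as that mask with both end bits
 * clear (e.g. around[2] = 1111, inside[2] = 0110 in binary). A set
 * bit in the free map means a free fragment, and the left shift of
 * the map byte supplies a zero guard bit at each end, so a match of
 * the masked bits against inside[siz] finds a run of exactly siz
 * free fragments bounded on both sides by allocated fragments or by
 * a block boundary.
 */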
/*
 * Check that a specified block number is in range.
 */
badblock(fs, bn)
	register struct fs *fs;
	daddr_t bn;
{

	if ((unsigned)bn >= fs->fs_size || bn < cgdmin(fs, dtog(fs, bn))) {
		fserr(fs, "bad block");
		return (1);
	}
	return (0);
}

/*
 * Getfs maps a device number into a pointer to the incore super block.
 *
 * The algorithm is a linear search through the mount table. A
 * consistency check of the super block magic number is performed.
 *
 * panic: no fs -- the device is not mounted.
 *	this "cannot happen"
 */
struct fs *
getfs(dev)
	dev_t dev;
{
	register struct mount *mp;
	register struct fs *fs;

	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL && mp->m_dev == dev) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_magic != FS_MAGIC)
				panic("getfs: bad magic");
			return (fs);
		}
	panic("getfs: no fs");
	return (NULL);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}

/*
 * Getfsx returns the index in the file system
 * table of the specified device. The swap device
 * is also assigned a pseudo-index. The index may
 * be used as a compressed indication of the location
 * of a block, recording
 *	<getfsx(dev),blkno>
 * rather than
 *	<dev,blkno>
 * provided the information need remain valid only
 * as long as the file system is mounted.
 */
getfsx(dev)
	dev_t dev;
{
	register struct mount *mp;

	if (dev == swapdev)
		return (MSWAPX);
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_dev == dev)
			return (mp - &mount[0]);
	return (-1);
}

/*
 * Update is the internal name of 'sync'. It goes through the disk
 * queues to initiate sandbagged IO; goes through the inodes to write
 * modified inodes; and it goes through the mount table to initiate
 * the writing of the modified super blocks.
 */
update()
{
	register struct inode *ip;
	register struct mount *mp;
	register struct buf *bp;
	struct fs *fs;
	time_t tim;
	int i, blks;

	if (updlock)
		return;
	updlock++;
	/*
	 * Write back modified superblocks.
	 * Consistency check that the superblock
	 * of each file system is still in the buffer cache.
	 */
	for (mp = &mount[0]; mp < &mount[NMOUNT]; mp++)
		if (mp->m_bufp != NULL) {
			fs = mp->m_bufp->b_un.b_fs;
			if (fs->fs_fmod == 0)
				continue;
			if (fs->fs_ronly != 0)
				panic("update: rofs mod");
			bp = getblk(mp->m_dev, SBLOCK, SBSIZE);
			fs->fs_fmod = 0;
			fs->fs_time = TIME;
			if (bp->b_un.b_fs != fs)
				panic("update: bad b_fs");
			bwrite(bp);
			blks = howmany(fs->fs_cssize, fs->fs_bsize);
			for (i = 0; i < blks; i++) {
				bp = getblk(mp->m_dev,
				    fsbtodb(fs, fs->fs_csaddr + (i * fs->fs_frag)),
				    fs->fs_bsize);
				bwrite(bp);
			}
		}
	/*
	 * Write back each (modified) inode.
	 */
	for (ip = inode; ip < inodeNINODE; ip++)
		if ((ip->i_flag&ILOCK) == 0 && ip->i_count) {
			ip->i_flag |= ILOCK;
			ip->i_count++;
			tim = TIME;
			iupdat(ip, &tim, &tim, 0);
			iput(ip);
		}
	updlock = 0;
	/*
	 * Force stale buffer cache information to be flushed,
	 * for all devices.
	 */
	bflush(NODEV);
}
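/*
 * Example of the summary writeback in update() above (illustrative
 * numbers): with fs_cssize = 24576 bytes of cylinder group summary
 * information and fs_bsize = 8192, howmany() yields 3 buffers, written
 * at fs_csaddr, fs_csaddr + fs_frag, and fs_csaddr + 2 * fs_frag,
 * since consecutive fs_bsize blocks lie fs_frag fragments apart.
 */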
/*
 * block operations
 *
 * check if a block is available
 */
isblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;
	int h;
{
	unsigned char mask;

	switch (fs->fs_frag) {
	case 8:
		return (cp[h] == 0xff);
	case 4:
		mask = 0x0f << ((h & 0x1) << 2);
		return ((cp[h >> 1] & mask) == mask);
	case 2:
		mask = 0x03 << ((h & 0x3) << 1);
		return ((cp[h >> 2] & mask) == mask);
	case 1:
		mask = 0x01 << (h & 0x7);
		return ((cp[h >> 3] & mask) == mask);
	default:
		panic("isblock bad fs_frag");
		return;
	}
}

/*
 * take a block out of the map
 */
clrblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;
	int h;
{
	switch ((fs)->fs_frag) {
	case 8:
		cp[h] = 0;
		return;
	case 4:
		cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2));
		return;
	case 2:
		cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1));
		return;
	case 1:
		cp[h >> 3] &= ~(0x01 << (h & 0x7));
		return;
	default:
		panic("clrblock bad fs_frag");
		return;
	}
}

/*
 * put a block into the map
 */
setblock(fs, cp, h)
	struct fs *fs;
	unsigned char *cp;
	int h;
{
	switch (fs->fs_frag) {
	case 8:
		cp[h] = 0xff;
		return;
	case 4:
		cp[h >> 1] |= (0x0f << ((h & 0x1) << 2));
		return;
	case 2:
		cp[h >> 2] |= (0x03 << ((h & 0x3) << 1));
		return;
	case 1:
		cp[h >> 3] |= (0x01 << (h & 0x7));
		return;
	default:
		panic("setblock bad fs_frag");
		return;
	}
}
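/*
 * Example of the mask arithmetic in the block operations above
 * (illustrative numbers): with fs_frag = 4, block h = 5 occupies the
 * high nibble of byte h >> 1 = 2 of the map, since
 * mask = 0x0f << ((5 & 0x1) << 2) = 0xf0; isblock() reports the block
 * free only when all four of those fragment bits are set.
 */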