1 /* ffs_alloc.c 2.24 83/03/21 */ 2 3 #include "../h/param.h" 4 #include "../h/systm.h" 5 #include "../h/mount.h" 6 #include "../h/fs.h" 7 #include "../h/conf.h" 8 #include "../h/buf.h" 9 #include "../h/inode.h" 10 #include "../h/dir.h" 11 #include "../h/user.h" 12 #include "../h/quota.h" 13 #include "../h/kernel.h" 14 15 extern u_long hashalloc(); 16 extern ino_t ialloccg(); 17 extern daddr_t alloccg(); 18 extern daddr_t alloccgblk(); 19 extern daddr_t fragextend(); 20 extern daddr_t blkpref(); 21 extern daddr_t mapsearch(); 22 extern int inside[], around[]; 23 extern unsigned char *fragtbl[]; 24 25 /* 26 * Allocate a block in the file system. 27 * 28 * The size of the requested block is given, which must be some 29 * multiple of fs_fsize and <= fs_bsize. 30 * A preference may be optionally specified. If a preference is given 31 * the following hierarchy is used to allocate a block: 32 * 1) allocate the requested block. 33 * 2) allocate a rotationally optimal block in the same cylinder. 34 * 3) allocate a block in the same cylinder group. 35 * 4) quadradically rehash into other cylinder groups, until an 36 * available block is located. 37 * If no block preference is given the following heirarchy is used 38 * to allocate a block: 39 * 1) allocate a block in the cylinder group that contains the 40 * inode for the file. 41 * 2) quadradically rehash into other cylinder groups, until an 42 * available block is located. 43 */ 44 struct buf * 45 alloc(ip, bpref, size) 46 register struct inode *ip; 47 daddr_t bpref; 48 int size; 49 { 50 daddr_t bno; 51 register struct fs *fs; 52 register struct buf *bp; 53 int cg; 54 55 fs = ip->i_fs; 56 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) { 57 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n", 58 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt); 59 panic("alloc: bad size"); 60 } 61 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) 62 goto nospace; 63 if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0) 64 goto nospace; 65 #ifdef QUOTA 66 if (chkdq(ip, (long)((unsigned)size/DEV_BSIZE), 0)) 67 return(NULL); 68 #endif 69 if (bpref >= fs->fs_size) 70 bpref = 0; 71 if (bpref == 0) 72 cg = itog(fs, ip->i_number); 73 else 74 cg = dtog(fs, bpref); 75 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size, 76 (u_long (*)())alloccg); 77 if (bno <= 0) 78 goto nospace; 79 bp = getblk(ip->i_dev, fsbtodb(fs, bno), size); 80 clrbuf(bp); 81 return (bp); 82 nospace: 83 fserr(fs, "file system full"); 84 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); 85 u.u_error = ENOSPC; 86 return (NULL); 87 } 88 89 /* 90 * Reallocate a fragment to a bigger size 91 * 92 * The number and size of the old block is given, and a preference 93 * and new size is also specified. The allocator attempts to extend 94 * the original block. Failing that, the regular block allocator is 95 * invoked to get an appropriate block. 96 */ 97 struct buf * 98 realloccg(ip, bprev, bpref, osize, nsize) 99 register struct inode *ip; 100 daddr_t bprev, bpref; 101 int osize, nsize; 102 { 103 daddr_t bno; 104 register struct fs *fs; 105 register struct buf *bp, *obp; 106 int cg; 107 108 fs = ip->i_fs; 109 if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 || 110 (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) { 111 printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n", 112 ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt); 113 panic("realloccg: bad size"); 114 } 115 if (u.u_uid != 0 && freespace(fs, fs->fs_minfree) <= 0) 116 goto nospace; 117 if (bprev == 0) { 118 printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n", 119 ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt); 120 panic("realloccg: bad bprev"); 121 } 122 #ifdef QUOTA 123 if (chkdq(ip, (long)((unsigned)(nsize-osize)/DEV_BSIZE), 0)) 124 return(NULL); 125 #endif 126 cg = dtog(fs, bprev); 127 bno = fragextend(ip, cg, (long)bprev, osize, nsize); 128 if (bno != 0) { 129 do { 130 bp = bread(ip->i_dev, fsbtodb(fs, bno), osize); 131 if (bp->b_flags & B_ERROR) { 132 brelse(bp); 133 return (NULL); 134 } 135 } while (brealloc(bp, nsize) == 0); 136 bp->b_flags |= B_DONE; 137 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize); 138 return (bp); 139 } 140 if (bpref >= fs->fs_size) 141 bpref = 0; 142 bno = (daddr_t)hashalloc(ip, cg, (long)bpref, nsize, 143 (u_long (*)())alloccg); 144 if (bno > 0) { 145 obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize); 146 if (obp->b_flags & B_ERROR) { 147 brelse(obp); 148 return (NULL); 149 } 150 bp = getblk(ip->i_dev, fsbtodb(fs, bno), nsize); 151 bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize); 152 bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize); 153 brelse(obp); 154 free(ip, bprev, (off_t)osize); 155 return (bp); 156 } 157 nospace: 158 /* 159 * no space available 160 */ 161 fserr(fs, "file system full"); 162 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); 163 u.u_error = ENOSPC; 164 return (NULL); 165 } 166 167 /* 168 * Allocate an inode in the file system. 169 * 170 * A preference may be optionally specified. If a preference is given 171 * the following hierarchy is used to allocate an inode: 172 * 1) allocate the requested inode. 173 * 2) allocate an inode in the same cylinder group. 174 * 3) quadradically rehash into other cylinder groups, until an 175 * available inode is located. 176 * If no inode preference is given the following heirarchy is used 177 * to allocate an inode: 178 * 1) allocate an inode in cylinder group 0. 179 * 2) quadradically rehash into other cylinder groups, until an 180 * available inode is located. 181 */ 182 struct inode * 183 ialloc(pip, ipref, mode) 184 register struct inode *pip; 185 ino_t ipref; 186 int mode; 187 { 188 ino_t ino; 189 register struct fs *fs; 190 register struct inode *ip; 191 int cg; 192 193 fs = pip->i_fs; 194 if (fs->fs_cstotal.cs_nifree == 0) 195 goto noinodes; 196 #ifdef QUOTA 197 if (chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0)) 198 return(NULL); 199 #endif 200 if (ipref >= fs->fs_ncg * fs->fs_ipg) 201 ipref = 0; 202 cg = itog(fs, ipref); 203 ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg); 204 if (ino == 0) 205 goto noinodes; 206 ip = iget(pip->i_dev, pip->i_fs, ino); 207 if (ip == NULL) { 208 ifree(ip, ino, 0); 209 return (NULL); 210 } 211 if (ip->i_mode) { 212 printf("mode = 0%o, inum = %d, fs = %s\n", 213 ip->i_mode, ip->i_number, fs->fs_fsmnt); 214 panic("ialloc: dup alloc"); 215 } 216 return (ip); 217 noinodes: 218 fserr(fs, "out of inodes"); 219 uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt); 220 u.u_error = ENOSPC; 221 return (NULL); 222 } 223 224 /* 225 * Find a cylinder to place a directory. 226 * 227 * The policy implemented by this algorithm is to select from 228 * among those cylinder groups with above the average number of 229 * free inodes, the one with the smallest number of directories. 230 */ 231 ino_t 232 dirpref(fs) 233 register struct fs *fs; 234 { 235 int cg, minndir, mincg, avgifree; 236 237 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; 238 minndir = fs->fs_ipg; 239 mincg = 0; 240 for (cg = 0; cg < fs->fs_ncg; cg++) 241 if (fs->fs_cs(fs, cg).cs_ndir < minndir && 242 fs->fs_cs(fs, cg).cs_nifree >= avgifree) { 243 mincg = cg; 244 minndir = fs->fs_cs(fs, cg).cs_ndir; 245 } 246 return ((ino_t)(fs->fs_ipg * mincg)); 247 } 248 249 /* 250 * Select the desired position for the next block in a file. The file is 251 * logically divided into sections. The first section is composed of the 252 * direct blocks. Each additional section contains fs_maxbpg blocks. 253 * 254 * If no blocks have been allocated in the first section, the policy is to 255 * request a block in the same cylinder group as the inode that describes 256 * the file. If no blocks have been allocated in any other section, the 257 * policy is to place the section in a cylinder group with a greater than 258 * average number of free blocks. An appropriate cylinder group is found 259 * by maintaining a rotor that sweeps the cylinder groups. When a new 260 * group of blocks is needed, the rotor is advanced until a cylinder group 261 * with greater than the average number of free blocks is found. 262 * 263 * If a section is already partially allocated, the policy is to 264 * contiguously allocate fs_maxcontig blocks. The end of one of these 265 * contiguous blocks and the beginning of the next is physically separated 266 * so that the disk head will be in transit between them for at least 267 * fs_rotdelay milliseconds. This is to allow time for the processor to 268 * schedule another I/O transfer. 269 */ 270 daddr_t 271 blkpref(ip, lbn, indx, bap) 272 struct inode *ip; 273 daddr_t lbn; 274 int indx; 275 daddr_t *bap; 276 { 277 register struct fs *fs; 278 int cg, avgbfree; 279 daddr_t nextblk; 280 281 fs = ip->i_fs; 282 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) { 283 if (lbn < NDADDR) { 284 cg = itog(fs, ip->i_number); 285 return (fs->fs_fpg * cg + fs->fs_frag); 286 } 287 /* 288 * Find a cylinder with greater than average number of 289 * unused data blocks. 290 */ 291 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 292 for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++) 293 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 294 fs->fs_cgrotor = cg; 295 return (fs->fs_fpg * cg + fs->fs_frag); 296 } 297 for (cg = 0; cg <= fs->fs_cgrotor; cg++) 298 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 299 fs->fs_cgrotor = cg; 300 return (fs->fs_fpg * cg + fs->fs_frag); 301 } 302 return (NULL); 303 } 304 /* 305 * One or more previous blocks have been laid out. If less 306 * than fs_maxcontig previous blocks are contiguous, the 307 * next block is requested contiguously, otherwise it is 308 * requested rotationally delayed by fs_rotdelay milliseconds. 309 */ 310 nextblk = bap[indx - 1] + fs->fs_frag; 311 if (indx > fs->fs_maxcontig && 312 bap[indx - fs->fs_maxcontig] + blkstofrags(fs, fs->fs_maxcontig) 313 != nextblk) 314 return (nextblk); 315 if (fs->fs_rotdelay != 0) 316 /* 317 * Here we convert ms of delay to frags as: 318 * (frags) = (ms) * (rev/sec) * (sect/rev) / 319 * ((sect/frag) * (ms/sec)) 320 * then round up to the next block. 321 */ 322 nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect / 323 (NSPF(fs) * 1000), fs->fs_frag); 324 return (nextblk); 325 } 326 327 /* 328 * Implement the cylinder overflow algorithm. 329 * 330 * The policy implemented by this algorithm is: 331 * 1) allocate the block in its requested cylinder group. 332 * 2) quadradically rehash on the cylinder group number. 333 * 3) brute force search for a free block. 334 */ 335 /*VARARGS5*/ 336 u_long 337 hashalloc(ip, cg, pref, size, allocator) 338 struct inode *ip; 339 int cg; 340 long pref; 341 int size; /* size for data blocks, mode for inodes */ 342 u_long (*allocator)(); 343 { 344 register struct fs *fs; 345 long result; 346 int i, icg = cg; 347 348 fs = ip->i_fs; 349 /* 350 * 1: preferred cylinder group 351 */ 352 result = (*allocator)(ip, cg, pref, size); 353 if (result) 354 return (result); 355 /* 356 * 2: quadratic rehash 357 */ 358 for (i = 1; i < fs->fs_ncg; i *= 2) { 359 cg += i; 360 if (cg >= fs->fs_ncg) 361 cg -= fs->fs_ncg; 362 result = (*allocator)(ip, cg, 0, size); 363 if (result) 364 return (result); 365 } 366 /* 367 * 3: brute force search 368 * Note that we start at i == 2, since 0 was checked initially, 369 * and 1 is always checked in the quadratic rehash. 370 */ 371 cg = (icg + 2) % fs->fs_ncg; 372 for (i = 2; i < fs->fs_ncg; i++) { 373 result = (*allocator)(ip, cg, 0, size); 374 if (result) 375 return (result); 376 cg++; 377 if (cg == fs->fs_ncg) 378 cg = 0; 379 } 380 return (NULL); 381 } 382 383 /* 384 * Determine whether a fragment can be extended. 385 * 386 * Check to see if the necessary fragments are available, and 387 * if they are, allocate them. 388 */ 389 daddr_t 390 fragextend(ip, cg, bprev, osize, nsize) 391 struct inode *ip; 392 int cg; 393 long bprev; 394 int osize, nsize; 395 { 396 register struct fs *fs; 397 register struct buf *bp; 398 register struct cg *cgp; 399 long bno; 400 int frags, bbase; 401 int i; 402 403 fs = ip->i_fs; 404 if (fs->fs_cs(fs, cg).cs_nffree < nsize - osize) 405 return (NULL); 406 frags = numfrags(fs, nsize); 407 bbase = fragoff(fs, bprev); 408 if (bbase > (bprev + frags - 1) % fs->fs_frag) { 409 /* cannot extend across a block boundry */ 410 return (NULL); 411 } 412 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize); 413 cgp = bp->b_un.b_cg; 414 if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) { 415 brelse(bp); 416 return (NULL); 417 } 418 cgp->cg_time = time.tv_sec; 419 bno = dtogd(fs, bprev); 420 for (i = numfrags(fs, osize); i < frags; i++) 421 if (isclr(cgp->cg_free, bno + i)) { 422 brelse(bp); 423 return (NULL); 424 } 425 /* 426 * the current fragment can be extended 427 * deduct the count on fragment being extended into 428 * increase the count on the remaining fragment (if any) 429 * allocate the extended piece 430 */ 431 for (i = frags; i < fs->fs_frag - bbase; i++) 432 if (isclr(cgp->cg_free, bno + i)) 433 break; 434 cgp->cg_frsum[i - numfrags(fs, osize)]--; 435 if (i != frags) 436 cgp->cg_frsum[i - frags]++; 437 for (i = numfrags(fs, osize); i < frags; i++) { 438 clrbit(cgp->cg_free, bno + i); 439 cgp->cg_cs.cs_nffree--; 440 fs->fs_cstotal.cs_nffree--; 441 fs->fs_cs(fs, cg).cs_nffree--; 442 } 443 fs->fs_fmod++; 444 bdwrite(bp); 445 return (bprev); 446 } 447 448 /* 449 * Determine whether a block can be allocated. 450 * 451 * Check to see if a block of the apprpriate size is available, 452 * and if it is, allocate it. 453 */ 454 daddr_t 455 alloccg(ip, cg, bpref, size) 456 struct inode *ip; 457 int cg; 458 daddr_t bpref; 459 int size; 460 { 461 register struct fs *fs; 462 register struct buf *bp; 463 register struct cg *cgp; 464 int bno, frags; 465 int allocsiz; 466 register int i; 467 468 fs = ip->i_fs; 469 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) 470 return (NULL); 471 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize); 472 cgp = bp->b_un.b_cg; 473 if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) { 474 brelse(bp); 475 return (NULL); 476 } 477 if (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize) 478 return (NULL); 479 cgp->cg_time = time.tv_sec; 480 if (size == fs->fs_bsize) { 481 bno = alloccgblk(fs, cgp, bpref); 482 bdwrite(bp); 483 return (bno); 484 } 485 /* 486 * check to see if any fragments are already available 487 * allocsiz is the size which will be allocated, hacking 488 * it down to a smaller size if necessary 489 */ 490 frags = numfrags(fs, size); 491 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) 492 if (cgp->cg_frsum[allocsiz] != 0) 493 break; 494 if (allocsiz == fs->fs_frag) { 495 /* 496 * no fragments were available, so a block will be 497 * allocated, and hacked up 498 */ 499 if (cgp->cg_cs.cs_nbfree == 0) { 500 brelse(bp); 501 return (NULL); 502 } 503 bno = alloccgblk(fs, cgp, bpref); 504 bpref = dtogd(fs, bno); 505 for (i = frags; i < fs->fs_frag; i++) 506 setbit(cgp->cg_free, bpref + i); 507 i = fs->fs_frag - frags; 508 cgp->cg_cs.cs_nffree += i; 509 fs->fs_cstotal.cs_nffree += i; 510 fs->fs_cs(fs, cg).cs_nffree += i; 511 fs->fs_fmod++; 512 cgp->cg_frsum[i]++; 513 bdwrite(bp); 514 return (bno); 515 } 516 bno = mapsearch(fs, cgp, bpref, allocsiz); 517 if (bno < 0) 518 return (NULL); 519 for (i = 0; i < frags; i++) 520 clrbit(cgp->cg_free, bno + i); 521 cgp->cg_cs.cs_nffree -= frags; 522 fs->fs_cstotal.cs_nffree -= frags; 523 fs->fs_cs(fs, cg).cs_nffree -= frags; 524 fs->fs_fmod++; 525 cgp->cg_frsum[allocsiz]--; 526 if (frags != allocsiz) 527 cgp->cg_frsum[allocsiz - frags]++; 528 bdwrite(bp); 529 return (cg * fs->fs_fpg + bno); 530 } 531 532 /* 533 * Allocate a block in a cylinder group. 534 * 535 * This algorithm implements the following policy: 536 * 1) allocate the requested block. 537 * 2) allocate a rotationally optimal block in the same cylinder. 538 * 3) allocate the next available block on the block rotor for the 539 * specified cylinder group. 540 * Note that this routine only allocates fs_bsize blocks; these 541 * blocks may be fragmented by the routine that allocates them. 542 */ 543 daddr_t 544 alloccgblk(fs, cgp, bpref) 545 register struct fs *fs; 546 register struct cg *cgp; 547 daddr_t bpref; 548 { 549 daddr_t bno; 550 int cylno, pos, delta; 551 short *cylbp; 552 register int i; 553 554 if (bpref == 0) { 555 bpref = cgp->cg_rotor; 556 goto norot; 557 } 558 bpref &= ~(fs->fs_frag - 1); 559 bpref = dtogd(fs, bpref); 560 /* 561 * if the requested block is available, use it 562 */ 563 if (isblock(fs, cgp->cg_free, fragstoblks(fs, bpref))) { 564 bno = bpref; 565 goto gotit; 566 } 567 /* 568 * check for a block available on the same cylinder 569 */ 570 cylno = cbtocylno(fs, bpref); 571 if (cgp->cg_btot[cylno] == 0) 572 goto norot; 573 if (fs->fs_cpc == 0) { 574 /* 575 * block layout info is not available, so just have 576 * to take any block in this cylinder. 577 */ 578 bpref = howmany(fs->fs_spc * cylno, NSPF(fs)); 579 goto norot; 580 } 581 /* 582 * check the summary information to see if a block is 583 * available in the requested cylinder starting at the 584 * requested rotational position and proceeding around. 585 */ 586 cylbp = cgp->cg_b[cylno]; 587 pos = cbtorpos(fs, bpref); 588 for (i = pos; i < NRPOS; i++) 589 if (cylbp[i] > 0) 590 break; 591 if (i == NRPOS) 592 for (i = 0; i < pos; i++) 593 if (cylbp[i] > 0) 594 break; 595 if (cylbp[i] > 0) { 596 /* 597 * found a rotational position, now find the actual 598 * block. A panic if none is actually there. 599 */ 600 pos = cylno % fs->fs_cpc; 601 bno = (cylno - pos) * fs->fs_spc / NSPB(fs); 602 if (fs->fs_postbl[pos][i] == -1) { 603 printf("pos = %d, i = %d, fs = %s\n", 604 pos, i, fs->fs_fsmnt); 605 panic("alloccgblk: cyl groups corrupted"); 606 } 607 for (i = fs->fs_postbl[pos][i];; ) { 608 if (isblock(fs, cgp->cg_free, bno + i)) { 609 bno = blkstofrags(fs, (bno + i)); 610 goto gotit; 611 } 612 delta = fs->fs_rotbl[i]; 613 if (delta <= 0 || delta > MAXBPC - i) 614 break; 615 i += delta; 616 } 617 printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt); 618 panic("alloccgblk: can't find blk in cyl"); 619 } 620 norot: 621 /* 622 * no blocks in the requested cylinder, so take next 623 * available one in this cylinder group. 624 */ 625 bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag); 626 if (bno < 0) 627 return (NULL); 628 cgp->cg_rotor = bno; 629 gotit: 630 clrblock(fs, cgp->cg_free, (long)fragstoblks(fs, bno)); 631 cgp->cg_cs.cs_nbfree--; 632 fs->fs_cstotal.cs_nbfree--; 633 fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--; 634 cylno = cbtocylno(fs, bno); 635 cgp->cg_b[cylno][cbtorpos(fs, bno)]--; 636 cgp->cg_btot[cylno]--; 637 fs->fs_fmod++; 638 return (cgp->cg_cgx * fs->fs_fpg + bno); 639 } 640 641 /* 642 * Determine whether an inode can be allocated. 643 * 644 * Check to see if an inode is available, and if it is, 645 * allocate it using the following policy: 646 * 1) allocate the requested inode. 647 * 2) allocate the next available inode after the requested 648 * inode in the specified cylinder group. 649 */ 650 ino_t 651 ialloccg(ip, cg, ipref, mode) 652 struct inode *ip; 653 int cg; 654 daddr_t ipref; 655 int mode; 656 { 657 register struct fs *fs; 658 register struct buf *bp; 659 register struct cg *cgp; 660 int i; 661 662 fs = ip->i_fs; 663 if (fs->fs_cs(fs, cg).cs_nifree == 0) 664 return (NULL); 665 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize); 666 cgp = bp->b_un.b_cg; 667 if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) { 668 brelse(bp); 669 return (NULL); 670 } 671 if (cgp->cg_cs.cs_nifree == 0) 672 return (NULL); 673 cgp->cg_time = time.tv_sec; 674 if (ipref) { 675 ipref %= fs->fs_ipg; 676 if (isclr(cgp->cg_iused, ipref)) 677 goto gotit; 678 } else 679 ipref = cgp->cg_irotor; 680 for (i = 0; i < fs->fs_ipg; i++) { 681 ipref++; 682 if (ipref >= fs->fs_ipg) 683 ipref = 0; 684 if (isclr(cgp->cg_iused, ipref)) { 685 cgp->cg_irotor = ipref; 686 goto gotit; 687 } 688 } 689 brelse(bp); 690 return (NULL); 691 gotit: 692 setbit(cgp->cg_iused, ipref); 693 cgp->cg_cs.cs_nifree--; 694 fs->fs_cstotal.cs_nifree--; 695 fs->fs_cs(fs, cg).cs_nifree--; 696 fs->fs_fmod++; 697 if ((mode & IFMT) == IFDIR) { 698 cgp->cg_cs.cs_ndir++; 699 fs->fs_cstotal.cs_ndir++; 700 fs->fs_cs(fs, cg).cs_ndir++; 701 } 702 bdwrite(bp); 703 return (cg * fs->fs_ipg + ipref); 704 } 705 706 /* 707 * Free a block or fragment. 708 * 709 * The specified block or fragment is placed back in the 710 * free map. If a fragment is deallocated, a possible 711 * block reassembly is checked. 712 */ 713 free(ip, bno, size) 714 register struct inode *ip; 715 daddr_t bno; 716 off_t size; 717 { 718 register struct fs *fs; 719 register struct cg *cgp; 720 register struct buf *bp; 721 int cg, blk, frags, bbase; 722 register int i; 723 724 fs = ip->i_fs; 725 if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) { 726 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n", 727 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt); 728 panic("free: bad size"); 729 } 730 cg = dtog(fs, bno); 731 if (badblock(fs, bno)) { 732 printf("bad block %d, ino %d\n", bno, ip->i_number); 733 return; 734 } 735 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize); 736 cgp = bp->b_un.b_cg; 737 if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) { 738 brelse(bp); 739 return; 740 } 741 cgp->cg_time = time.tv_sec; 742 bno = dtogd(fs, bno); 743 if (size == fs->fs_bsize) { 744 if (isblock(fs, cgp->cg_free, fragstoblks(fs, bno))) { 745 printf("dev = 0x%x, block = %d, fs = %s\n", 746 ip->i_dev, bno, fs->fs_fsmnt); 747 panic("free: freeing free block"); 748 } 749 setblock(fs, cgp->cg_free, fragstoblks(fs, bno)); 750 cgp->cg_cs.cs_nbfree++; 751 fs->fs_cstotal.cs_nbfree++; 752 fs->fs_cs(fs, cg).cs_nbfree++; 753 i = cbtocylno(fs, bno); 754 cgp->cg_b[i][cbtorpos(fs, bno)]++; 755 cgp->cg_btot[i]++; 756 } else { 757 bbase = bno - (bno % fs->fs_frag); 758 /* 759 * decrement the counts associated with the old frags 760 */ 761 blk = blkmap(fs, cgp->cg_free, bbase); 762 fragacct(fs, blk, cgp->cg_frsum, -1); 763 /* 764 * deallocate the fragment 765 */ 766 frags = numfrags(fs, size); 767 for (i = 0; i < frags; i++) { 768 if (isset(cgp->cg_free, bno + i)) { 769 printf("dev = 0x%x, block = %d, fs = %s\n", 770 ip->i_dev, bno + i, fs->fs_fsmnt); 771 panic("free: freeing free frag"); 772 } 773 setbit(cgp->cg_free, bno + i); 774 } 775 cgp->cg_cs.cs_nffree += i; 776 fs->fs_cstotal.cs_nffree += i; 777 fs->fs_cs(fs, cg).cs_nffree += i; 778 /* 779 * add back in counts associated with the new frags 780 */ 781 blk = blkmap(fs, cgp->cg_free, bbase); 782 fragacct(fs, blk, cgp->cg_frsum, 1); 783 /* 784 * if a complete block has been reassembled, account for it 785 */ 786 if (isblock(fs, cgp->cg_free, fragstoblks(fs, bbase))) { 787 cgp->cg_cs.cs_nffree -= fs->fs_frag; 788 fs->fs_cstotal.cs_nffree -= fs->fs_frag; 789 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; 790 cgp->cg_cs.cs_nbfree++; 791 fs->fs_cstotal.cs_nbfree++; 792 fs->fs_cs(fs, cg).cs_nbfree++; 793 i = cbtocylno(fs, bbase); 794 cgp->cg_b[i][cbtorpos(fs, bbase)]++; 795 cgp->cg_btot[i]++; 796 } 797 } 798 fs->fs_fmod++; 799 bdwrite(bp); 800 } 801 802 /* 803 * Free an inode. 804 * 805 * The specified inode is placed back in the free map. 806 */ 807 ifree(ip, ino, mode) 808 struct inode *ip; 809 ino_t ino; 810 int mode; 811 { 812 register struct fs *fs; 813 register struct cg *cgp; 814 register struct buf *bp; 815 int cg; 816 817 fs = ip->i_fs; 818 if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) { 819 printf("dev = 0x%x, ino = %d, fs = %s\n", 820 ip->i_dev, ino, fs->fs_fsmnt); 821 panic("ifree: range"); 822 } 823 cg = itog(fs, ino); 824 bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize); 825 cgp = bp->b_un.b_cg; 826 if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) { 827 brelse(bp); 828 return; 829 } 830 cgp->cg_time = time.tv_sec; 831 ino %= fs->fs_ipg; 832 if (isclr(cgp->cg_iused, ino)) { 833 printf("dev = 0x%x, ino = %d, fs = %s\n", 834 ip->i_dev, ino, fs->fs_fsmnt); 835 panic("ifree: freeing free inode"); 836 } 837 clrbit(cgp->cg_iused, ino); 838 cgp->cg_cs.cs_nifree++; 839 fs->fs_cstotal.cs_nifree++; 840 fs->fs_cs(fs, cg).cs_nifree++; 841 if ((mode & IFMT) == IFDIR) { 842 cgp->cg_cs.cs_ndir--; 843 fs->fs_cstotal.cs_ndir--; 844 fs->fs_cs(fs, cg).cs_ndir--; 845 } 846 fs->fs_fmod++; 847 bdwrite(bp); 848 } 849 850 /* 851 * Find a block of the specified size in the specified cylinder group. 852 * 853 * It is a panic if a request is made to find a block if none are 854 * available. 855 */ 856 daddr_t 857 mapsearch(fs, cgp, bpref, allocsiz) 858 register struct fs *fs; 859 register struct cg *cgp; 860 daddr_t bpref; 861 int allocsiz; 862 { 863 daddr_t bno; 864 int start, len, loc, i; 865 int blk, field, subfield, pos; 866 867 /* 868 * find the fragment by searching through the free block 869 * map for an appropriate bit pattern 870 */ 871 if (bpref) 872 start = dtogd(fs, bpref) / NBBY; 873 else 874 start = cgp->cg_frotor / NBBY; 875 len = howmany(fs->fs_fpg, NBBY) - start; 876 loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag], 877 1 << (allocsiz - 1 + (fs->fs_frag % NBBY))); 878 if (loc == 0) { 879 len = start + 1; 880 start = 0; 881 loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag], 882 1 << (allocsiz - 1 + (fs->fs_frag % NBBY))); 883 if (loc == 0) 884 return (-1); 885 } 886 bno = (start + len - loc) * NBBY; 887 cgp->cg_frotor = bno; 888 /* 889 * found the byte in the map 890 * sift through the bits to find the selected frag 891 */ 892 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) { 893 blk = blkmap(fs, cgp->cg_free, bno); 894 blk <<= 1; 895 field = around[allocsiz]; 896 subfield = inside[allocsiz]; 897 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { 898 if ((blk & field) == subfield) 899 return (bno + pos); 900 field <<= 1; 901 subfield <<= 1; 902 } 903 } 904 printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt); 905 panic("alloccg: block not in map"); 906 return (-1); 907 } 908 909 /* 910 * Fserr prints the name of a file system with an error diagnostic. 911 * 912 * The form of the error message is: 913 * fs: error message 914 */ 915 fserr(fs, cp) 916 struct fs *fs; 917 char *cp; 918 { 919 920 printf("%s: %s\n", fs->fs_fsmnt, cp); 921 } 922