/*	lfs_alloc.c	2.22	83/02/10	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/mount.h"
#include "../h/fs.h"
#include "../h/conf.h"
#include "../h/buf.h"
#include "../h/inode.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/quota.h"
#include "../h/kernel.h"

extern u_long hashalloc();
extern ino_t ialloccg();
extern daddr_t alloccg();
extern daddr_t alloccgblk();
extern daddr_t fragextend();
extern daddr_t blkpref();
extern daddr_t mapsearch();
extern int inside[], around[];
extern unsigned char *fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate a block in the same cylinder group.
 * 4) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 * 1) allocate a block in the cylinder group that contains the
 *    inode for the file.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available block is located.
 */
struct buf *
alloc(ip, bpref, size)
	register struct inode *ip;
	daddr_t bpref;
	int size;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("alloc: bad size");
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
#ifdef QUOTA
	if (chkdq(ip, (long)((unsigned)size/DEV_BSIZE), 0))
		return (NULL);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = itog(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, size,
		(u_long (*)())alloccg);
	if (bno <= 0)
		goto nospace;
	bp = getblk(ip->i_dev, fsbtodb(fs, bno), size);
	clrbuf(bp);
	return (bp);
nospace:
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
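
/*
 * A worked example of the free space reserve check above, with values
 * assumed for illustration: with fs_frag = 8, 1000 free blocks and
 * 24 free fragments, the space available to ordinary users is
 * 1000 * 8 + 24 = 8024 fragments.  If fs_dsize = 100000 fragments and
 * fs_minfree = 10, the reserve is 100000 * 10 / 100 = 10000 fragments,
 * so this request fails with ENOSPC for everyone but the superuser,
 * who alone may consume the last fs_minfree percent of the disk.
 */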

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
struct buf *
realloccg(ip, bprev, bpref, osize, nsize)
	register struct inode *ip;
	daddr_t bprev, bpref;
	int osize, nsize;
{
	daddr_t bno;
	register struct fs *fs;
	register struct buf *bp, *obp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (unsigned)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf("dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("realloccg: bad size");
	}
	if (u.u_uid != 0 &&
	    fs->fs_cstotal.cs_nbfree * fs->fs_frag + fs->fs_cstotal.cs_nffree <
	      fs->fs_dsize * fs->fs_minfree / 100)
		goto nospace;
	if (bprev == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("realloccg: bad bprev");
	}
#ifdef QUOTA
	if (chkdq(ip, (long)((unsigned)(nsize-osize)/DEV_BSIZE), 0))
		return (NULL);
#endif
	cg = dtog(fs, bprev);
	bno = fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno != 0) {
		do {
			bp = bread(ip->i_dev, fsbtodb(fs, bno), osize);
			if (bp->b_flags & B_ERROR) {
				brelse(bp);
				return (NULL);
			}
		} while (brealloc(bp, nsize) == 0);
		bp->b_flags |= B_DONE;
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		return (bp);
	}
	if (bpref >= fs->fs_size)
		bpref = 0;
	bno = (daddr_t)hashalloc(ip, cg, (long)bpref, nsize,
		(u_long (*)())alloccg);
	if (bno > 0) {
		obp = bread(ip->i_dev, fsbtodb(fs, bprev), osize);
		if (obp->b_flags & B_ERROR) {
			brelse(obp);
			return (NULL);
		}
		bp = getblk(ip->i_dev, fsbtodb(fs, bno), nsize);
		bcopy(obp->b_un.b_addr, bp->b_un.b_addr, (u_int)osize);
		bzero(bp->b_un.b_addr + osize, (unsigned)nsize - osize);
		brelse(obp);
		free(ip, bprev, (off_t)osize);
		return (bp);
	}
nospace:
	/*
	 * no space available
	 */
	fserr(fs, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}
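
/*
 * A sketch of the common case above, with sizes assumed for
 * illustration: with fs_fsize = 1024 and fs_bsize = 8192, growing a
 * file's last piece from osize = 2048 to nsize = 3072 first asks
 * fragextend() for the 1024-byte fragment adjacent to bprev.  Only if
 * that fragment is in use does hashalloc() find a fresh 3072-byte
 * piece elsewhere; the old contents are then copied forward with
 * bcopy() and the old fragments handed back with free().
 */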

/*
 * Allocate an inode in the file system.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate an inode:
 * 1) allocate the requested inode.
 * 2) allocate an inode in the same cylinder group.
 * 3) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 * 1) allocate an inode in cylinder group 0.
 * 2) quadratically rehash into other cylinder groups, until an
 *    available inode is located.
 */
struct inode *
ialloc(pip, ipref, mode)
	register struct inode *pip;
	ino_t ipref;
	int mode;
{
	ino_t ino;
	register struct fs *fs;
	register struct inode *ip;
	int cg;

	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;
#ifdef QUOTA
	if (chkiq(pip->i_dev, (struct inode *)NULL, u.u_uid, 0))
		return (NULL);
#endif
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = itog(fs, ipref);
	ino = (ino_t)hashalloc(pip, cg, (long)ipref, mode, ialloccg);
	if (ino == 0)
		goto noinodes;
	ip = iget(pip->i_dev, pip->i_fs, ino);
	if (ip == NULL) {
		ifree(pip, ino, 0);	/* ip is NULL here; free via pip */
		return (NULL);
	}
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ialloc: dup alloc");
	}
	return (ip);
noinodes:
	fserr(fs, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	u.u_error = ENOSPC;
	return (NULL);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
ino_t
dirpref(fs)
	register struct fs *fs;
{
	int cg, minndir, mincg, avgifree;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	minndir = fs->fs_ipg;
	mincg = 0;
	for (cg = 0; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= avgifree) {
			mincg = cg;
			minndir = fs->fs_cs(fs, cg).cs_ndir;
		}
	return ((ino_t)(fs->fs_ipg * mincg));
}
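
/*
 * A worked example of the policy above, with counts assumed for
 * illustration: with fs_ncg = 4, fs_ipg = 1024 and per-group free
 * inode counts of 200, 500, 400 and 100, avgifree = 1200 / 4 = 300,
 * so only groups 1 and 2 qualify.  Of those, the group holding fewer
 * directories wins, and the returned preference fs_ipg * mincg is the
 * first inode of that group.
 */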

/*
 * Select the desired position for the next block in a file. The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by maintaining a rotor that sweeps the cylinder groups. When a new
 * group of blocks is needed, the rotor is advanced until a cylinder group
 * with greater than the average number of free blocks is found.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
daddr_t
blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	daddr_t lbn;
	int indx;
	daddr_t *bap;
{
	register struct fs *fs;
	int cg, avgbfree;
	daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR) {
			cg = itog(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder group with a greater than average
		 * number of unused data blocks.
		 */
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = fs->fs_cgrotor + 1; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= fs->fs_cgrotor; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (indx > fs->fs_maxcontig &&
	    bap[indx - fs->fs_maxcontig] + fs->fs_frag * fs->fs_maxcontig
	    != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
		 *		((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}
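
/*
 * A worked instance of the conversion above, with drive parameters
 * assumed for illustration: with fs_rotdelay = 4 ms, fs_rps = 60,
 * fs_nsect = 32, NSPF(fs) = 2 and fs_frag = 8, the quotient
 * 4 * 60 * 32 / (2 * 1000) = 3.84 truncates to 3 frags, which
 * roundup() raises to one full block of 8; the next block is thus
 * requested 8 fragments beyond strict contiguity.
 */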

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 * 1) allocate the block in its requested cylinder group.
 * 2) quadratically rehash on the cylinder group number.
 * 3) brute force search for a free block.
 */
/*VARARGS5*/
u_long
hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_long (*allocator)();
{
	register struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = icg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
daddr_t
fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	long bno;
	int frags, bbase;
	int i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < nsize - osize)
		return (NULL);
	frags = numfrags(fs, nsize);
	bbase = fragoff(fs, bprev);
	if (bbase > (bprev + frags - 1) % fs->fs_frag) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cgp->cg_free, bno + i)) {
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cgp->cg_free, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cgp->cg_free, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
	return (bprev);
}
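
/*
 * A worked example of the bookkeeping above, with fs_frag = 8
 * assumed: extending a piece from 2 fragments to 3 when a free run of
 * 4 fragments follows it leaves i = 6 after the scan.  The old run of
 * length 4 disappears, so cg_frsum[i - numfrags(fs, osize)], that is
 * cg_frsum[4], is decremented; a run of length 3 remains, so
 * cg_frsum[i - frags], that is cg_frsum[3], is incremented before the
 * newly claimed bit is cleared in cg_free.
 */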

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
daddr_t
alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	int bno, frags;
	int allocsiz;
	register int i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return (NULL);
	}
	if (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize) {
		brelse(bp);	/* release the buffer before failing */
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (size == fs->fs_bsize) {
		bno = alloccgblk(fs, cgp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (NULL);
		}
		bno = alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cgp->cg_free, bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod++;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}
	bno = mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);	/* release the buffer before failing */
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cgp->cg_free, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod++;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}
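
/*
 * A worked example of the carving above, with fs_frag = 8 assumed:
 * a request for frags = 3 fragments when no run of 3..7 free
 * fragments exists forces a full block to be split; the 5 leftover
 * fragments are marked free, the nffree counts rise by 5, and
 * cg_frsum[5] records the new run.  Had mapsearch() found a run of
 * allocsiz = 5 instead, cg_frsum[5] would drop and cg_frsum[2] would
 * rise for the 2 fragments left over.
 */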

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 * 1) allocate the requested block.
 * 2) allocate a rotationally optimal block in the same cylinder.
 * 3) allocate the next available block on the block rotor for the
 *    specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
daddr_t
alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
{
	daddr_t bno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	if (bpref == 0) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref &= ~(fs->fs_frag - 1);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (isblock(fs, cgp->cg_free, bpref/fs->fs_frag)) {
		bno = bpref;
		goto gotit;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cgp->cg_btot[cylno] == 0)
		goto norot;
	if (fs->fs_cpc == 0) {
		/*
		 * block layout info is not available, so just have
		 * to take any block in this cylinder.
		 */
		bpref = howmany(fs->fs_spc * cylno, NSPF(fs));
		goto norot;
	}
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cgp->cg_b[cylno];
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < NRPOS; i++)
		if (cylbp[i] > 0)
			break;
	if (i == NRPOS)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  It is a panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs->fs_postbl[pos][i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("alloccgblk: cyl groups corrupted");
		}
		for (i = fs->fs_postbl[pos][i];; ) {
			if (isblock(fs, cgp->cg_free, bno + i)) {
				bno = (bno + i) * fs->fs_frag;
				goto gotit;
			}
			delta = fs->fs_rotbl[i];
			if (delta <= 0 || delta > MAXBPC - i)
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	clrblock(fs, cgp->cg_free, (long)(bno/fs->fs_frag));
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cgp->cg_b[cylno][cbtorpos(fs, bno)]--;
	cgp->cg_btot[cylno]--;
	fs->fs_fmod++;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 * 1) allocate the requested inode.
 * 2) allocate the next available inode after the requested
 *    inode in the specified cylinder group.
 */
ino_t
ialloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct buf *bp;
	register struct cg *cgp;
	int i;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return (NULL);
	}
	if (cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);	/* release the buffer before failing */
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cgp->cg_iused, ipref))
			goto gotit;
	} else
		ipref = cgp->cg_irotor;
	for (i = 0; i < fs->fs_ipg; i++) {
		ipref++;
		if (ipref >= fs->fs_ipg)
			ipref = 0;
		if (isclr(cgp->cg_iused, ipref)) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	brelse(bp);
	return (NULL);
gotit:
	setbit(cgp->cg_iused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}
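
/*
 * A worked example of the search above, with fs_ipg = 1024 assumed:
 * a call for group cg = 2 with ipref = 2055 reduces the preference to
 * inode 7 within the group.  If bit 7 of cg_iused is clear that inode
 * is taken; otherwise the scan walks 8, 9, ... wrapping at fs_ipg,
 * and the winner cg * fs_ipg + ipref is returned as a file system
 * wide inode number.
 */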

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
free(ip, bno, size)
	register struct inode *ip;
	daddr_t bno;
	off_t size;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg, blk, frags, bbase;
	register int i;

	fs = ip->i_fs;
	if ((unsigned)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("free: bad size");
	}
	cg = dtog(fs, bno);
	if (badblock(fs, bno)) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		return;
	}
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		if (isblock(fs, cgp->cg_free, bno/fs->fs_frag)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("free: freeing free block");
		}
		setblock(fs, cgp->cg_free, bno/fs->fs_frag);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cgp->cg_b[i][cbtorpos(fs, bno)]++;
		cgp->cg_btot[i]++;
	} else {
		bbase = bno - (bno % fs->fs_frag);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cgp->cg_free, bbase);
		fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cgp->cg_free, bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("free: freeing free frag");
			}
			setbit(cgp->cg_free, bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cgp->cg_free, bbase);
		fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		if (isblock(fs, cgp->cg_free, bbase / fs->fs_frag)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cgp->cg_b[i][cbtorpos(fs, bbase)]++;
			cgp->cg_btot[i]++;
		}
	}
	fs->fs_fmod++;
	bdwrite(bp);
}
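
/*
 * A worked example of the reassembly above, with fs_frag = 8 assumed:
 * freeing the last 3 allocated fragments of a block whose other 5 are
 * already free sets those 3 bits and credits 3 fragments to the
 * nffree counts; isblock() then finds all 8 bits set, so the
 * fragments are folded back into a whole block: nffree drops by
 * fs_frag, nbfree rises by one, and the cylinder position counts for
 * the block are credited.
 */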

/*
 * Free an inode.
 *
 * The specified inode is placed back in the free map.
 */
ifree(ip, ino, mode)
	struct inode *ip;
	ino_t ino;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	register struct buf *bp;
	int cg;

	fs = ip->i_fs;
	if ((unsigned)ino >= fs->fs_ipg*fs->fs_ncg) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: range");
	}
	cg = itog(fs, ino);
	bp = bread(ip->i_dev, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize);
	cgp = bp->b_un.b_cg;
	if (bp->b_flags & B_ERROR || cgp->cg_magic != CG_MAGIC) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	ino %= fs->fs_ipg;
	if (isclr(cgp->cg_iused, ino)) {
		printf("dev = 0x%x, ino = %d, fs = %s\n",
		    ip->i_dev, ino, fs->fs_fsmnt);
		panic("ifree: freeing free inode");
	}
	clrbit(cgp->cg_iused, ino);
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod++;
	bdwrite(bp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
daddr_t
mapsearch(fs, cgp, bpref, allocsiz)
	register struct fs *fs;
	register struct cg *cgp;
	daddr_t bpref;
	int allocsiz;
{
	daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
		1 << (allocsiz - 1 + (fs->fs_frag % NBBY)));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc(len, &cgp->cg_free[start], fragtbl[fs->fs_frag],
			1 << (allocsiz - 1 + (fs->fs_frag % NBBY)));
		if (loc == 0)
			return (-1);
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cgp->cg_free, bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt);
	panic("alloccg: block not in map");
	return (-1);
}

/*
 * Fserr prints the name of a file system with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
fserr(fs, cp)
	struct fs *fs;
	char *cp;
{

	printf("%s: %s\n", fs->fs_fsmnt, cp);
}