1 /* $NetBSD: ffs_alloc.c,v 1.35 2000/05/19 04:34:44 thorpej Exp $ */ 2 3 /* 4 * Copyright (c) 1982, 1986, 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ffs.h"
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <vm/vm.h>

#include <uvm/uvm_extern.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_alloccgblk __P((struct inode *, struct buf *,
	ufs_daddr_t));
static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t, int));
static ino_t ffs_dirpref __P((struct fs *));
static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int));
static void ffs_fserr __P((struct fs *, u_int, char *));
static u_long ffs_hashalloc
	__P((struct inode *, int, long, int,
	    ufs_daddr_t (*)(struct inode *, int, ufs_daddr_t, int)));
static ufs_daddr_t ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int));
static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *,
	ufs_daddr_t, int));
#if defined(DIAGNOSTIC) || defined(DEBUG)
static int ffs_checkblk __P((struct inode *, ufs_daddr_t, long size));
#endif

/* if 1, changes in optimization strategy are logged */
int ffs_log_changeopt = 0;

/* in ffs_tables.c */
extern int inside[], around[];
extern u_char *fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
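 *
 * For example (illustrative figures only): with fs_ncg = 16 cylinder
 * groups and preferred group cg, the quadratic rehash performed by
 * ffs_hashalloc() below probes groups cg+1, cg+3, cg+7 and cg+15
 * (mod fs_ncg) before falling back to a brute-force sweep starting
 * at cg+2.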
107 */ 108 int 109 ffs_alloc(ip, lbn, bpref, size, cred, bnp) 110 struct inode *ip; 111 ufs_daddr_t lbn, bpref; 112 int size; 113 struct ucred *cred; 114 ufs_daddr_t *bnp; 115 { 116 struct fs *fs; 117 ufs_daddr_t bno; 118 int cg; 119 #ifdef QUOTA 120 int error; 121 #endif 122 123 *bnp = 0; 124 fs = ip->i_fs; 125 #ifdef DIAGNOSTIC 126 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) { 127 printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n", 128 ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt); 129 panic("ffs_alloc: bad size"); 130 } 131 if (cred == NOCRED) 132 panic("ffs_alloc: missing credential\n"); 133 #endif /* DIAGNOSTIC */ 134 if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0) 135 goto nospace; 136 if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0) 137 goto nospace; 138 #ifdef QUOTA 139 if ((error = chkdq(ip, (long)btodb(size), cred, 0)) != 0) 140 return (error); 141 #endif 142 if (bpref >= fs->fs_size) 143 bpref = 0; 144 if (bpref == 0) 145 cg = ino_to_cg(fs, ip->i_number); 146 else 147 cg = dtog(fs, bpref); 148 bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size, 149 ffs_alloccg); 150 if (bno > 0) { 151 ip->i_ffs_blocks += btodb(size); 152 ip->i_flag |= IN_CHANGE | IN_UPDATE; 153 *bnp = bno; 154 return (0); 155 } 156 #ifdef QUOTA 157 /* 158 * Restore user's disk quota because allocation failed. 159 */ 160 (void) chkdq(ip, (long)-btodb(size), cred, FORCE); 161 #endif 162 nospace: 163 ffs_fserr(fs, cred->cr_uid, "file system full"); 164 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); 165 return (ENOSPC); 166 } 167 168 /* 169 * Reallocate a fragment to a bigger size 170 * 171 * The number and size of the old block is given, and a preference 172 * and new size is also specified. The allocator attempts to extend 173 * the original block. Failing that, the regular block allocator is 174 * invoked to get an appropriate block. 175 */ 176 int 177 ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp) 178 struct inode *ip; 179 ufs_daddr_t lbprev; 180 ufs_daddr_t bpref; 181 int osize, nsize; 182 struct ucred *cred; 183 struct buf **bpp; 184 { 185 struct fs *fs; 186 struct buf *bp; 187 int cg, request, error; 188 ufs_daddr_t bprev, bno; 189 190 *bpp = 0; 191 fs = ip->i_fs; 192 #ifdef DIAGNOSTIC 193 if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 || 194 (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) { 195 printf( 196 "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n", 197 ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt); 198 panic("ffs_realloccg: bad size"); 199 } 200 if (cred == NOCRED) 201 panic("ffs_realloccg: missing credential\n"); 202 #endif /* DIAGNOSTIC */ 203 if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0) 204 goto nospace; 205 if ((bprev = ufs_rw32(ip->i_ffs_db[lbprev], UFS_FSNEEDSWAP(fs))) == 0) { 206 printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n", 207 ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt); 208 panic("ffs_realloccg: bad bprev"); 209 } 210 /* 211 * Allocate the extra space in the buffer. 212 */ 213 if ((error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) != 0) { 214 brelse(bp); 215 return (error); 216 } 217 #ifdef QUOTA 218 if ((error = chkdq(ip, (long)btodb(nsize - osize), cred, 0)) != 0) { 219 brelse(bp); 220 return (error); 221 } 222 #endif 223 /* 224 * Check for extension in the existing location. 
225 */ 226 cg = dtog(fs, bprev); 227 if ((bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) != 0) { 228 if (bp->b_blkno != fsbtodb(fs, bno)) 229 panic("bad blockno"); 230 ip->i_ffs_blocks += btodb(nsize - osize); 231 ip->i_flag |= IN_CHANGE | IN_UPDATE; 232 allocbuf(bp, nsize); 233 bp->b_flags |= B_DONE; 234 memset((char *)bp->b_data + osize, 0, (u_int)nsize - osize); 235 *bpp = bp; 236 return (0); 237 } 238 /* 239 * Allocate a new disk location. 240 */ 241 if (bpref >= fs->fs_size) 242 bpref = 0; 243 switch ((int)fs->fs_optim) { 244 case FS_OPTSPACE: 245 /* 246 * Allocate an exact sized fragment. Although this makes 247 * best use of space, we will waste time relocating it if 248 * the file continues to grow. If the fragmentation is 249 * less than half of the minimum free reserve, we choose 250 * to begin optimizing for time. 251 */ 252 request = nsize; 253 if (fs->fs_minfree < 5 || 254 fs->fs_cstotal.cs_nffree > 255 fs->fs_dsize * fs->fs_minfree / (2 * 100)) 256 break; 257 258 if (ffs_log_changeopt) { 259 log(LOG_NOTICE, 260 "%s: optimization changed from SPACE to TIME\n", 261 fs->fs_fsmnt); 262 } 263 264 fs->fs_optim = FS_OPTTIME; 265 break; 266 case FS_OPTTIME: 267 /* 268 * At this point we have discovered a file that is trying to 269 * grow a small fragment to a larger fragment. To save time, 270 * we allocate a full sized block, then free the unused portion. 271 * If the file continues to grow, the `ffs_fragextend' call 272 * above will be able to grow it in place without further 273 * copying. If aberrant programs cause disk fragmentation to 274 * grow within 2% of the free reserve, we choose to begin 275 * optimizing for space. 276 */ 277 request = fs->fs_bsize; 278 if (fs->fs_cstotal.cs_nffree < 279 fs->fs_dsize * (fs->fs_minfree - 2) / 100) 280 break; 281 282 if (ffs_log_changeopt) { 283 log(LOG_NOTICE, 284 "%s: optimization changed from TIME to SPACE\n", 285 fs->fs_fsmnt); 286 } 287 288 fs->fs_optim = FS_OPTSPACE; 289 break; 290 default: 291 printf("dev = 0x%x, optim = %d, fs = %s\n", 292 ip->i_dev, fs->fs_optim, fs->fs_fsmnt); 293 panic("ffs_realloccg: bad optim"); 294 /* NOTREACHED */ 295 } 296 bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request, 297 ffs_alloccg); 298 if (bno > 0) { 299 bp->b_blkno = fsbtodb(fs, bno); 300 (void) uvm_vnp_uncache(ITOV(ip)); 301 if (!DOINGSOFTDEP(ITOV(ip))) 302 ffs_blkfree(ip, bprev, (long)osize); 303 if (nsize < request) 304 ffs_blkfree(ip, bno + numfrags(fs, nsize), 305 (long)(request - nsize)); 306 ip->i_ffs_blocks += btodb(nsize - osize); 307 ip->i_flag |= IN_CHANGE | IN_UPDATE; 308 allocbuf(bp, nsize); 309 bp->b_flags |= B_DONE; 310 memset((char *)bp->b_data + osize, 0, (u_int)nsize - osize); 311 *bpp = bp; 312 return (0); 313 } 314 #ifdef QUOTA 315 /* 316 * Restore user's disk quota because allocation failed. 317 */ 318 (void) chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE); 319 #endif 320 brelse(bp); 321 nospace: 322 /* 323 * no space available 324 */ 325 ffs_fserr(fs, cred->cr_uid, "file system full"); 326 uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); 327 return (ENOSPC); 328 } 329 330 /* 331 * Reallocate a sequence of blocks into a contiguous sequence of blocks. 332 * 333 * The vnode and an array of buffer pointers for a range of sequential 334 * logical blocks to be made contiguous is given. 
The allocator attempts 335 * to find a range of sequential blocks starting as close as possible to 336 * an fs_rotdelay offset from the end of the allocation for the logical 337 * block immediately preceeding the current range. If successful, the 338 * physical block numbers in the buffer pointers and in the inode are 339 * changed to reflect the new allocation. If unsuccessful, the allocation 340 * is left unchanged. The success in doing the reallocation is returned. 341 * Note that the error return is not reflected back to the user. Rather 342 * the previous block allocation will be used. 343 */ 344 #ifdef DEBUG 345 #include <sys/sysctl.h> 346 int prtrealloc = 0; 347 struct ctldebug debug15 = { "prtrealloc", &prtrealloc }; 348 #endif 349 350 int doasyncfree = 1; 351 extern int doreallocblks; 352 353 int 354 ffs_reallocblks(v) 355 void *v; 356 { 357 struct vop_reallocblks_args /* { 358 struct vnode *a_vp; 359 struct cluster_save *a_buflist; 360 } */ *ap = v; 361 struct fs *fs; 362 struct inode *ip; 363 struct vnode *vp; 364 struct buf *sbp, *ebp; 365 ufs_daddr_t *bap, *sbap, *ebap = NULL; 366 struct cluster_save *buflist; 367 ufs_daddr_t start_lbn, end_lbn, soff, newblk, blkno; 368 struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp; 369 int i, len, start_lvl, end_lvl, pref, ssize; 370 371 vp = ap->a_vp; 372 ip = VTOI(vp); 373 fs = ip->i_fs; 374 if (fs->fs_contigsumsize <= 0) 375 return (ENOSPC); 376 buflist = ap->a_buflist; 377 len = buflist->bs_nchildren; 378 start_lbn = buflist->bs_children[0]->b_lblkno; 379 end_lbn = start_lbn + len - 1; 380 #ifdef DIAGNOSTIC 381 for (i = 0; i < len; i++) 382 if (!ffs_checkblk(ip, 383 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 384 panic("ffs_reallocblks: unallocated block 1"); 385 for (i = 1; i < len; i++) 386 if (buflist->bs_children[i]->b_lblkno != start_lbn + i) 387 panic("ffs_reallocblks: non-logical cluster"); 388 blkno = buflist->bs_children[0]->b_blkno; 389 ssize = fsbtodb(fs, fs->fs_frag); 390 for (i = 1; i < len - 1; i++) 391 if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize)) 392 panic("ffs_reallocblks: non-physical cluster %d", i); 393 #endif 394 /* 395 * If the latest allocation is in a new cylinder group, assume that 396 * the filesystem has decided to move and do not force it back to 397 * the previous cylinder group. 398 */ 399 if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) != 400 dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno))) 401 return (ENOSPC); 402 if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) || 403 ufs_getlbns(vp, end_lbn, end_ap, &end_lvl)) 404 return (ENOSPC); 405 /* 406 * Get the starting offset and block map for the first block. 407 */ 408 if (start_lvl == 0) { 409 sbap = &ip->i_ffs_db[0]; 410 soff = start_lbn; 411 } else { 412 idp = &start_ap[start_lvl - 1]; 413 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) { 414 brelse(sbp); 415 return (ENOSPC); 416 } 417 sbap = (ufs_daddr_t *)sbp->b_data; 418 soff = idp->in_off; 419 } 420 /* 421 * Find the preferred location for the cluster. 422 */ 423 pref = ffs_blkpref(ip, start_lbn, soff, sbap); 424 /* 425 * If the block range spans two block maps, get the second map. 
426 */ 427 if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) { 428 ssize = len; 429 } else { 430 #ifdef DIAGNOSTIC 431 if (start_ap[start_lvl-1].in_lbn == idp->in_lbn) 432 panic("ffs_reallocblk: start == end"); 433 #endif 434 ssize = len - (idp->in_off + 1); 435 if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp)) 436 goto fail; 437 ebap = (ufs_daddr_t *)ebp->b_data; 438 } 439 /* 440 * Search the block map looking for an allocation of the desired size. 441 */ 442 if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref, 443 len, ffs_clusteralloc)) == 0) 444 goto fail; 445 /* 446 * We have found a new contiguous block. 447 * 448 * First we have to replace the old block pointers with the new 449 * block pointers in the inode and indirect blocks associated 450 * with the file. 451 */ 452 #ifdef DEBUG 453 if (prtrealloc) 454 printf("realloc: ino %d, lbns %d-%d\n\told:", ip->i_number, 455 start_lbn, end_lbn); 456 #endif 457 blkno = newblk; 458 for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) { 459 ufs_daddr_t ba; 460 461 if (i == ssize) { 462 bap = ebap; 463 soff = -i; 464 } 465 ba = ufs_rw32(*bap, UFS_FSNEEDSWAP(fs)); 466 #ifdef DIAGNOSTIC 467 if (!ffs_checkblk(ip, 468 dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize)) 469 panic("ffs_reallocblks: unallocated block 2"); 470 if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != ba) 471 panic("ffs_reallocblks: alloc mismatch"); 472 #endif 473 #ifdef DEBUG 474 if (prtrealloc) 475 printf(" %d,", ba); 476 #endif 477 if (DOINGSOFTDEP(vp)) { 478 if (sbap == &ip->i_ffs_db[0] && i < ssize) 479 softdep_setup_allocdirect(ip, start_lbn + i, 480 blkno, ba, fs->fs_bsize, fs->fs_bsize, 481 buflist->bs_children[i]); 482 else 483 softdep_setup_allocindir_page(ip, start_lbn + i, 484 i < ssize ? sbp : ebp, soff + i, blkno, 485 ba, buflist->bs_children[i]); 486 } 487 *bap++ = ufs_rw32(blkno, UFS_FSNEEDSWAP(fs)); 488 } 489 /* 490 * Next we must write out the modified inode and indirect blocks. 491 * For strict correctness, the writes should be synchronous since 492 * the old block values may have been written to disk. In practise 493 * they are almost never written, but if we are concerned about 494 * strict correctness, the `doasyncfree' flag should be set to zero. 495 * 496 * The test on `doasyncfree' should be changed to test a flag 497 * that shows whether the associated buffers and inodes have 498 * been written. The flag should be set when the cluster is 499 * started and cleared whenever the buffer or inode is flushed. 500 * We can then check below to see if it is set, and do the 501 * synchronous write only when it has been cleared. 502 */ 503 if (sbap != &ip->i_ffs_db[0]) { 504 if (doasyncfree) 505 bdwrite(sbp); 506 else 507 bwrite(sbp); 508 } else { 509 ip->i_flag |= IN_CHANGE | IN_UPDATE; 510 if (!doasyncfree) 511 VOP_UPDATE(vp, NULL, NULL, 1); 512 } 513 if (ssize < len) { 514 if (doasyncfree) 515 bdwrite(ebp); 516 else 517 bwrite(ebp); 518 } 519 /* 520 * Last, free the old blocks and assign the new blocks to the buffers. 
 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ip,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
		if (!ffs_checkblk(ip,
		    dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_ffs_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(v)
	void *v;
{
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap = v;
	struct vnode *pvp = ap->a_pvp;
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	if (ip->i_ffs_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_ffs_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_ffs_blocks) {				/* XXX */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_ffs_blocks);
		ip->i_ffs_blocks = 0;
	}
	ip->i_ffs_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	ip->i_ffs_gen++;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
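 *
 * For example (illustrative figures only): if the average is 100 free
 * inodes per group, a group with 120 free inodes and 3 directories is
 * chosen over one with 200 free inodes and 7 directories, since both
 * meet the average and the former holds fewer directories.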
639 */ 640 static ino_t 641 ffs_dirpref(fs) 642 struct fs *fs; 643 { 644 int cg, minndir, mincg, avgifree; 645 646 avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; 647 minndir = fs->fs_ipg; 648 mincg = 0; 649 for (cg = 0; cg < fs->fs_ncg; cg++) 650 if (fs->fs_cs(fs, cg).cs_ndir < minndir && 651 fs->fs_cs(fs, cg).cs_nifree >= avgifree) { 652 mincg = cg; 653 minndir = fs->fs_cs(fs, cg).cs_ndir; 654 } 655 return ((ino_t)(fs->fs_ipg * mincg)); 656 } 657 658 /* 659 * Select the desired position for the next block in a file. The file is 660 * logically divided into sections. The first section is composed of the 661 * direct blocks. Each additional section contains fs_maxbpg blocks. 662 * 663 * If no blocks have been allocated in the first section, the policy is to 664 * request a block in the same cylinder group as the inode that describes 665 * the file. If no blocks have been allocated in any other section, the 666 * policy is to place the section in a cylinder group with a greater than 667 * average number of free blocks. An appropriate cylinder group is found 668 * by using a rotor that sweeps the cylinder groups. When a new group of 669 * blocks is needed, the sweep begins in the cylinder group following the 670 * cylinder group from which the previous allocation was made. The sweep 671 * continues until a cylinder group with greater than the average number 672 * of free blocks is found. If the allocation is for the first block in an 673 * indirect block, the information on the previous allocation is unavailable; 674 * here a best guess is made based upon the logical block number being 675 * allocated. 676 * 677 * If a section is already partially allocated, the policy is to 678 * contiguously allocate fs_maxcontig blocks. The end of one of these 679 * contiguous blocks and the beginning of the next is physically separated 680 * so that the disk head will be in transit between them for at least 681 * fs_rotdelay milliseconds. This is to allow time for the processor to 682 * schedule another I/O transfer. 683 */ 684 ufs_daddr_t 685 ffs_blkpref(ip, lbn, indx, bap) 686 struct inode *ip; 687 ufs_daddr_t lbn; 688 int indx; 689 ufs_daddr_t *bap; 690 { 691 struct fs *fs; 692 int cg; 693 int avgbfree, startcg; 694 ufs_daddr_t nextblk; 695 696 fs = ip->i_fs; 697 if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) { 698 if (lbn < NDADDR + NINDIR(fs)) { 699 cg = ino_to_cg(fs, ip->i_number); 700 return (fs->fs_fpg * cg + fs->fs_frag); 701 } 702 /* 703 * Find a cylinder with greater than average number of 704 * unused data blocks. 705 */ 706 if (indx == 0 || bap[indx - 1] == 0) 707 startcg = 708 ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg; 709 else 710 startcg = dtog(fs, 711 ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1); 712 startcg %= fs->fs_ncg; 713 avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg; 714 for (cg = startcg; cg < fs->fs_ncg; cg++) 715 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 716 fs->fs_cgrotor = cg; 717 return (fs->fs_fpg * cg + fs->fs_frag); 718 } 719 for (cg = 0; cg <= startcg; cg++) 720 if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) { 721 fs->fs_cgrotor = cg; 722 return (fs->fs_fpg * cg + fs->fs_frag); 723 } 724 return (0); 725 } 726 /* 727 * One or more previous blocks have been laid out. If less 728 * than fs_maxcontig previous blocks are contiguous, the 729 * next block is requested contiguously, otherwise it is 730 * requested rotationally delayed by fs_rotdelay milliseconds. 
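 *
 * As a worked example (illustrative figures only): with fs_rotdelay = 4 ms,
 * fs_rps = 60 revolutions/sec, fs_nsect = 32 sectors/track and NSPF(fs) = 2
 * sectors per fragment, the conversion below yields
 * 4 * 60 * 32 / (2 * 1000) = 3 fragments (integer division), which
 * roundup() then raises to the next multiple of fs_frag, e.g. 8 fragments
 * on an 8K/1K file system.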
 */
	nextblk = ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
	if (indx < fs->fs_maxcontig ||
	    ufs_rw32(bap[indx - fs->fs_maxcontig], UFS_FSNEEDSWAP(fs)) +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	ufs_daddr_t (*allocator) __P((struct inode *, int, ufs_daddr_t, int));
{
	struct fs *fs;
	long result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
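 *
 * For example (illustrative only): growing a 2-fragment allocation to
 * 4 fragments succeeds only if the 2 fragments immediately following
 * the old ones are free and the enlarged run still lies within the
 * original fs_frag-fragment block; an extension never crosses a block
 * boundary.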
810 */ 811 static ufs_daddr_t 812 ffs_fragextend(ip, cg, bprev, osize, nsize) 813 struct inode *ip; 814 int cg; 815 long bprev; 816 int osize, nsize; 817 { 818 struct fs *fs; 819 struct cg *cgp; 820 struct buf *bp; 821 long bno; 822 int frags, bbase; 823 int i, error; 824 825 fs = ip->i_fs; 826 if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize)) 827 return (0); 828 frags = numfrags(fs, nsize); 829 bbase = fragnum(fs, bprev); 830 if (bbase > fragnum(fs, (bprev + frags - 1))) { 831 /* cannot extend across a block boundary */ 832 return (0); 833 } 834 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 835 (int)fs->fs_cgsize, NOCRED, &bp); 836 if (error) { 837 brelse(bp); 838 return (0); 839 } 840 cgp = (struct cg *)bp->b_data; 841 if (!cg_chkmagic(cgp, UFS_FSNEEDSWAP(fs))) { 842 brelse(bp); 843 return (0); 844 } 845 cgp->cg_time = ufs_rw32(time.tv_sec, UFS_FSNEEDSWAP(fs)); 846 bno = dtogd(fs, bprev); 847 for (i = numfrags(fs, osize); i < frags; i++) 848 if (isclr(cg_blksfree(cgp, UFS_FSNEEDSWAP(fs)), bno + i)) { 849 brelse(bp); 850 return (0); 851 } 852 /* 853 * the current fragment can be extended 854 * deduct the count on fragment being extended into 855 * increase the count on the remaining fragment (if any) 856 * allocate the extended piece 857 */ 858 for (i = frags; i < fs->fs_frag - bbase; i++) 859 if (isclr(cg_blksfree(cgp, UFS_FSNEEDSWAP(fs)), bno + i)) 860 break; 861 ufs_add32(cgp->cg_frsum[i - numfrags(fs, osize)], -1, UFS_FSNEEDSWAP(fs)); 862 if (i != frags) 863 ufs_add32(cgp->cg_frsum[i - frags], 1, UFS_FSNEEDSWAP(fs)); 864 for (i = numfrags(fs, osize); i < frags; i++) { 865 clrbit(cg_blksfree(cgp, UFS_FSNEEDSWAP(fs)), bno + i); 866 ufs_add32(cgp->cg_cs.cs_nffree, -1, UFS_FSNEEDSWAP(fs)); 867 fs->fs_cstotal.cs_nffree--; 868 fs->fs_cs(fs, cg).cs_nffree--; 869 } 870 fs->fs_fmod = 1; 871 if (DOINGSOFTDEP(ITOV(ip))) 872 softdep_setup_blkmapdep(bp, fs, bprev); 873 bdwrite(bp); 874 return (bprev); 875 } 876 877 /* 878 * Determine whether a block can be allocated. 879 * 880 * Check to see if a block of the appropriate size is available, 881 * and if it is, allocate it. 
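 *
 * For example (illustrative only): a request for 2 fragments first
 * consults cg_frsum[] for an existing free run of 2 or more fragments
 * in this cylinder group; only when no such run exists is a whole free
 * block taken and split, with the leftover fragments returned to the
 * free counts.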
882 */ 883 static ufs_daddr_t 884 ffs_alloccg(ip, cg, bpref, size) 885 struct inode *ip; 886 int cg; 887 ufs_daddr_t bpref; 888 int size; 889 { 890 struct cg *cgp; 891 struct buf *bp; 892 ufs_daddr_t bno, blkno; 893 int error, frags, allocsiz, i; 894 struct fs *fs = ip->i_fs; 895 #ifdef FFS_EI 896 const int needswap = UFS_FSNEEDSWAP(fs); 897 #endif 898 899 if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) 900 return (0); 901 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 902 (int)fs->fs_cgsize, NOCRED, &bp); 903 if (error) { 904 brelse(bp); 905 return (0); 906 } 907 cgp = (struct cg *)bp->b_data; 908 if (!cg_chkmagic(cgp, needswap) || 909 (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) { 910 brelse(bp); 911 return (0); 912 } 913 cgp->cg_time = ufs_rw32(time.tv_sec, needswap); 914 if (size == fs->fs_bsize) { 915 bno = ffs_alloccgblk(ip, bp, bpref); 916 bdwrite(bp); 917 return (bno); 918 } 919 /* 920 * check to see if any fragments are already available 921 * allocsiz is the size which will be allocated, hacking 922 * it down to a smaller size if necessary 923 */ 924 frags = numfrags(fs, size); 925 for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) 926 if (cgp->cg_frsum[allocsiz] != 0) 927 break; 928 if (allocsiz == fs->fs_frag) { 929 /* 930 * no fragments were available, so a block will be 931 * allocated, and hacked up 932 */ 933 if (cgp->cg_cs.cs_nbfree == 0) { 934 brelse(bp); 935 return (0); 936 } 937 bno = ffs_alloccgblk(ip, bp, bpref); 938 bpref = dtogd(fs, bno); 939 for (i = frags; i < fs->fs_frag; i++) 940 setbit(cg_blksfree(cgp, needswap), bpref + i); 941 i = fs->fs_frag - frags; 942 ufs_add32(cgp->cg_cs.cs_nffree, i, needswap); 943 fs->fs_cstotal.cs_nffree += i; 944 fs->fs_cs(fs, cg).cs_nffree += i; 945 fs->fs_fmod = 1; 946 ufs_add32(cgp->cg_frsum[i], 1, needswap); 947 bdwrite(bp); 948 return (bno); 949 } 950 bno = ffs_mapsearch(fs, cgp, bpref, allocsiz); 951 #if 0 952 /* 953 * XXX fvdl mapsearch will panic, and never return -1 954 * also: returning NULL as ufs_daddr_t ? 955 */ 956 if (bno < 0) { 957 brelse(bp); 958 return (0); 959 } 960 #endif 961 for (i = 0; i < frags; i++) 962 clrbit(cg_blksfree(cgp, needswap), bno + i); 963 ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap); 964 fs->fs_cstotal.cs_nffree -= frags; 965 fs->fs_cs(fs, cg).cs_nffree -= frags; 966 fs->fs_fmod = 1; 967 ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap); 968 if (frags != allocsiz) 969 ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap); 970 blkno = cg * fs->fs_fpg + bno; 971 if (DOINGSOFTDEP(ITOV(ip))) 972 softdep_setup_blkmapdep(bp, fs, blkno); 973 bdwrite(bp); 974 return blkno; 975 } 976 977 /* 978 * Allocate a block in a cylinder group. 979 * 980 * This algorithm implements the following policy: 981 * 1) allocate the requested block. 982 * 2) allocate a rotationally optimal block in the same cylinder. 983 * 3) allocate the next available block on the block rotor for the 984 * specified cylinder group. 985 * Note that this routine only allocates fs_bsize blocks; these 986 * blocks may be fragmented by the routine that allocates them. 
987 */ 988 static ufs_daddr_t 989 ffs_alloccgblk(ip, bp, bpref) 990 struct inode *ip; 991 struct buf *bp; 992 ufs_daddr_t bpref; 993 { 994 struct cg *cgp; 995 ufs_daddr_t bno, blkno; 996 int cylno, pos, delta; 997 short *cylbp; 998 int i; 999 struct fs *fs = ip->i_fs; 1000 #ifdef FFS_EI 1001 const int needswap = UFS_FSNEEDSWAP(fs); 1002 #endif 1003 1004 cgp = (struct cg *)bp->b_data; 1005 if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) { 1006 bpref = ufs_rw32(cgp->cg_rotor, needswap); 1007 goto norot; 1008 } 1009 bpref = blknum(fs, bpref); 1010 bpref = dtogd(fs, bpref); 1011 /* 1012 * if the requested block is available, use it 1013 */ 1014 if (ffs_isblock(fs, cg_blksfree(cgp, needswap), 1015 fragstoblks(fs, bpref))) { 1016 bno = bpref; 1017 goto gotit; 1018 } 1019 if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) { 1020 /* 1021 * Block layout information is not available. 1022 * Leaving bpref unchanged means we take the 1023 * next available free block following the one 1024 * we just allocated. Hopefully this will at 1025 * least hit a track cache on drives of unknown 1026 * geometry (e.g. SCSI). 1027 */ 1028 goto norot; 1029 } 1030 /* 1031 * check for a block available on the same cylinder 1032 */ 1033 cylno = cbtocylno(fs, bpref); 1034 if (cg_blktot(cgp, needswap)[cylno] == 0) 1035 goto norot; 1036 /* 1037 * check the summary information to see if a block is 1038 * available in the requested cylinder starting at the 1039 * requested rotational position and proceeding around. 1040 */ 1041 cylbp = cg_blks(fs, cgp, cylno, needswap); 1042 pos = cbtorpos(fs, bpref); 1043 for (i = pos; i < fs->fs_nrpos; i++) 1044 if (ufs_rw16(cylbp[i], needswap) > 0) 1045 break; 1046 if (i == fs->fs_nrpos) 1047 for (i = 0; i < pos; i++) 1048 if (ufs_rw16(cylbp[i], needswap) > 0) 1049 break; 1050 if (ufs_rw16(cylbp[i], needswap) > 0) { 1051 /* 1052 * found a rotational position, now find the actual 1053 * block. A panic if none is actually there. 1054 */ 1055 pos = cylno % fs->fs_cpc; 1056 bno = (cylno - pos) * fs->fs_spc / NSPB(fs); 1057 if (fs_postbl(fs, pos)[i] == -1) { 1058 printf("pos = %d, i = %d, fs = %s\n", 1059 pos, i, fs->fs_fsmnt); 1060 panic("ffs_alloccgblk: cyl groups corrupted"); 1061 } 1062 for (i = fs_postbl(fs, pos)[i];; ) { 1063 if (ffs_isblock(fs, cg_blksfree(cgp, needswap), bno + i)) { 1064 bno = blkstofrags(fs, (bno + i)); 1065 goto gotit; 1066 } 1067 delta = fs_rotbl(fs)[i]; 1068 if (delta <= 0 || 1069 delta + i > fragstoblks(fs, fs->fs_fpg)) 1070 break; 1071 i += delta; 1072 } 1073 printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt); 1074 panic("ffs_alloccgblk: can't find blk in cyl"); 1075 } 1076 norot: 1077 /* 1078 * no blocks in the requested cylinder, so take next 1079 * available one in this cylinder group. 
1080 */ 1081 bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag); 1082 if (bno < 0) 1083 return (0); 1084 cgp->cg_rotor = ufs_rw32(bno, needswap); 1085 gotit: 1086 blkno = fragstoblks(fs, bno); 1087 ffs_clrblock(fs, cg_blksfree(cgp, needswap), (long)blkno); 1088 ffs_clusteracct(fs, cgp, blkno, -1); 1089 ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap); 1090 fs->fs_cstotal.cs_nbfree--; 1091 fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--; 1092 cylno = cbtocylno(fs, bno); 1093 ufs_add16(cg_blks(fs, cgp, cylno, needswap)[cbtorpos(fs, bno)], -1, 1094 needswap); 1095 ufs_add32(cg_blktot(cgp, needswap)[cylno], -1, needswap); 1096 fs->fs_fmod = 1; 1097 blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno; 1098 if (DOINGSOFTDEP(ITOV(ip))) 1099 softdep_setup_blkmapdep(bp, fs, blkno); 1100 return (blkno); 1101 } 1102 1103 /* 1104 * Determine whether a cluster can be allocated. 1105 * 1106 * We do not currently check for optimal rotational layout if there 1107 * are multiple choices in the same cylinder group. Instead we just 1108 * take the first one that we find following bpref. 1109 */ 1110 static ufs_daddr_t 1111 ffs_clusteralloc(ip, cg, bpref, len) 1112 struct inode *ip; 1113 int cg; 1114 ufs_daddr_t bpref; 1115 int len; 1116 { 1117 struct fs *fs; 1118 struct cg *cgp; 1119 struct buf *bp; 1120 int i, got, run, bno, bit, map; 1121 u_char *mapp; 1122 int32_t *lp; 1123 1124 fs = ip->i_fs; 1125 if (fs->fs_maxcluster[cg] < len) 1126 return (0); 1127 if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, 1128 NOCRED, &bp)) 1129 goto fail; 1130 cgp = (struct cg *)bp->b_data; 1131 if (!cg_chkmagic(cgp, UFS_FSNEEDSWAP(fs))) 1132 goto fail; 1133 /* 1134 * Check to see if a cluster of the needed size (or bigger) is 1135 * available in this cylinder group. 1136 */ 1137 lp = &cg_clustersum(cgp, UFS_FSNEEDSWAP(fs))[len]; 1138 for (i = len; i <= fs->fs_contigsumsize; i++) 1139 if (ufs_rw32(*lp++, UFS_FSNEEDSWAP(fs)) > 0) 1140 break; 1141 if (i > fs->fs_contigsumsize) { 1142 /* 1143 * This is the first time looking for a cluster in this 1144 * cylinder group. Update the cluster summary information 1145 * to reflect the true maximum sized cluster so that 1146 * future cluster allocation requests can avoid reading 1147 * the cylinder group map only to find no clusters. 1148 */ 1149 lp = &cg_clustersum(cgp, UFS_FSNEEDSWAP(fs))[len - 1]; 1150 for (i = len - 1; i > 0; i--) 1151 if (ufs_rw32(*lp--, UFS_FSNEEDSWAP(fs)) > 0) 1152 break; 1153 fs->fs_maxcluster[cg] = i; 1154 goto fail; 1155 } 1156 /* 1157 * Search the cluster map to find a big enough cluster. 1158 * We take the first one that we find, even if it is larger 1159 * than we need as we prefer to get one close to the previous 1160 * block allocation. We do not search before the current 1161 * preference point as we do not want to allocate a block 1162 * that is allocated before the previous one (as we will 1163 * then have to wait for another pass of the elevator 1164 * algorithm before it will be read). We prefer to fail and 1165 * be recalled to try an allocation in the next cylinder group. 
1166 */ 1167 if (dtog(fs, bpref) != cg) 1168 bpref = 0; 1169 else 1170 bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref))); 1171 mapp = &cg_clustersfree(cgp, UFS_FSNEEDSWAP(fs))[bpref / NBBY]; 1172 map = *mapp++; 1173 bit = 1 << (bpref % NBBY); 1174 for (run = 0, got = bpref; 1175 got < ufs_rw32(cgp->cg_nclusterblks, UFS_FSNEEDSWAP(fs)); got++) { 1176 if ((map & bit) == 0) { 1177 run = 0; 1178 } else { 1179 run++; 1180 if (run == len) 1181 break; 1182 } 1183 if ((got & (NBBY - 1)) != (NBBY - 1)) { 1184 bit <<= 1; 1185 } else { 1186 map = *mapp++; 1187 bit = 1; 1188 } 1189 } 1190 if (got == ufs_rw32(cgp->cg_nclusterblks, UFS_FSNEEDSWAP(fs))) 1191 goto fail; 1192 /* 1193 * Allocate the cluster that we have found. 1194 */ 1195 #ifdef DIAGNOSTIC 1196 for (i = 1; i <= len; i++) 1197 if (!ffs_isblock(fs, cg_blksfree(cgp, UFS_FSNEEDSWAP(fs)), 1198 got - run + i)) 1199 panic("ffs_clusteralloc: map mismatch"); 1200 #endif 1201 bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1); 1202 if (dtog(fs, bno) != cg) 1203 panic("ffs_clusteralloc: allocated out of group"); 1204 len = blkstofrags(fs, len); 1205 for (i = 0; i < len; i += fs->fs_frag) 1206 if ((got = ffs_alloccgblk(ip, bp, bno + i)) != bno + i) 1207 panic("ffs_clusteralloc: lost block"); 1208 bdwrite(bp); 1209 return (bno); 1210 1211 fail: 1212 brelse(bp); 1213 return (0); 1214 } 1215 1216 /* 1217 * Determine whether an inode can be allocated. 1218 * 1219 * Check to see if an inode is available, and if it is, 1220 * allocate it using the following policy: 1221 * 1) allocate the requested inode. 1222 * 2) allocate the next available inode after the requested 1223 * inode in the specified cylinder group. 1224 */ 1225 static ufs_daddr_t 1226 ffs_nodealloccg(ip, cg, ipref, mode) 1227 struct inode *ip; 1228 int cg; 1229 ufs_daddr_t ipref; 1230 int mode; 1231 { 1232 struct cg *cgp; 1233 struct buf *bp; 1234 int error, start, len, loc, map, i; 1235 struct fs *fs = ip->i_fs; 1236 #ifdef FFS_EI 1237 const int needswap = UFS_FSNEEDSWAP(fs); 1238 #endif 1239 1240 if (fs->fs_cs(fs, cg).cs_nifree == 0) 1241 return (0); 1242 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 1243 (int)fs->fs_cgsize, NOCRED, &bp); 1244 if (error) { 1245 brelse(bp); 1246 return (0); 1247 } 1248 cgp = (struct cg *)bp->b_data; 1249 if (!cg_chkmagic(cgp, needswap) || cgp->cg_cs.cs_nifree == 0) { 1250 brelse(bp); 1251 return (0); 1252 } 1253 cgp->cg_time = ufs_rw32(time.tv_sec, needswap); 1254 if (ipref) { 1255 ipref %= fs->fs_ipg; 1256 if (isclr(cg_inosused(cgp, needswap), ipref)) 1257 goto gotit; 1258 } 1259 start = ufs_rw32(cgp->cg_irotor, needswap) / NBBY; 1260 len = howmany(fs->fs_ipg - ufs_rw32(cgp->cg_irotor, needswap), 1261 NBBY); 1262 loc = skpc(0xff, len, &cg_inosused(cgp, needswap)[start]); 1263 if (loc == 0) { 1264 len = start + 1; 1265 start = 0; 1266 loc = skpc(0xff, len, &cg_inosused(cgp, needswap)[0]); 1267 if (loc == 0) { 1268 printf("cg = %d, irotor = %d, fs = %s\n", 1269 cg, ufs_rw32(cgp->cg_irotor, needswap), 1270 fs->fs_fsmnt); 1271 panic("ffs_nodealloccg: map corrupted"); 1272 /* NOTREACHED */ 1273 } 1274 } 1275 i = start + len - loc; 1276 map = cg_inosused(cgp, needswap)[i]; 1277 ipref = i * NBBY; 1278 for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) { 1279 if ((map & i) == 0) { 1280 cgp->cg_irotor = ufs_rw32(ipref, needswap); 1281 goto gotit; 1282 } 1283 } 1284 printf("fs = %s\n", fs->fs_fsmnt); 1285 panic("ffs_nodealloccg: block not in map"); 1286 /* NOTREACHED */ 1287 gotit: 1288 if (DOINGSOFTDEP(ITOV(ip))) 1289 softdep_setup_inomapdep(bp, ip, cg * 
fs->fs_ipg + ipref); 1290 setbit(cg_inosused(cgp, needswap), ipref); 1291 ufs_add32(cgp->cg_cs.cs_nifree, -1, needswap); 1292 fs->fs_cstotal.cs_nifree--; 1293 fs->fs_cs(fs, cg).cs_nifree--; 1294 fs->fs_fmod = 1; 1295 if ((mode & IFMT) == IFDIR) { 1296 ufs_add32(cgp->cg_cs.cs_ndir, 1, needswap); 1297 fs->fs_cstotal.cs_ndir++; 1298 fs->fs_cs(fs, cg).cs_ndir++; 1299 } 1300 bdwrite(bp); 1301 return (cg * fs->fs_ipg + ipref); 1302 } 1303 1304 /* 1305 * Free a block or fragment. 1306 * 1307 * The specified block or fragment is placed back in the 1308 * free map. If a fragment is deallocated, a possible 1309 * block reassembly is checked. 1310 */ 1311 void 1312 ffs_blkfree(ip, bno, size) 1313 struct inode *ip; 1314 ufs_daddr_t bno; 1315 long size; 1316 { 1317 struct cg *cgp; 1318 struct buf *bp; 1319 ufs_daddr_t blkno; 1320 int i, error, cg, blk, frags, bbase; 1321 struct fs *fs = ip->i_fs; 1322 const int needswap = UFS_FSNEEDSWAP(fs); 1323 1324 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 || 1325 fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) { 1326 printf("dev = 0x%x, bno = %u bsize = %d, size = %ld, fs = %s\n", 1327 ip->i_dev, bno, fs->fs_bsize, size, fs->fs_fsmnt); 1328 panic("blkfree: bad size"); 1329 } 1330 cg = dtog(fs, bno); 1331 if ((u_int)bno >= fs->fs_size) { 1332 printf("bad block %d, ino %d\n", bno, ip->i_number); 1333 ffs_fserr(fs, ip->i_ffs_uid, "bad block"); 1334 return; 1335 } 1336 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 1337 (int)fs->fs_cgsize, NOCRED, &bp); 1338 if (error) { 1339 brelse(bp); 1340 return; 1341 } 1342 cgp = (struct cg *)bp->b_data; 1343 if (!cg_chkmagic(cgp, needswap)) { 1344 brelse(bp); 1345 return; 1346 } 1347 cgp->cg_time = ufs_rw32(time.tv_sec, needswap); 1348 bno = dtogd(fs, bno); 1349 if (size == fs->fs_bsize) { 1350 blkno = fragstoblks(fs, bno); 1351 if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), blkno)) { 1352 printf("dev = 0x%x, block = %d, fs = %s\n", 1353 ip->i_dev, bno, fs->fs_fsmnt); 1354 panic("blkfree: freeing free block"); 1355 } 1356 ffs_setblock(fs, cg_blksfree(cgp, needswap), blkno); 1357 ffs_clusteracct(fs, cgp, blkno, 1); 1358 ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap); 1359 fs->fs_cstotal.cs_nbfree++; 1360 fs->fs_cs(fs, cg).cs_nbfree++; 1361 i = cbtocylno(fs, bno); 1362 ufs_add16(cg_blks(fs, cgp, i, needswap)[cbtorpos(fs, bno)], 1, 1363 needswap); 1364 ufs_add32(cg_blktot(cgp, needswap)[i], 1, needswap); 1365 } else { 1366 bbase = bno - fragnum(fs, bno); 1367 /* 1368 * decrement the counts associated with the old frags 1369 */ 1370 blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase); 1371 ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap); 1372 /* 1373 * deallocate the fragment 1374 */ 1375 frags = numfrags(fs, size); 1376 for (i = 0; i < frags; i++) { 1377 if (isset(cg_blksfree(cgp, needswap), bno + i)) { 1378 printf("dev = 0x%x, block = %d, fs = %s\n", 1379 ip->i_dev, bno + i, fs->fs_fsmnt); 1380 panic("blkfree: freeing free frag"); 1381 } 1382 setbit(cg_blksfree(cgp, needswap), bno + i); 1383 } 1384 ufs_add32(cgp->cg_cs.cs_nffree, i, needswap); 1385 fs->fs_cstotal.cs_nffree += i; 1386 fs->fs_cs(fs, cg).cs_nffree += i; 1387 /* 1388 * add back in counts associated with the new frags 1389 */ 1390 blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase); 1391 ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap); 1392 /* 1393 * if a complete block has been reassembled, account for it 1394 */ 1395 blkno = fragstoblks(fs, bbase); 1396 if (ffs_isblock(fs, cg_blksfree(cgp, needswap), blkno)) { 1397 
ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap); 1398 fs->fs_cstotal.cs_nffree -= fs->fs_frag; 1399 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; 1400 ffs_clusteracct(fs, cgp, blkno, 1); 1401 ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap); 1402 fs->fs_cstotal.cs_nbfree++; 1403 fs->fs_cs(fs, cg).cs_nbfree++; 1404 i = cbtocylno(fs, bbase); 1405 ufs_add16(cg_blks(fs, cgp, i, needswap)[cbtorpos(fs, 1406 bbase)], 1, 1407 needswap); 1408 ufs_add32(cg_blktot(cgp, needswap)[i], 1, needswap); 1409 } 1410 } 1411 fs->fs_fmod = 1; 1412 bdwrite(bp); 1413 } 1414 1415 #if defined(DIAGNOSTIC) || defined(DEBUG) 1416 /* 1417 * Verify allocation of a block or fragment. Returns true if block or 1418 * fragment is allocated, false if it is free. 1419 */ 1420 static int 1421 ffs_checkblk(ip, bno, size) 1422 struct inode *ip; 1423 ufs_daddr_t bno; 1424 long size; 1425 { 1426 struct fs *fs; 1427 struct cg *cgp; 1428 struct buf *bp; 1429 int i, error, frags, free; 1430 1431 fs = ip->i_fs; 1432 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) { 1433 printf("bsize = %d, size = %ld, fs = %s\n", 1434 fs->fs_bsize, size, fs->fs_fsmnt); 1435 panic("checkblk: bad size"); 1436 } 1437 if ((u_int)bno >= fs->fs_size) 1438 panic("checkblk: bad block %d", bno); 1439 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))), 1440 (int)fs->fs_cgsize, NOCRED, &bp); 1441 if (error) { 1442 brelse(bp); 1443 return 0; 1444 } 1445 cgp = (struct cg *)bp->b_data; 1446 if (!cg_chkmagic(cgp, UFS_FSNEEDSWAP(fs))) { 1447 brelse(bp); 1448 return 0; 1449 } 1450 bno = dtogd(fs, bno); 1451 if (size == fs->fs_bsize) { 1452 free = ffs_isblock(fs, cg_blksfree(cgp, UFS_FSNEEDSWAP(fs)), 1453 fragstoblks(fs, bno)); 1454 } else { 1455 frags = numfrags(fs, size); 1456 for (free = 0, i = 0; i < frags; i++) 1457 if (isset(cg_blksfree(cgp, UFS_FSNEEDSWAP(fs)), bno + i)) 1458 free++; 1459 if (free != 0 && free != frags) 1460 panic("checkblk: partially free fragment"); 1461 } 1462 brelse(bp); 1463 return (!free); 1464 } 1465 #endif /* DIAGNOSTIC */ 1466 1467 /* 1468 * Free an inode. 1469 */ 1470 int 1471 ffs_vfree(v) 1472 void *v; 1473 { 1474 struct vop_vfree_args /* { 1475 struct vnode *a_pvp; 1476 ino_t a_ino; 1477 int a_mode; 1478 } */ *ap = v; 1479 1480 if (DOINGSOFTDEP(ap->a_pvp)) { 1481 softdep_freefile(ap); 1482 return (0); 1483 } 1484 return (ffs_freefile(ap)); 1485 } 1486 1487 /* 1488 * Do the actual free operation. 1489 * The specified inode is placed back in the free map. 
1490 */ 1491 int 1492 ffs_freefile(v) 1493 void *v; 1494 { 1495 struct vop_vfree_args /* { 1496 struct vnode *a_pvp; 1497 ino_t a_ino; 1498 int a_mode; 1499 } */ *ap = v; 1500 struct cg *cgp; 1501 struct inode *pip = VTOI(ap->a_pvp); 1502 struct fs *fs = pip->i_fs; 1503 ino_t ino = ap->a_ino; 1504 struct buf *bp; 1505 int error, cg; 1506 #ifdef FFS_EI 1507 const int needswap = UFS_FSNEEDSWAP(fs); 1508 #endif 1509 1510 if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg) 1511 panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n", 1512 pip->i_dev, ino, fs->fs_fsmnt); 1513 cg = ino_to_cg(fs, ino); 1514 error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 1515 (int)fs->fs_cgsize, NOCRED, &bp); 1516 if (error) { 1517 brelse(bp); 1518 return (error); 1519 } 1520 cgp = (struct cg *)bp->b_data; 1521 if (!cg_chkmagic(cgp, needswap)) { 1522 brelse(bp); 1523 return (0); 1524 } 1525 cgp->cg_time = ufs_rw32(time.tv_sec, needswap); 1526 ino %= fs->fs_ipg; 1527 if (isclr(cg_inosused(cgp, needswap), ino)) { 1528 printf("dev = 0x%x, ino = %d, fs = %s\n", 1529 pip->i_dev, ino, fs->fs_fsmnt); 1530 if (fs->fs_ronly == 0) 1531 panic("ifree: freeing free inode"); 1532 } 1533 clrbit(cg_inosused(cgp, needswap), ino); 1534 if (ino < ufs_rw32(cgp->cg_irotor, needswap)) 1535 cgp->cg_irotor = ufs_rw32(ino, needswap); 1536 ufs_add32(cgp->cg_cs.cs_nifree, 1, needswap); 1537 fs->fs_cstotal.cs_nifree++; 1538 fs->fs_cs(fs, cg).cs_nifree++; 1539 if ((ap->a_mode & IFMT) == IFDIR) { 1540 ufs_add32(cgp->cg_cs.cs_ndir, -1, needswap); 1541 fs->fs_cstotal.cs_ndir--; 1542 fs->fs_cs(fs, cg).cs_ndir--; 1543 } 1544 fs->fs_fmod = 1; 1545 bdwrite(bp); 1546 return (0); 1547 } 1548 1549 /* 1550 * Find a block of the specified size in the specified cylinder group. 1551 * 1552 * It is a panic if a request is made to find a block if none are 1553 * available. 
1554 */ 1555 static ufs_daddr_t 1556 ffs_mapsearch(fs, cgp, bpref, allocsiz) 1557 struct fs *fs; 1558 struct cg *cgp; 1559 ufs_daddr_t bpref; 1560 int allocsiz; 1561 { 1562 ufs_daddr_t bno; 1563 int start, len, loc, i; 1564 int blk, field, subfield, pos; 1565 int ostart, olen; 1566 #ifdef FFS_EI 1567 const int needswap = UFS_FSNEEDSWAP(fs); 1568 #endif 1569 1570 /* 1571 * find the fragment by searching through the free block 1572 * map for an appropriate bit pattern 1573 */ 1574 if (bpref) 1575 start = dtogd(fs, bpref) / NBBY; 1576 else 1577 start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY; 1578 len = howmany(fs->fs_fpg, NBBY) - start; 1579 ostart = start; 1580 olen = len; 1581 loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp, needswap)[start], 1582 (u_char *)fragtbl[fs->fs_frag], 1583 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 1584 if (loc == 0) { 1585 len = start + 1; 1586 start = 0; 1587 loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp, needswap)[0], 1588 (u_char *)fragtbl[fs->fs_frag], 1589 (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 1590 if (loc == 0) { 1591 printf("start = %d, len = %d, fs = %s\n", 1592 ostart, olen, fs->fs_fsmnt); 1593 printf("offset=%d %ld\n", 1594 ufs_rw32(cgp->cg_freeoff, needswap), 1595 (long)cg_blksfree(cgp, needswap) - (long)cgp); 1596 panic("ffs_alloccg: map corrupted"); 1597 /* NOTREACHED */ 1598 } 1599 } 1600 bno = (start + len - loc) * NBBY; 1601 cgp->cg_frotor = ufs_rw32(bno, needswap); 1602 /* 1603 * found the byte in the map 1604 * sift through the bits to find the selected frag 1605 */ 1606 for (i = bno + NBBY; bno < i; bno += fs->fs_frag) { 1607 blk = blkmap(fs, cg_blksfree(cgp, needswap), bno); 1608 blk <<= 1; 1609 field = around[allocsiz]; 1610 subfield = inside[allocsiz]; 1611 for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { 1612 if ((blk & field) == subfield) 1613 return (bno + pos); 1614 field <<= 1; 1615 subfield <<= 1; 1616 } 1617 } 1618 printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt); 1619 panic("ffs_alloccg: block not in map"); 1620 return (-1); 1621 } 1622 1623 /* 1624 * Update the cluster map because of an allocation or free. 1625 * 1626 * Cnt == 1 means free; cnt == -1 means allocating. 1627 */ 1628 void 1629 ffs_clusteracct(fs, cgp, blkno, cnt) 1630 struct fs *fs; 1631 struct cg *cgp; 1632 ufs_daddr_t blkno; 1633 int cnt; 1634 { 1635 int32_t *sump; 1636 int32_t *lp; 1637 u_char *freemapp, *mapp; 1638 int i, start, end, forw, back, map, bit; 1639 #ifdef FFS_EI 1640 const int needswap = UFS_FSNEEDSWAP(fs); 1641 #endif 1642 1643 if (fs->fs_contigsumsize <= 0) 1644 return; 1645 freemapp = cg_clustersfree(cgp, needswap); 1646 sump = cg_clustersum(cgp, needswap); 1647 /* 1648 * Allocate or clear the actual block. 1649 */ 1650 if (cnt > 0) 1651 setbit(freemapp, blkno); 1652 else 1653 clrbit(freemapp, blkno); 1654 /* 1655 * Find the size of the cluster going forward. 1656 */ 1657 start = blkno + 1; 1658 end = start + fs->fs_contigsumsize; 1659 if (end >= ufs_rw32(cgp->cg_nclusterblks, needswap)) 1660 end = ufs_rw32(cgp->cg_nclusterblks, needswap); 1661 mapp = &freemapp[start / NBBY]; 1662 map = *mapp++; 1663 bit = 1 << (start % NBBY); 1664 for (i = start; i < end; i++) { 1665 if ((map & bit) == 0) 1666 break; 1667 if ((i & (NBBY - 1)) != (NBBY - 1)) { 1668 bit <<= 1; 1669 } else { 1670 map = *mapp++; 1671 bit = 1; 1672 } 1673 } 1674 forw = i - start; 1675 /* 1676 * Find the size of the cluster going backward. 
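 * (Worked example, illustrative only: if the freed block is preceded by
 * one free block and followed by two free blocks, with in-use blocks
 * beyond them, then back = 1 and forw = 2, and the accounting below
 * credits one 4-block cluster to sump[4] while removing the old 2- and
 * 1-block clusters from sump[2] and sump[1].)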
1677 */ 1678 start = blkno - 1; 1679 end = start - fs->fs_contigsumsize; 1680 if (end < 0) 1681 end = -1; 1682 mapp = &freemapp[start / NBBY]; 1683 map = *mapp--; 1684 bit = 1 << (start % NBBY); 1685 for (i = start; i > end; i--) { 1686 if ((map & bit) == 0) 1687 break; 1688 if ((i & (NBBY - 1)) != 0) { 1689 bit >>= 1; 1690 } else { 1691 map = *mapp--; 1692 bit = 1 << (NBBY - 1); 1693 } 1694 } 1695 back = start - i; 1696 /* 1697 * Account for old cluster and the possibly new forward and 1698 * back clusters. 1699 */ 1700 i = back + forw + 1; 1701 if (i > fs->fs_contigsumsize) 1702 i = fs->fs_contigsumsize; 1703 ufs_add32(sump[i], cnt, needswap); 1704 if (back > 0) 1705 ufs_add32(sump[back], -cnt, needswap); 1706 if (forw > 0) 1707 ufs_add32(sump[forw], -cnt, needswap); 1708 1709 /* 1710 * Update cluster summary information. 1711 */ 1712 lp = &sump[fs->fs_contigsumsize]; 1713 for (i = fs->fs_contigsumsize; i > 0; i--) 1714 if (ufs_rw32(*lp--, needswap) > 0) 1715 break; 1716 fs->fs_maxcluster[ufs_rw32(cgp->cg_cgx, needswap)] = i; 1717 } 1718 1719 /* 1720 * Fserr prints the name of a file system with an error diagnostic. 1721 * 1722 * The form of the error message is: 1723 * fs: error message 1724 */ 1725 static void 1726 ffs_fserr(fs, uid, cp) 1727 struct fs *fs; 1728 u_int uid; 1729 char *cp; 1730 { 1731 1732 log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp); 1733 } 1734
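
/*
 * Illustrative sketch (not part of the kernel build, assumed user-space
 * code): the cylinder-group probe order used by ffs_hashalloc() above,
 * reproduced as a stand-alone program.  The name demo_probe_order() is
 * hypothetical and exists only for this example.
 */
#if 0
#include <stdio.h>

static void
demo_probe_order(int icg, int ncg)
{
	int cg, i;

	/* 1: preferred cylinder group */
	printf("%d", icg);
	/* 2: quadratic rehash, visiting icg+1, icg+3, icg+7, ... (mod ncg) */
	for (cg = icg, i = 1; i < ncg; i *= 2) {
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		printf(" %d", cg);
	}
	/* 3: brute force search over the groups starting at icg + 2 */
	cg = (icg + 2) % ncg;
	for (i = 2; i < ncg; i++) {
		printf(" %d", cg);
		if (++cg == ncg)
			cg = 0;
	}
	printf("\n");
}

int
main(void)
{
	demo_probe_order(5, 16);	/* e.g. prefer group 5 of 16 groups */
	return (0);
}
#endif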