/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/ufs/ffs/ffs_alloc.c,v 1.64.2.2 2001/09/21 19:15:21 dillon Exp $
 * $DragonFly: src/sys/vfs/ufs/ffs_alloc.c,v 1.27 2006/12/29 17:10:20 swildner Exp $
 */

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <sys/taskqueue.h>
#include <machine/inttypes.h>

#include <sys/buf2.h>

#include "quota.h"
#include "inode.h"
#include "ufs_extern.h"
#include "ufsmount.h"

#include "fs.h"
#include "ffs_extern.h"

typedef ufs_daddr_t allocfcn_t (struct inode *ip, int cg, ufs_daddr_t bpref,
				int size);

static ufs_daddr_t ffs_alloccg (struct inode *, int, ufs_daddr_t, int);
static ufs_daddr_t
	      ffs_alloccgblk (struct inode *, struct buf *, ufs_daddr_t);
static void ffs_blkfree_cg(struct fs *, struct vnode *, cdev_t, ino_t,
			   uint32_t, ufs_daddr_t, long);
#ifdef DIAGNOSTIC
static int ffs_checkblk (struct inode *, ufs_daddr_t, long);
#endif
static void ffs_clusteracct (struct fs *, struct cg *, ufs_daddr_t, int);
static ufs_daddr_t ffs_clusteralloc (struct inode *, int, ufs_daddr_t, int);
static ino_t ffs_dirpref (struct inode *);
static ufs_daddr_t ffs_fragextend (struct inode *, int, long, int, int);
static void ffs_fserr (struct fs *, uint, char *);
static u_long ffs_hashalloc
		(struct inode *, int, long, int, allocfcn_t *);
static ino_t ffs_nodealloccg (struct inode *, int, ufs_daddr_t, int);
static ufs_daddr_t ffs_mapsearch (struct fs *, struct cg *, ufs_daddr_t, int);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
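 *
 * For illustration (values are hypothetical, not from the original
 * comment): on a filesystem with fs_bsize == 8192 and fs_fsize == 1024,
 * a full file block is requested with size == 8192, while a 3KB tail
 * of a file is requested with size == 3072, i.e. three fragments.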
 */
int
ffs_alloc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t bpref, int size,
	  struct ucred *cred, ufs_daddr_t *bnp)
{
	struct fs *fs;
	ufs_daddr_t bno;
	int cg;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		kprintf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
			devtoname(ip->i_dev), (long)fs->fs_bsize, size,
			fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
#ifdef QUOTA
	error = ufs_chkdq(ip, (long)btodb(size), cred, 0);
	if (error)
		return (error);
#endif
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
					 ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) ufs_chkdq(ip, (long)-btodb(size), cred, FORCE);
#endif
nospace:
	ffs_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size.
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(struct inode *ip, ufs_daddr_t lbprev, ufs_daddr_t bpref,
	      int osize, int nsize, struct ucred *cred, struct buf **bpp)
{
	struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;

	*bpp = 0;
	fs = ip->i_fs;
#ifdef DIAGNOSTIC
	if ((uint)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (uint)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		kprintf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* DIAGNOSTIC */
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		kprintf("dev = %s, bsize = %ld, bprev = %ld, fs = %s\n",
			devtoname(ip->i_dev), (long)fs->fs_bsize, (long)bprev,
			fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread(ITOV(ip), lblktodoff(fs, lbprev), osize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_bio2.bio_offset == NOOFFSET) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_bio2.bio_offset = fsbtodoff(fs, bprev);
	}

#ifdef QUOTA
	error = ufs_chkdq(ip, (long)btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize);
	if (bno) {
		if (bp->b_bio2.bio_offset != fsbtodoff(fs, bno))
			panic("ffs_realloccg: bad blockno");
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bzero((char *)bp->b_data + osize, (uint)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		kprintf("dev = %s, optim = %ld, fs = %s\n",
			devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
					 ffs_alloccg);
	if (bno > 0) {
		bp->b_bio2.bio_offset = fsbtodoff(fs, bno);
		if (!DOINGSOFTDEP(ITOV(ip)))
			ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
				    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bzero((char *)bp->b_data + osize, (uint)nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) ufs_chkdq(ip, (long)-btodb(nsize - osize), cred, FORCE);
#endif
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, FFS_ASYNCFREE, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, FFS_REALLOCBLKS, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

/*
 * ffs_reallocblks(struct vnode *a_vp, struct cluster_save *a_buflist)
 */
int
ffs_reallocblks(struct vop_reallocblks_args *ap)
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs_daddr_t *bap, *sbap, *ebap = 0;
	struct cluster_save *buflist;
	ufs_daddr_t start_lbn, end_lbn, soff, newblk, blkno;
#ifdef DIAGNOSTIC
	off_t boffset;
#endif
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, len, slen, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);
	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	if (fs->fs_contigsumsize <= 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = lblkno(fs, buflist->bs_children[0]->b_loffset);
	end_lbn = start_lbn + len - 1;
#ifdef DIAGNOSTIC
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		    dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++) {
		if (buflist->bs_children[i]->b_loffset != lblktodoff(fs, start_lbn) + lblktodoff(fs, i))
			panic("ffs_reallocblks: non-logical cluster");
	}
	boffset = buflist->bs_children[0]->b_bio2.bio_offset;
	ssize = (int)fsbtodoff(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_bio2.bio_offset != boffset + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dofftofsb(fs, buflist->bs_children[0]->b_bio2.bio_offset)) !=
	    dtog(fs, dofftofsb(fs, buflist->bs_children[len - 1]->b_bio2.bio_offset)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block and
	 * the number of blocks that will fit into sbap starting at soff.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
		slen = NDADDR - soff;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, lblktodoff(fs, idp->in_lbn), (int)fs->fs_bsize, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs_daddr_t *)sbp->b_data;
		soff = idp->in_off;
		slen = fs->fs_nindir - soff;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	pref = ffs_blkpref(ip, start_lbn, soff, sbap);

	/*
	 * If the block range spans two block maps, get the second map.
	 */
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef DIAGNOSTIC
		if (start_ap[start_lvl-1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, lblktodoff(fs, idp->in_lbn), (int)fs->fs_bsize, &ebp))
			goto fail;
		ebap = (ufs_daddr_t *)ebp->b_data;
	}

	/*
	 * Make sure we aren't spanning more than two blockmaps.  ssize is
	 * our calculation of the span we have to scan in the first blockmap,
	 * while slen is our calculation of the number of entries available
	 * in the first blockmap (from soff).
	 */
	if (ssize > slen) {
		panic("ffs_reallocblks: range spans more than two blockmaps!"
		      " start_lbn %ld len %d (%d/%d)",
		      (long)start_lbn, len, slen, ssize);
	}
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (ufs_daddr_t)ffs_hashalloc(ip, dtog(fs, pref), (long)pref,
	    len, ffs_clusteralloc)) == 0)
		goto fail;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		kprintf("realloc: ino %ju, lbns %d-%d\n\told:",
			(uintmax_t)ip->i_number, start_lbn, end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		    dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			kprintf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		kprintf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ip,
			    dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset),
			    fs->fs_bsize);
		buflist->bs_children[i]->b_bio2.bio_offset = fsbtodoff(fs, blkno);
#ifdef DIAGNOSTIC
		if (!ffs_checkblk(ip,
		    dofftofsb(fs, buflist->bs_children[i]->b_bio2.bio_offset), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			kprintf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		kprintf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	ino_t ino, ipref;
	int cg, error;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of dirs created one after another
	 * in the same cg without intervening files.
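	 *
	 * For example, creating several subdirectories in a row leaves
	 * fs_contigdirs[cg] raised, which ffs_dirpref() compares against
	 * its maxcontigdirs ceiling; creating a regular file in between
	 * lowers the count again.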
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode,
				   (allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, NULL, ino, vpp);
	if (error) {
		ffs_vfree(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
		kprintf("mode = 0%o, inum = %lu, fs = %s\n",
			ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {				/* XXX */
		kprintf("free inode %s/%lu had %ld blocks\n",
			fs->fs_fsmnt, (u_long)ino, (long)ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = krandom() / 2 + 1;
	return (0);
noinodes:
	ffs_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(struct inode *pip)
{
	struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	int64_t dirsize64;
	int avgifree, avgbfree, avgndir, curdirsize;
	int minifree, minbfree, maxndir;
	int mincg, minndir;
	int maxcontigdirs;

	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	if (ITOV(pip)->v_flag & VROOT) {
		prefcg = karc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Count various limits which are used for
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;

	/*
	 * fs_avgfilesize and fs_avgfpdir are user-settable entities and
	 * multiplying them may overflow a 32 bit integer.
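	 *
	 * For example (hypothetical tuning values), an administrator-set
	 * fs_avgfilesize of 1048576 (1MB) with fs_avgfpdir of 4096 gives a
	 * product of 2^32, which no longer fits in 32 bits; hence the
	 * 64 bit dirsize64 intermediate below.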
	 */
	dirsize64 = fs->fs_avgfilesize * (int64_t)fs->fs_avgfpdir;
	if (dirsize64 > 0x7fffffff) {
		maxcontigdirs = 1;
	} else {
		dirsize = (int)dirsize64;
		curdirsize = avgndir ?
			(cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
		if (dirsize < curdirsize)
			dirsize = curdirsize;
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
		if (fs->fs_avgfpdir > 0)
			maxcontigdirs = min(maxcontigdirs,
					    fs->fs_ipg / fs->fs_avgfpdir);
		if (maxcontigdirs == 0)
			maxcontigdirs = 1;
	}

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
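 *
 * As an illustration of the fs_rotdelay conversion done at the end of
 * ffs_blkpref() (hypothetical drive parameters): with fs_rotdelay = 4ms,
 * fs_rps = 60, fs_nsect = 64 and NSPF(fs) = 2, the skip works out to
 * 4 * 60 * 64 / (2 * 1000) = 7 fragments, which is then rounded up to the
 * next multiple of fs_frag.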
 */
ufs_daddr_t
ffs_blkpref(struct inode *ip, ufs_daddr_t lbn, int indx, ufs_daddr_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
	nextblk = bap[indx - 1] + fs->fs_frag;
	if (fs->fs_rotdelay == 0 || indx < fs->fs_maxcontig ||
	    bap[indx - fs->fs_maxcontig] +
	    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
		return (nextblk);
	/*
	 * Here we convert ms of delay to frags as:
	 *	(frags) = (ms) * (rev/sec) * (sect/rev) /
	 *		  ((sect/frag) * (ms/sec))
	 * then round up to the next block.
	 */
	nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
			   (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(struct inode *ip, int cg, long pref,
	      int size,	/* size for data blocks, mode for inodes */
	      allocfcn_t *allocator)
{
	struct fs *fs;
	long result;	/* XXX why not same type as we return? */
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
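 *
 * For example, growing a 2-fragment piece at bprev to 5 fragments only
 * succeeds if the 3 additional fragments immediately following it are
 * free and the whole run still fits inside one filesystem block.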
 */
static ufs_daddr_t
ffs_fragextend(struct inode *ip, int cg, long bprev, int osize, int nsize)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;
	uint8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	KKASSERT(blknum(fs, bprev) == blknum(fs, bprev + frags - 1));
	error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++) {
		if (isclr(blksfree, bno + i)) {
			brelse(bp);
			return (0);
		}
	}

	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 *
	 *	---oooooooooonnnnnnn111----
	 *	   [-----frags-----]
	 *	   ^                      ^
	 *	   bbase                  fs_frag
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++) {
		if (isclr(blksfree, bno + i))
			break;
	}

	/*
	 * Size of original free frag is [i - numfrags(fs, osize)]
	 * Size of remaining free frag is [i - frags]
	 */
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, bprev);
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(struct inode *ip, int cg, ufs_daddr_t bpref, int size)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i;
	ufs_daddr_t bno, blkno;
	int allocsiz, error, frags;
	uint8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * Check to see if any fragments of sufficient size are already
	 * available.  Fit the data into a larger fragment if necessary,
	 * before allocating a whole new block.
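	 *
	 * For example, with fs_frag == 8 a request for 3 fragments scans
	 * cg_frsum[3..7]; if only cg_frsum[5] is non-zero the request is
	 * carved out of a free 5-fragment piece and the 2 fragments left
	 * over are accounted in cg_frsum[2] further below.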
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) {
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	}
	if (allocsiz == fs->fs_frag) {
		/*
		 * No fragments were available, allocate a whole block and
		 * cut the requested fragment (of size frags) out of it.
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(blksfree, bpref + i);

		/*
		 * Calculate the number of free frags still remaining after
		 * we have cut out the requested allocation.  Indicate that
		 * a fragment of that size is now available for future
		 * allocation.
		 */
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
		bdwrite(bp);
		return (bno);
	}

	/*
	 * cg_frsum[] has told us that a free fragment of allocsiz size is
	 * available.  Find it, then clear the bitmap bits associated with
	 * the size we want.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
		brelse(bp);
		return (0);
	}
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;

	/*
	 * Account for the allocation.  The original searched size that we
	 * found is no longer available.  If we cut out a smaller piece then
	 * a smaller fragment is now available.
	 */
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	blkno = cg * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	bdwrite(bp);
	return ((u_long)blkno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, ufs_daddr_t bpref)
{
	struct fs *fs;
	struct cg *cgp;
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	int i;
	uint8_t *blksfree;

	fs = ip->i_fs;
	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp);
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated.  Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block.  A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			kprintf("pos = %d, i = %d, fs = %s\n",
				pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, blksfree, bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		kprintf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = bno;
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	blkno = cgp->cg_cgx * fs->fs_fpg + bno;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, fs, blkno);
	return (blkno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group.  Instead we just
 * take the first one that we find following bpref.
 */
static ufs_daddr_t
ffs_clusteralloc(struct inode *ip, int cg, ufs_daddr_t bpref, int len)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;
	uint8_t *blksfree;

	fs = ip->i_fs;
	if (fs->fs_maxcluster[cg] < len)
		return (0);
	if (bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp)) {
		goto fail;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;

	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group.  Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation.  We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read).  We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got >= cgp->cg_nclusterblks)
		goto fail;
	/*
	 * Allocate the cluster that we have found.
	 */
	blksfree = cg_blksfree(cgp);
	for (i = 1; i <= len; i++) {
		if (!ffs_isblock(fs, blksfree, got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	}
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	for (i = 0; i < len; i += fs->fs_frag) {
		if ((got = ffs_alloccgblk(ip, bp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
	}
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 *   3) the inode must not already be in the inode hash, otherwise it
 *      may be in the process of being deallocated.  This can occur
 *      because the bitmap is updated before the inode is removed from
 *      the hash (the vnode reclamation sequence frees the bit first).
 *      If we were to reallocate the inode the caller could wind up
 *      returning a vnode/inode combination which is in an indeterminate
 *      state.
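 *
 * For example, if the preferred inode's bit is clear but ufs_ihashcheck()
 * still finds it (a reclaim is in progress), the scan below skips it and
 * records the miss in icheckmiss; once every free bit has been skipped
 * this way the routine returns 0 instead of panicking, so the caller
 * simply tries another cylinder group.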
 */
static ino_t
ffs_nodealloccg(struct inode *ip, int cg, ufs_daddr_t ipref, int mode)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	uint8_t *inosused;
	uint8_t map;
	int error, len, arraysize, i;
	int icheckmiss;
	ufs_daddr_t ibase;

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
		brelse(bp);
		return (0);
	}
	inosused = cg_inosused(cgp);
	icheckmiss = 0;

	/*
	 * Quick check, reuse the most recently freed inode or continue
	 * a scan from where we left off the last time.
	 */
	ibase = cg * fs->fs_ipg;
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(inosused, ipref)) {
			if (ufs_ihashcheck(ip->i_dev, ibase + ipref) == 0)
				goto gotit;
		}
	}

	/*
	 * Scan the inode bitmap starting at irotor, be sure to handle
	 * the edge case by going back to the beginning of the array.
	 *
	 * If the number of inodes is not byte-aligned, the unused bits
	 * should be set to 1.  This will be sanity checked in gotit.  Note
	 * that we have to be sure not to overlap the beginning and end
	 * when irotor is in the middle of a byte as this will cause the
	 * same bitmap byte to be checked twice.  To solve this problem we
	 * just convert everything to a byte index for the loop.
	 */
	ipref = (cgp->cg_irotor % fs->fs_ipg) >> 3;	/* byte index */
	len = (fs->fs_ipg + 7) >> 3;			/* byte size */
	arraysize = len;

	while (len > 0) {
		map = inosused[ipref];
		if (map != 255) {
			for (i = 0; i < NBBY; ++i) {
				/*
				 * If we find a free bit we have to make sure
				 * that the inode is not in the middle of
				 * being destroyed.  The inode should not exist
				 * in the inode hash.
				 *
				 * Adjust the rotor to try to hit the
				 * quick-check up above.
				 */
				if ((map & (1 << i)) == 0) {
					if (ufs_ihashcheck(ip->i_dev, ibase + (ipref << 3) + i) == 0) {
						ipref = (ipref << 3) + i;
						cgp->cg_irotor = (ipref + 1) % fs->fs_ipg;
						goto gotit;
					}
					++icheckmiss;
				}
			}
		}

		/*
		 * Set up for the next byte, start at the beginning again if
		 * we hit the end of the array.
		 */
		if (++ipref == arraysize)
			ipref = 0;
		--len;
	}
	if (icheckmiss == cgp->cg_cs.cs_nifree) {
		brelse(bp);
		return (0);
	}
	kprintf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map, icheckmiss/nfree %d/%d",
	      icheckmiss, cgp->cg_cs.cs_nifree);
	/* NOTREACHED */

	/*
	 * ipref is a bit index as of the gotit label.
	 */
gotit:
	KKASSERT(ipref >= 0 && ipref < fs->fs_ipg);
	cgp->cg_time = time_second;
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_inomapdep(bp, ip, ibase + ipref);
	setbit(inosused, ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
	bdwrite(bp);
	return (ibase + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree_cg(struct fs *fs, struct vnode *i_devvp, cdev_t i_dev, ino_t i_number,
	       uint32_t i_din_uid, ufs_daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;
	uint8_t *blksfree;

	VOP_FREEBLKS(i_devvp, fsbtodoff(fs, bno), size);
	if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		kprintf("dev=%s, bno = %ld, bsize = %ld, size = %ld, fs = %s\n",
			devtoname(i_dev), (long)bno, (long)fs->fs_bsize, size,
			fs->fs_fsmnt);
		panic("ffs_blkfree: bad size");
	}
	cg = dtog(fs, bno);
	if ((uint)bno >= fs->fs_size) {
		kprintf("bad block %ld, ino %lu\n",
			(long)bno, (u_long)i_number);
		ffs_fserr(fs, i_din_uid, "bad block");
		return;
	}

	/*
	 * Load the cylinder group
	 */
	error = bread(i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return;
	}
	cgp->cg_time = time_second;
	bno = dtogd(fs, bno);
	blksfree = cg_blksfree(cgp);

	if (size == fs->fs_bsize) {
		/*
		 * Free a whole block
		 */
		blkno = fragstoblks(fs, bno);
		if (!ffs_isfreeblock(fs, blksfree, blkno)) {
			kprintf("dev = %s, block = %ld, fs = %s\n",
				devtoname(i_dev), (long)bno, fs->fs_fsmnt);
			panic("ffs_blkfree: freeing free block");
		}
		ffs_setblock(fs, blksfree, blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		/*
		 * Free a fragment within a block.
		 *
		 * bno is the starting block number of the fragment being
		 * freed.
		 *
		 * bbase is the starting block number for the filesystem
		 * block containing the fragment.
		 *
		 * blk is the current bitmap for the fragments within the
		 * filesystem block containing the fragment.
		 *
		 * frags is the number of fragments being freed
		 *
		 * Call ffs_fragacct() to account for the removal of all
		 * current fragments, then adjust the bitmap to free the
		 * requested fragment, and finally call ffs_fragacct() again
		 * to regenerate the accounting.
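		 *
		 * For example, with fs_frag == 8, freeing 2 fragments next
		 * to an existing free run of 3 turns that run into one of
		 * 5: the first ffs_fragacct() call removes the old run from
		 * cg_frsum[3], the freed bits are set, and the second call
		 * credits cg_frsum[5].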
		 */
		bbase = bno - fragnum(fs, bno);
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(blksfree, bno + i)) {
				kprintf("dev = %s, block = %ld, fs = %s\n",
					devtoname(i_dev), (long)(bno + i),
					fs->fs_fsmnt);
				panic("ffs_blkfree: freeing free frag");
			}
			setbit(blksfree, bno + i);
		}
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;

		/*
		 * Add back in counts associated with the new frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);

		/*
		 * If a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}

struct ffs_blkfree_trim_params {
	struct task task;
	ufs_daddr_t bno;
	long size;

	/*
	 * With TRIM, the inode pointer is gone by the time the callback
	 * runs, but we still need the following fields for ffs_blkfree_cg().
	 */
	struct vnode *i_devvp;
	struct fs *i_fs;
	cdev_t i_dev;
	ino_t i_number;
	uint32_t i_din_uid;
};

static void
ffs_blkfree_trim_task(void *ctx, int pending)
{
	struct ffs_blkfree_trim_params *tp;

	tp = ctx;
	ffs_blkfree_cg(tp->i_fs, tp->i_devvp, tp->i_dev, tp->i_number,
		       tp->i_din_uid, tp->bno, tp->size);
	kfree(tp, M_TEMP);
}

static void
ffs_blkfree_trim_completed(struct bio *biop)
{
	struct buf *bp = biop->bio_buf;
	struct ffs_blkfree_trim_params *tp;

	tp = bp->b_bio1.bio_caller_info1.ptr;
	TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp);
	taskqueue_enqueue(taskqueue_swi, &tp->task);
	biodone(biop);
}

/*
 * If TRIM is enabled, we TRIM the blocks first and only free them (that
 * is, update the bitmap) from the completion callback after the TRIM has
 * finished.  Deferring the bitmap update ensures a block cannot be reused
 * before it has actually been trimmed, which would otherwise result in
 * trimming a block holding valid data.
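 *
 * The resulting flow is: ffs_blkfree() issues a BUF_CMD_FREEBLKS bio,
 * ffs_blkfree_trim_completed() runs from the bio completion and queues
 * ffs_blkfree_trim_task() on taskqueue_swi, which finally calls
 * ffs_blkfree_cg() to update the bitmaps.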
 */
void
ffs_blkfree(struct inode *ip, ufs_daddr_t bno, long size)
{
	struct mount *mp = ip->i_devvp->v_mount;
	struct ffs_blkfree_trim_params *tp;

	if (!(mp->mnt_flag & MNT_TRIM)) {
		ffs_blkfree_cg(ip->i_fs, ip->i_devvp, ip->i_dev, ip->i_number,
			       ip->i_uid, bno, size);
		return;
	}

	struct buf *bp;

	tp = kmalloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK);
	tp->bno = bno;
	tp->i_fs = ip->i_fs;
	tp->i_devvp = ip->i_devvp;
	tp->i_dev = ip->i_dev;
	tp->i_din_uid = ip->i_uid;
	tp->i_number = ip->i_number;
	tp->size = size;

	bp = getnewbuf(0, 0, 0, 1);
	BUF_KERNPROC(bp);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = fsbtodoff(ip->i_fs, bno);
	bp->b_bcount = size;
	bp->b_bio1.bio_caller_info1.ptr = tp;
	bp->b_bio1.bio_done = ffs_blkfree_trim_completed;
	vn_strategy(ip->i_devvp, &bp->b_bio1);
}

#ifdef DIAGNOSTIC
/*
 * Verify allocation of a block or fragment.  Returns true if block or
 * fragment is allocated, false if it is free.
 */
static int
ffs_checkblk(struct inode *ip, ufs_daddr_t bno, long size)
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	int i, error, frags, free;
	uint8_t *blksfree;

	fs = ip->i_fs;
	if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		kprintf("bsize = %ld, size = %ld, fs = %s\n",
			(long)fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_checkblk: bad size");
	}
	if ((uint)bno >= fs->fs_size)
		panic("ffs_checkblk: bad block %d", bno);
	error = bread(ip->i_devvp, fsbtodoff(fs, cgtod(fs, dtog(fs, bno))),
		      (int)fs->fs_cgsize, &bp);
	if (error)
		panic("ffs_checkblk: cg bread failed");
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		panic("ffs_checkblk: cg magic mismatch");
	blksfree = cg_blksfree(cgp);
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		free = ffs_isblock(fs, blksfree, fragstoblks(fs, bno));
	} else {
		frags = numfrags(fs, size);
		for (free = 0, i = 0; i < frags; i++)
			if (isset(blksfree, bno + i))
				free++;
		if (free != 0 && free != frags)
			panic("ffs_checkblk: partially free fragment");
	}
	brelse(bp);
	return (!free);
}
#endif /* DIAGNOSTIC */

/*
 * Free an inode.
 */
int
ffs_vfree(struct vnode *pvp, ino_t ino, int mode)
{
	if (DOINGSOFTDEP(pvp)) {
		softdep_freefile(pvp, ino, mode);
		return (0);
	}
	return (ffs_freefile(pvp, ino, mode));
}

/*
 * Do the actual free operation.
 * The specified inode is placed back in the free map.
 */
int
ffs_freefile(struct vnode *pvp, ino_t ino, int mode)
{
	struct fs *fs;
	struct cg *cgp;
	struct inode *pip;
	struct buf *bp;
	int error, cg;
	uint8_t *inosused;

	pip = VTOI(pvp);
	fs = pip->i_fs;
	if ((uint)ino >= fs->fs_ipg * fs->fs_ncg)
		panic("ffs_vfree: range: dev = (%d,%d), ino = %"PRId64", fs = %s",
		      major(pip->i_dev), minor(pip->i_dev), ino, fs->fs_fsmnt);
	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp, fsbtodoff(fs, cgtod(fs, cg)),
		      (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp)) {
		brelse(bp);
		return (0);
	}
	cgp->cg_time = time_second;
	inosused = cg_inosused(cgp);
	ino %= fs->fs_ipg;
	if (isclr(inosused, ino)) {
		kprintf("dev = %s, ino = %lu, fs = %s\n",
			devtoname(pip->i_dev), (u_long)ino, fs->fs_fsmnt);
		if (fs->fs_ronly == 0)
			panic("ffs_vfree: freeing free inode");
	}
	clrbit(inosused, ino);
	if (ino < cgp->cg_irotor)
		cgp->cg_irotor = ino;
	cgp->cg_cs.cs_nifree++;
	fs->fs_cstotal.cs_nifree++;
	fs->fs_cs(fs, cg).cs_nifree++;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir--;
		fs->fs_cstotal.cs_ndir--;
		fs->fs_cs(fs, cg).cs_ndir--;
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
	return (0);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static ufs_daddr_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, ufs_daddr_t bpref, int allocsiz)
{
	ufs_daddr_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	uint8_t *blksfree;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = cgp->cg_frotor / NBBY;
	blksfree = cg_blksfree(cgp);
	len = howmany(fs->fs_fpg, NBBY) - start;
	loc = scanc((uint)len, (u_char *)&blksfree[start],
		    (u_char *)fragtbl[fs->fs_frag],
		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;	/* XXX why overlap here? */
		start = 0;
		loc = scanc((uint)len, (u_char *)&blksfree[0],
			    (u_char *)fragtbl[fs->fs_frag],
			    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			kprintf("start = %d, len = %d, fs = %s\n",
				start, len, fs->fs_fsmnt);
			panic("ffs_alloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = bno;
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, blksfree, bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	kprintf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt);
	panic("ffs_alloccg: block not in map");
	return (-1);
}

/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
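 *
 * For example, with fs_contigsumsize == 4, freeing a block that joins a
 * free run of 1 behind it and a free run of 2 ahead of it produces a
 * 4-block cluster: sump[4] is incremented while sump[1] and sump[2] are
 * decremented, since those smaller runs no longer exist on their own.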
 */
static void
ffs_clusteracct(struct fs *fs, struct cg *cgp, ufs_daddr_t blkno, int cnt)
{
	int32_t *sump;
	int32_t *lp;
	u_char *freemapp, *mapp;
	int i, start, end, forw, back, map, bit;

	if (fs->fs_contigsumsize <= 0)
		return;
	freemapp = cg_clustersfree(cgp);
	sump = cg_clustersum(cgp);
	/*
	 * Allocate or clear the actual block.
	 */
	if (cnt > 0)
		setbit(freemapp, blkno);
	else
		clrbit(freemapp, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + fs->fs_contigsumsize;
	if (end >= cgp->cg_nclusterblks)
		end = cgp->cg_nclusterblks;
	mapp = &freemapp[start / NBBY];
	map = *mapp++;
	bit = 1 << (start % NBBY);
	for (i = start; i < end; i++) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - fs->fs_contigsumsize;
	if (end < 0)
		end = -1;
	mapp = &freemapp[start / NBBY];
	map = *mapp--;
	bit = 1 << (start % NBBY);
	for (i = start; i > end; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & (NBBY - 1)) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << (NBBY - 1);
		}
	}
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > fs->fs_contigsumsize)
		i = fs->fs_contigsumsize;
	sump[i] += cnt;
	if (back > 0)
		sump[back] -= cnt;
	if (forw > 0)
		sump[forw] -= cnt;
	/*
	 * Update cluster summary information.
	 */
	lp = &sump[fs->fs_contigsumsize];
	for (i = fs->fs_contigsumsize; i > 0; i--)
		if (*lp-- > 0)
			break;
	fs->fs_maxcluster[cgp->cg_cgx] = i;
}

/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
static void
ffs_fserr(struct fs *fs, uint uid, char *cp)
{
	struct thread *td = curthread;
	struct proc *p;

	if ((p = td->td_proc) != NULL) {
		log(LOG_ERR, "pid %d (%s), uid %d on %s: %s\n", p ? p->p_pid : -1,
		    p ? p->p_comm : "-", uid, fs->fs_fsmnt, cp);
	} else {
		log(LOG_ERR, "system thread %p, uid %d on %s: %s\n",
		    td, uid, fs->fs_fsmnt, cp);
	}
}