/*	$NetBSD: ffs_alloc.c,v 1.19 2012/04/19 17:28:26 christos Exp $	*/
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.19 (Berkeley) 7/13/95
 */

#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <sys/cdefs.h>
#if defined(__RCSID) && !defined(__lint)
__RCSID("$NetBSD: ffs_alloc.c,v 1.19 2012/04/19 17:28:26 christos Exp $");
#endif	/* !__lint */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>

#include "makefs.h"

#include <ufs/ufs/dinode.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ffs/fs.h>

#include "ffs/buf.h"
#include "ffs/ufs_inode.h"
#include "ffs/ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
		    daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/* in ffs_tables.c */
extern const int inside[], around[];
extern const u_char * const fragtbl[];
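
/*
 * fragtbl[fs_frag] points to a 256-entry table indexed by one byte of
 * the free-fragment map; each entry has a bit set for every size of
 * free-fragment run occurring in that byte, so ffs_mapsearch() can use
 * scanc() to find a promising byte quickly and then apply around[] and
 * inside[] to pin down the exact run.  (Rough summary only; the
 * authoritative definitions live in ffs_tables.c.)
 */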

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		DIP_ADD(ip, blocks, size / DEV_BSIZE);
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
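
/*
 * A worked example of the size contract enforced above (figures are
 * illustrative, not from this file): with fs_bsize = 8192 and
 * fs_fsize = 1024, the legal values of `size' are 1024, 2048, ...,
 * 8192, and a hypothetical call such as
 *
 *	ffs_alloc(ip, lbn, bpref, 3 * fs->fs_fsize, &bno);
 *
 * asks for three contiguous fragments.  Anything that is not a
 * multiple of fs_fsize, or that exceeds fs_bsize, trips the errx()
 * check instead.
 */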

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections.  The first section is composed of the
 * direct blocks.  Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file.  If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups.  When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made.  The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found.  If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
				return (fs->fs_fpg * cg + fs->fs_frag);
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < NDADDR + NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
			    ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
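
/*
 * The preferences returned above are fragment addresses.  A worked
 * example (figures are illustrative): with fs_fpg = 47512 fragments
 * per cylinder group and fs_frag = 8, a file whose inode sits in cg 3
 * gets the preference 3 * 47512 + 8 = 142544, the second block of
 * cg 3; block 0 of a group is avoided because the group's bookkeeping
 * lives at its start.  In the contiguous case the preference is just
 * the previous block's address plus fs_frag, i.e. the adjacent block.
 */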

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':	size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
	struct fs *fs;
	daddr_t result;
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
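
/*
 * Probe order, illustrated with fs_ncg = 16 and preferred group 5
 * (numbers chosen only for the example): pass 1 tries cg 5; the
 * quadratic rehash steps i = 1, 2, 4, 8 try cgs 6, 8, 12, and finally
 * (20 mod 16) = 4; the brute-force pass then walks cgs 7, 8, ..., 15,
 * 0, 1, ..., 4.  Group 6 is skipped there, as the note above says,
 * because the first rehash step always probes icg + 1.
 */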

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct cg *cgp;
	struct buf *bp;
	daddr_t bno, blkno;
	int error, frags, allocsiz, i;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
		brelse(bp);
		return (0);
	}
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(ip, bp, bpref);
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
			brelse(bp);
			return (0);
		}
		bno = ffs_alloccgblk(ip, bp, bpref);
		bpref = dtogd(fs, bno);
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp, needswap), bpref + i);
		i = fs->fs_frag - frags;
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		ufs_add32(cgp->cg_frsum[i], 1, needswap);
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp, needswap), bno + i);
	ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
	if (frags != allocsiz)
		ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
	blkno = cg * fs->fs_fpg + bno;
	bdwrite(bp);
	return blkno;
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree;

	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp, needswap);
	if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %llu", (long long)bno,
		    (unsigned long long)ip->i_number);
		return;
	}
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
				errx(1, "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
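
/*
 * Reassembly example (illustrative sizes, fs_frag = 8): freeing the
 * last two busy fragments of a block whose other six are already free
 * first adds 2 to the nffree counts, but the final ffs_isblock() test
 * then sees a fully free block, so the code backs all eight fragments
 * out of nffree and credits one block to nbfree and to the cluster
 * accounting instead.
 */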

/*
 * Scan the `size' bytes at `cp' for the first byte whose table[] entry
 * has a bit of `mask' set.  Returns the count of bytes from that byte
 * to the end of the region, or 0 if no byte matches.
 */
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
	const u_char *end = &cp[size];

	while (cp < end && (table[*cp] & mask) == 0)
		cp++;
	return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
	int32_t bno;
	int start, len, loc, i;
	int blk, field, subfield, pos;
	int ostart, olen;
	const int needswap = UFS_FSNEEDSWAP(fs);

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
	len = howmany(fs->fs_fpg, NBBY) - start;
	ostart = start;
	olen = len;
	loc = scanc((u_int)len,
	    (const u_char *)&cg_blksfree(cgp, needswap)[start],
	    (const u_char *)fragtbl[fs->fs_frag],
	    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = scanc((u_int)len,
		    (const u_char *)&cg_blksfree(cgp, needswap)[0],
		    (const u_char *)fragtbl[fs->fs_frag],
		    (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
		if (loc == 0) {
			errx(1,
			    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
			    ostart, olen,
			    ufs_rw32(cgp->cg_freeoff, needswap),
			    (long)cg_blksfree(cgp, needswap) - (long)cgp);
			/* NOTREACHED */
		}
	}
	bno = (start + len - loc) * NBBY;
	cgp->cg_frotor = ufs_rw32(bno, needswap);
	/*
	 * found the byte in the map
	 * sift through the bits to find the selected frag
	 */
	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
		blk <<= 1;
		field = around[allocsiz];
		subfield = inside[allocsiz];
		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
			if ((blk & field) == subfield)
				return (bno + pos);
			field <<= 1;
			subfield <<= 1;
		}
	}
	errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
	return (-1);
}
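
/*
 * A sketch of the sifting loop above: blkmap() extracts one block's
 * worth of free-map bits, and blk <<= 1 supplies an artificial
 * "allocated" bit at the edge.  inside[allocsiz] sets the middle
 * allocsiz bits of an (allocsiz + 2)-bit window and around[allocsiz]
 * masks the whole window, so (blk & field) == subfield matches exactly
 * allocsiz free fragments flanked by allocated ones (or by a block
 * boundary); shifting field and subfield left slides the window across
 * the block.
 */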