/*
 * Copyright (c) 2011-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>

#include "hammer2.h"

struct hammer2_fiterate {
	hammer2_off_t	bpref;
	hammer2_off_t	bnext;
	int		loops;
};

typedef struct hammer2_fiterate hammer2_fiterate_t;

static int hammer2_freemap_try_alloc(hammer2_trans_t *trans,
			hammer2_chain_t **parentp, hammer2_blockref_t *bref,
			int radix, hammer2_fiterate_t *iter);
static void hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
			hammer2_key_t key, hammer2_chain_t *chain);
static int hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
			hammer2_bmap_data_t *bmap, uint16_t class,
			int n, int radix, hammer2_key_t *basep);
static int hammer2_freemap_iterate(hammer2_trans_t *trans,
			hammer2_chain_t **parentp, hammer2_chain_t **chainp,
			hammer2_fiterate_t *iter);

static __inline
int
hammer2_freemapradix(int radix)
{
	return(radix);
}

/*
 * Calculate the device offset for the specified FREEMAP_NODE or FREEMAP_LEAF
 * bref.  Return a combined media offset and physical size radix.  Freemap
 * chains use fixed storage offsets in the 4MB reserved area at the
 * beginning of each 2GB zone.
 *
 * Rotate between four possibilities.  Theoretically this means we have three
 * good freemaps in case of a crash which we can use as a base for the fixup
 * scan at mount-time.
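 *
 * Illustrative note (derived from the selection code below): the copy being
 * written rotates A -> B -> C -> D and then wraps back to A, with the
 * previous copy identified from the bref's existing data_off.  A previously
 * unallocated bref starts in zone A.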
 */
#define H2FMBASE(key, radix)	((key) & ~(((hammer2_off_t)1 << (radix)) - 1))
#define H2FMSHIFT(radix)	((hammer2_off_t)1 << (radix))

static
int
hammer2_freemap_reserve(hammer2_mount_t *hmp, hammer2_blockref_t *bref,
			int radix)
{
	hammer2_off_t off;
	size_t bytes;

	/*
	 * Physical allocation size -> radix.  Typically either 256 for
	 * a level 0 freemap leaf or 65536 for a level N freemap node.
	 *
	 * NOTE: A 256 byte bitmap represents 256 x 8 x 1024 = 2MB of storage.
	 *	 Do not use hammer2_allocsize() here as it has a min cap.
	 */
	bytes = 1 << radix;

	/*
	 * Adjust by HAMMER2_ZONE_FREEMAP_{A,B,C,D} using the existing
	 * offset as a basis.  Start in zone A if previously unallocated.
	 */
	if ((bref->data_off & ~HAMMER2_OFF_MASK_RADIX) == 0) {
		off = HAMMER2_ZONE_FREEMAP_A;
	} else {
		off = bref->data_off & ~HAMMER2_OFF_MASK_RADIX &
		      (((hammer2_off_t)1 << HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
		off = off / HAMMER2_PBUFSIZE;
		KKASSERT(off >= HAMMER2_ZONE_FREEMAP_A);
		KKASSERT(off < HAMMER2_ZONE_FREEMAP_D + 4);

		if (off >= HAMMER2_ZONE_FREEMAP_D)
			off = HAMMER2_ZONE_FREEMAP_A;
		else if (off >= HAMMER2_ZONE_FREEMAP_C)
			off = HAMMER2_ZONE_FREEMAP_D;
		else if (off >= HAMMER2_ZONE_FREEMAP_B)
			off = HAMMER2_ZONE_FREEMAP_C;
		else
			off = HAMMER2_ZONE_FREEMAP_B;
	}
	off = off * HAMMER2_PBUFSIZE;

	/*
	 * Calculate the block offset of the reserved block.  This will
	 * point into the 4MB reserved area at the base of the appropriate
	 * 2GB zone, once added to the FREEMAP_x selection above.
	 */
	switch(bref->keybits) {
	/* case HAMMER2_FREEMAP_LEVEL5_RADIX: not applicable */
	case HAMMER2_FREEMAP_LEVEL4_RADIX:	/* 2EB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL4_RADIX) +
		       HAMMER2_ZONEFM_LEVEL4 * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL3_RADIX:	/* 2PB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL3_RADIX) +
		       HAMMER2_ZONEFM_LEVEL3 * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL2_RADIX:	/* 2TB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL2_RADIX) +
		       HAMMER2_ZONEFM_LEVEL2 * HAMMER2_PBUFSIZE;
		break;
	case HAMMER2_FREEMAP_LEVEL1_RADIX:	/* 2GB */
		KKASSERT(bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);
		KKASSERT(bytes == HAMMER2_FREEMAP_LEVELN_PSIZE);
		off += H2FMBASE(bref->key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
		       HAMMER2_ZONEFM_LEVEL1 * HAMMER2_PBUFSIZE;
		break;
	default:
		panic("freemap: bad radix(2) %p %d\n", bref, bref->keybits);
		/* NOT REACHED */
		break;
	}
	bref->data_off = off | radix;
	return (0);
}

/*
 * Normal freemap allocator
 *
 * Use available hints to allocate space using the freemap.  Create missing
 * freemap infrastructure on-the-fly as needed (including marking initial
 * allocations using the iterator as allocated, instantiating new 2GB zones,
 * and dealing with the end-of-media edge case).
 *
 * ip and bpref are only used as a heuristic to determine locality of
 * reference.  bref->key may also be used heuristically.
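 *
 * Note that FREEMAP_NODE and FREEMAP_LEAF brefs never go through the normal
 * allocator at all; they are short-circuited to hammer2_freemap_reserve()
 * below and assigned out of each zone's reserved area.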
 */
int
hammer2_freemap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
		      hammer2_blockref_t *bref, size_t bytes)
{
	hammer2_chain_t *parent;
	int radix;
	int error;
	unsigned int hindex;
	hammer2_fiterate_t iter;

	/*
	 * Validate the allocation size.  It must be a power of 2.
	 *
	 * For now require that the caller be aware of the minimum
	 * allocation (1K).
	 */
	radix = hammer2_getradix(bytes);
	KKASSERT((size_t)1 << radix == bytes);

	/*
	 * Freemap blocks themselves are simply assigned from the reserve
	 * area, not allocated from the freemap.
	 */
	if (bref->type == HAMMER2_BREF_TYPE_FREEMAP_NODE ||
	    bref->type == HAMMER2_BREF_TYPE_FREEMAP_LEAF) {
		return(hammer2_freemap_reserve(hmp, bref, radix));
	}

	if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
		hammer2_freemap_free(trans, hmp, bref, 0);

	/*
	 * Normal allocations
	 */
	KKASSERT(bytes >= HAMMER2_MIN_ALLOC && bytes <= HAMMER2_MAX_ALLOC);

	/*
	 * Calculate the starting point for our allocation search.
	 *
	 * Each freemap leaf is dedicated to a specific freemap_radix.
	 * The freemap_radix can be more fine-grained than the device buffer
	 * radix, which results in inodes being grouped together in their
	 * own segment, terminal-data (16K or less) and initial indirect
	 * blocks being grouped together, and then full-indirect and full-data
	 * blocks (64K) being grouped together.
	 *
	 * The single most important aspect of this is the inode grouping
	 * because that is what allows 'find' and 'ls' and other filesystem
	 * topology operations to run fast.
	 */
#if 0
	if (bref->data_off & ~HAMMER2_OFF_MASK_RADIX)
		bpref = bref->data_off & ~HAMMER2_OFF_MASK_RADIX;
	else if (trans->tmp_bpref)
		bpref = trans->tmp_bpref;
	else if (trans->tmp_ip)
		bpref = trans->tmp_ip->chain->bref.data_off;
	else
#endif
	/*
	 * Heuristic tracking index.  We would like one for each distinct
	 * bref type if possible.  heur_freemap[] has room for two classes
	 * for each type.  At a minimum we have to break-up our heuristic
	 * by device block sizes.
	 */
	hindex = hammer2_devblkradix(radix) - HAMMER2_MINIORADIX;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR_NRADIX);
	hindex += bref->type * HAMMER2_FREEMAP_HEUR_NRADIX;
	hindex &= HAMMER2_FREEMAP_HEUR_TYPES * HAMMER2_FREEMAP_HEUR_NRADIX - 1;
	KKASSERT(hindex < HAMMER2_FREEMAP_HEUR);

	iter.bpref = hmp->heur_freemap[hindex];

	/*
	 * Make sure bpref is in-bounds.  It's ok if bpref covers a zone's
	 * reserved area, the try code will iterate past it.
	 */
	if (iter.bpref > hmp->voldata.volu_size)
		iter.bpref = hmp->voldata.volu_size - 1;

	/*
	 * Iterate the freemap looking for free space before and after.
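	 *
	 * hammer2_freemap_try_alloc() returns EAGAIN when the current 2GB
	 * zone is exhausted (after advancing iter.bnext via
	 * hammer2_freemap_iterate()), so the loop below simply retries
	 * until it gets 0, ENOSPC, or another hard error.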
	 */
	parent = &hmp->fchain;
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);
	error = EAGAIN;
	iter.bnext = iter.bpref;
	iter.loops = 0;

	while (error == EAGAIN) {
		error = hammer2_freemap_try_alloc(trans, &parent, bref,
						  radix, &iter);
	}
	hmp->heur_freemap[hindex] = iter.bnext;
	hammer2_chain_unlock(parent);

	return (error);
}

static int
hammer2_freemap_try_alloc(hammer2_trans_t *trans, hammer2_chain_t **parentp,
			  hammer2_blockref_t *bref, int radix,
			  hammer2_fiterate_t *iter)
{
	hammer2_mount_t *hmp = (*parentp)->hmp;
	hammer2_off_t l0size;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	hammer2_key_t key_dummy;
	hammer2_chain_t *chain;
	hammer2_off_t key;
	size_t bytes;
	uint16_t class;
	int error = 0;
	int cache_index = -1;

	/*
	 * Calculate the number of bytes being allocated, the number
	 * of contiguous bits of bitmap being allocated, and the bitmap
	 * mask.
	 *
	 * WARNING! cpu hardware may mask bits == 64 -> 0 and blow up the
	 *	    mask calculation.
	 */
	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | hammer2_devblkradix(radix);

	/*
	 * Lookup the level1 freemap chain, creating and initializing one
	 * if necessary.  Intermediate levels will be created automatically
	 * when necessary by hammer2_chain_create().
	 */
	key = H2FMBASE(iter->bnext, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	l1mask = l1size - 1;

	chain = hammer2_chain_lookup(parentp, &key_dummy, key, key + l1mask,
				     &cache_index,
				     HAMMER2_LOOKUP_FREEMAP |
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND/*XXX*/);
	if (chain == NULL) {
		/*
		 * Create the missing leaf, be sure to initialize
		 * the auxiliary freemap tracking information in
		 * the bref.check.freemap structure.
		 */
#if 0
		kprintf("freemap create L1 @ %016jx bpref %016jx\n",
			key, iter->bpref);
#endif
		error = hammer2_chain_create(trans, parentp, &chain,
				     key, HAMMER2_FREEMAP_LEVEL1_RADIX,
				     HAMMER2_BREF_TYPE_FREEMAP_LEAF,
				     HAMMER2_FREEMAP_LEVELN_PSIZE);
		if (error == 0) {
			hammer2_chain_modify(trans, &chain, 0);
			bzero(&chain->data->bmdata[0],
			      HAMMER2_FREEMAP_LEVELN_PSIZE);
			chain->bref.check.freemap.bigmask = (uint32_t)-1;
			chain->bref.check.freemap.avail = l1size;
			/* bref.methods should already be inherited */

			hammer2_freemap_init(trans, hmp, key, chain);
		}
	} else if ((chain->bref.check.freemap.bigmask & (1 << radix)) == 0) {
		/*
		 * Already flagged as not having enough space
		 */
		error = ENOSPC;
	} else {
		/*
		 * Modify existing chain to setup for adjustment.
		 */
		hammer2_chain_modify(trans, &chain, 0);
	}

	/*
	 * Scan 2MB entries.
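	 *
	 * The scan below starts at the 2MB bmap entry containing
	 * iter->bnext and probes outward in both directions (start + count
	 * and start - count), accepting only entries whose class is 0
	 * (unassigned) or matches this allocation's class.  If nothing
	 * fits, the radix bit is cleared in bigmask so later attempts can
	 * skip this leaf.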
	 */
	if (error == 0) {
		hammer2_bmap_data_t *bmap;
		hammer2_key_t base_key;
		int count;
		int start;
		int n;

		start = (int)((iter->bnext - key) >>
			      HAMMER2_FREEMAP_LEVEL0_RADIX);
		KKASSERT(start >= 0 && start < HAMMER2_FREEMAP_COUNT);
		hammer2_chain_modify(trans, &chain, 0);

		error = ENOSPC;
		for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
			if (start + count >= HAMMER2_FREEMAP_COUNT &&
			    start - count < 0) {
				break;
			}
			n = start + count;
			bmap = &chain->data->bmdata[n];
			if (n < HAMMER2_FREEMAP_COUNT && bmap->avail &&
			    (bmap->class == 0 || bmap->class == class)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(trans, hmp, bmap,
							   class, n, radix,
							   &base_key);
				if (error != ENOSPC) {
					key = base_key;
					break;
				}
			}
			n = start - count;
			bmap = &chain->data->bmdata[n];
			if (n >= 0 && bmap->avail &&
			    (bmap->class == 0 || bmap->class == class)) {
				base_key = key + n * l0size;
				error = hammer2_bmap_alloc(trans, hmp, bmap,
							   class, n, radix,
							   &base_key);
				if (error != ENOSPC) {
					key = base_key;
					break;
				}
			}
		}
		if (error == ENOSPC)
			chain->bref.check.freemap.bigmask &= ~(1 << radix);
		/* XXX also scan down from original count */
	}

	if (error == 0) {
		/*
		 * Assert validity.  Must be beyond the static allocator used
		 * by newfs_hammer2 (and thus also beyond the aux area),
		 * must not go past the volume size, and must not be in the
		 * reserved segment area for a zone.
		 */
		KKASSERT(key >= hmp->voldata.allocator_beg &&
			 key + bytes <= hmp->voldata.volu_size);
		KKASSERT((key & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);
		bref->data_off = key | radix;

#if 0
		kprintf("alloc cp=%p %016jx %016jx using %016jx\n",
			chain,
			bref->key, bref->data_off, chain->bref.data_off);
#endif
	} else if (error == ENOSPC) {
		/*
		 * Return EAGAIN with next iteration in iter->bnext, or
		 * return ENOSPC if the allocation map has been exhausted.
		 */
		error = hammer2_freemap_iterate(trans, parentp, &chain, iter);
	}

	/*
	 * Cleanup
	 */
	if (chain)
		hammer2_chain_unlock(chain);
	return (error);
}

/*
 * Allocate (1<<radix) bytes from the bmap whose base data offset is (*basep).
 *
 * If the linear iterator is mid-block we use it directly (the bitmap should
 * already be marked allocated), otherwise we search for a block in the bitmap
 * that fits the allocation request.
 *
 * A partial bitmap allocation sets the minimum bitmap granularity (16KB)
 * to fully allocated and adjusts the linear allocator to allow the
 * remaining space to be allocated.
 */
static
int
hammer2_bmap_alloc(hammer2_trans_t *trans, hammer2_mount_t *hmp,
		   hammer2_bmap_data_t *bmap,
		   uint16_t class, int n, int radix, hammer2_key_t *basep)
{
	struct buf *bp;
	size_t size;
	size_t bsize;
	int bmradix;
	uint32_t bmmask;
	int offset;
	int i;
	int j;

	/*
	 * Take into account 2-bits per block when calculating bmradix.
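	 *
	 * Worked example (assuming the 16KB freemap block size, radix 14):
	 * allocations of 16KB or less always consume one 2-bit pair, while
	 * a 64KB allocation (radix 16) consumes 2 << (16 - 14) = 8 bits,
	 * matching the 4/8/16/32-bit cases noted below.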
	 */
	size = (size_t)1 << radix;

	if (radix <= HAMMER2_FREEMAP_BLOCK_RADIX) {
		bmradix = 2;
		bsize = HAMMER2_FREEMAP_BLOCK_SIZE;
		/* (16K) 2 bits per allocation block */
	} else {
		bmradix = 2 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
		bsize = size;
		/* (32K-256K) 4, 8, 16, 32 bits per allocation block */
	}

	/*
	 * Use the linear iterator to pack small allocations, otherwise
	 * fall-back to finding a free 16KB chunk.  The linear iterator
	 * is only valid when *NOT* on a freemap chunking boundary (16KB).
	 * If it is the bitmap must be scanned.  It can become invalid
	 * once we pack to the boundary.  We adjust it after a bitmap
	 * allocation only for sub-16KB allocations (so the perfectly good
	 * previous value can still be used for fragments when 16KB+
	 * allocations are made).
	 *
	 * Beware of hardware artifacts when bmradix == 32 (intermediate
	 * result can wind up being '1' instead of '0' if hardware masks
	 * bit-count & 31).
	 *
	 * NOTE: j needs to be even in the j= calculation.  As an artifact
	 *	 of the /2 division, our bitmask has to clear bit 0.
	 *
	 * NOTE: TODO this can leave little unallocatable fragments lying
	 *	 around.
	 */
	if (((uint32_t)bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) + size <=
	    HAMMER2_FREEMAP_BLOCK_SIZE &&
	    (bmap->linear & HAMMER2_FREEMAP_BLOCK_MASK) &&
	    bmap->linear < HAMMER2_SEGSIZE) {
		KKASSERT(bmap->linear >= 0 &&
			 bmap->linear + size <= HAMMER2_SEGSIZE &&
			 (bmap->linear & (HAMMER2_MIN_ALLOC - 1)) == 0);
		offset = bmap->linear;
		i = offset / (HAMMER2_SEGSIZE / 8);
		j = (offset / (HAMMER2_FREEMAP_BLOCK_SIZE / 2)) & 30;
		bmmask = (bmradix == 32) ?
			 0xFFFFFFFFU : (1 << bmradix) - 1;
		bmmask <<= j;
		bmap->linear = offset + size;
	} else {
		for (i = 0; i < 8; ++i) {
			bmmask = (bmradix == 32) ?
				 0xFFFFFFFFU : (1 << bmradix) - 1;
			for (j = 0; j < 32; j += bmradix) {
				if ((bmap->bitmap[i] & bmmask) == 0)
					goto success;
				bmmask <<= bmradix;
			}
		}
		/*fragments might remain*/
		/*KKASSERT(bmap->avail == 0);*/
		return (ENOSPC);
success:
		offset = i * (HAMMER2_SEGSIZE / 8) +
			 (j * (HAMMER2_FREEMAP_BLOCK_SIZE / 2));
		if (size & HAMMER2_FREEMAP_BLOCK_MASK)
			bmap->linear = offset + size;
	}

	KKASSERT(i >= 0 && i < 8);	/* 8 x 16 -> 128 x 16K -> 2MB */

	/*
	 * Optimize the buffer cache to avoid unnecessary read-before-write
	 * operations.
	 *
	 * The device block size could be larger than the allocation size
	 * so the actual bitmap test is somewhat more involved.  We have
	 * to use a compatible buffer size for this operation.
	 */
	if ((bmap->bitmap[i] & bmmask) == 0 &&
	    hammer2_devblksize(size) != size) {
		size_t psize = hammer2_devblksize(size);
		hammer2_off_t pmask = (hammer2_off_t)psize - 1;
		int pbmradix = 2 << (hammer2_devblkradix(radix) -
				     HAMMER2_FREEMAP_BLOCK_RADIX);
		uint32_t pbmmask;

		pbmmask = (pbmradix == 32) ?
			  0xFFFFFFFFU : (1 << pbmradix) - 1;
		while ((pbmmask & bmmask) == 0)
			pbmmask <<= pbmradix;

#if 0
		kprintf("%016jx mask %08x %08x %08x (%zd/%zd)\n",
			*basep + offset, bmap->bitmap[i],
			pbmmask, bmmask, size, psize);
#endif

		if ((bmap->bitmap[i] & pbmmask) == 0) {
			bp = getblk(hmp->devvp, *basep + (offset & ~pmask),
				    psize, GETBLK_NOWAIT, 0);
			if (bp) {
				if ((bp->b_flags & B_CACHE) == 0)
					vfs_bio_clrbuf(bp);
				bp->b_flags |= B_CACHE;
				bqrelse(bp);
			}
		}
	}

#if 0
	/*
	 * When initializing a new inode segment also attempt to initialize
	 * an adjacent segment.  Be careful not to index beyond the array
	 * bounds.
	 *
	 * We do this to try to localize inode accesses to improve
	 * directory scan rates.  XXX doesn't improve scan rates.
	 */
	if (size == HAMMER2_INODE_BYTES) {
		if (n & 1) {
			if (bmap[-1].radix == 0 && bmap[-1].avail)
				bmap[-1].radix = radix;
		} else {
			if (bmap[1].radix == 0 && bmap[1].avail)
				bmap[1].radix = radix;
		}
	}
#endif

	/*
	 * Adjust the linear iterator, set the radix if necessary (might as
	 * well just set it unconditionally), and adjust *basep to return the
	 * allocated data offset.
	 */
	bmap->bitmap[i] |= bmmask;
	bmap->class = class;
	bmap->avail -= size;
	*basep += offset;

	hammer2_voldata_lock(hmp);
	hmp->voldata.allocator_free -= size;	/* XXX */
	hammer2_voldata_unlock(hmp, 1);

	return(0);
}

static
void
hammer2_freemap_init(hammer2_trans_t *trans, hammer2_mount_t *hmp,
		     hammer2_key_t key, hammer2_chain_t *chain)
{
	hammer2_off_t l1size;
	hammer2_off_t lokey;
	hammer2_off_t hikey;
	hammer2_bmap_data_t *bmap;
	int count;

	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);

	/*
	 * Calculate the portion of the 2GB map that should be initialized
	 * as free.  Portions below or after will be initialized as allocated.
	 * SEGMASK-align the areas so we don't have to worry about sub-scans
	 * or endianness when using memset.
	 *
	 * (1) Ensure that all statically allocated space from newfs_hammer2
	 *     is marked allocated.
	 *
	 * (2) Ensure that the reserved area is marked allocated (typically
	 *     the first 4MB of the 2GB area being represented).
	 *
	 * (3) Ensure that any trailing space at the end-of-volume is marked
	 *     allocated.
	 *
	 * WARNING! It is possible for lokey to be larger than hikey if the
	 *	    entire 2GB segment is within the static allocation.
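	 *
	 *	    In that case every 2MB entry in the loop below falls
	 *	    outside the [lokey, hikey) range and is initialized as
	 *	    fully allocated.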
	 */
	lokey = (hmp->voldata.allocator_beg + HAMMER2_SEGMASK64) &
		~HAMMER2_SEGMASK64;

	if (lokey < H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
		    HAMMER2_ZONE_SEG64) {
		lokey = H2FMBASE(key, HAMMER2_FREEMAP_LEVEL1_RADIX) +
			HAMMER2_ZONE_SEG64;
	}

	hikey = key + H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	if (hikey > hmp->voldata.volu_size) {
		hikey = hmp->voldata.volu_size & ~HAMMER2_SEGMASK64;
	}

	chain->bref.check.freemap.avail =
		H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	bmap = &chain->data->bmdata[0];

	for (count = 0; count < HAMMER2_FREEMAP_COUNT; ++count) {
		if (key < lokey || key >= hikey) {
			memset(bmap->bitmap, -1,
			       sizeof(bmap->bitmap));
			bmap->avail = 0;
			bmap->linear = HAMMER2_SEGSIZE;
			chain->bref.check.freemap.avail -=
				H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
		} else {
			bmap->avail = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
		}
		key += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
		++bmap;
	}
}

/*
 * The current Level 1 freemap has been exhausted, so iterate to the next
 * one.  Return ENOSPC if no freemaps remain.
 *
 * XXX this should rotate back to the beginning to handle freed-up space
 * XXX or use intermediate entries to locate free space. TODO
 */
static int
hammer2_freemap_iterate(hammer2_trans_t *trans, hammer2_chain_t **parentp,
			hammer2_chain_t **chainp, hammer2_fiterate_t *iter)
{
	hammer2_mount_t *hmp = (*parentp)->hmp;

	iter->bnext &= ~(H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX) - 1);
	iter->bnext += H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	if (iter->bnext >= hmp->voldata.volu_size) {
		iter->bnext = 0;
		if (++iter->loops == 2)
			return (ENOSPC);
	}
	return(EAGAIN);
}

/*
 * Free the specified blockref.  This code is only able to fully free
 * blocks when (how) is non-zero, otherwise the block is marked for
 * the bulk freeing pass to check.
 *
 * Normal use is to only mark inodes as possibly being free.  The underlying
 * file blocks are not necessarily marked.  The bulk freescan can
 * theoretically handle the case.
 *
 * XXX currently disabled when how == 0 (the normal real-time case).  At
 * the moment we depend on the bulk freescan to actually free blocks.  It
 * will still call this routine with a non-zero how to stage possible frees
 * and to do the actual free.
 */
void
hammer2_freemap_free(hammer2_trans_t *trans, hammer2_mount_t *hmp,
		     hammer2_blockref_t *bref, int how)
{
	hammer2_off_t data_off = bref->data_off;
	hammer2_chain_t *chain;
	hammer2_chain_t *parent;
	hammer2_bmap_data_t *bmap;
	hammer2_key_t key;
	hammer2_key_t key_dummy;
	hammer2_off_t l0size;
	hammer2_off_t l1size;
	hammer2_off_t l1mask;
	uint32_t *bitmap;
	const uint32_t bmmask00 = 0;
	uint32_t bmmask01;
	uint32_t bmmask10;
	uint32_t bmmask11;
	size_t bytes;
	uint16_t class;
	int radix;
	int start;
	int count;
	int modified = 0;
	int cache_index = -1;

	radix = (int)data_off & HAMMER2_OFF_MASK_RADIX;
	data_off &= ~HAMMER2_OFF_MASK_RADIX;
	KKASSERT(radix <= HAMMER2_MAX_RADIX);

	bytes = (size_t)1 << radix;
	class = (bref->type << 8) | hammer2_devblkradix(radix);

	/*
	 * We can't free data allocated by newfs_hammer2.
	 * Assert validity.
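	 *
	 * These checks mirror the assertions made at allocation time in
	 * hammer2_freemap_try_alloc() (the radix has already been masked
	 * off of data_off above, so they operate on the raw media offset).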
	 */
	if (data_off < hmp->voldata.allocator_beg)
		return;
	KKASSERT((data_off & HAMMER2_ZONE_MASK64) >= HAMMER2_ZONE_SEG);

	/*
	 * Lookup the level1 freemap chain.  The chain must exist.
	 */
	key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL1_RADIX);
	l0size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL0_RADIX);
	l1size = H2FMSHIFT(HAMMER2_FREEMAP_LEVEL1_RADIX);
	l1mask = l1size - 1;

	parent = &hmp->fchain;
	hammer2_chain_lock(parent, HAMMER2_RESOLVE_ALWAYS);

	chain = hammer2_chain_lookup(&parent, &key_dummy, key, key + l1mask,
				     &cache_index,
				     HAMMER2_LOOKUP_FREEMAP |
				     HAMMER2_LOOKUP_ALWAYS |
				     HAMMER2_LOOKUP_MATCHIND/*XXX*/);
	if (chain == NULL) {
		kprintf("hammer2_freemap_free: %016jx: no chain\n",
			(intmax_t)bref->data_off);
		hammer2_chain_unlock(parent);
		return;
	}
	KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_FREEMAP_LEAF);

	/*
	 * Find the bmap entry (covering a 2MB swath)
	 * Find the bitmap array index
	 * Find the bitmap bit index (runs in 2-bit pairs)
	 */
	bmap = &chain->data->bmdata[(int)(data_off >> HAMMER2_SEGRADIX) &
				    (HAMMER2_FREEMAP_COUNT - 1)];
	bitmap = &bmap->bitmap[(int)(data_off >> (HAMMER2_SEGRADIX - 3)) & 7];

	start = ((int)(data_off >> HAMMER2_FREEMAP_BLOCK_RADIX) & 15) * 2;
	bmmask01 = 1 << start;
	bmmask10 = 2 << start;
	bmmask11 = 3 << start;

	/*
	 * Fixup the bitmap
	 */
	if (radix < HAMMER2_FREEMAP_BLOCK_RADIX) {
		count = 1;
		how = 0;	/* partial block, cannot set to 00 */
	} else {
		count = 1 << (radix - HAMMER2_FREEMAP_BLOCK_RADIX);
	}

	while (count) {
		KKASSERT(bmmask11);
		KKASSERT((*bitmap & bmmask11) != bmmask00);
		if ((*bitmap & bmmask11) == bmmask11) {
			if (!modified) {
				hammer2_chain_modify(trans, &chain, 0);
				modified = 1;
				bmap = &chain->data->bmdata[
					(int)(data_off >> HAMMER2_SEGRADIX) &
					(HAMMER2_FREEMAP_COUNT - 1)];
				bitmap = &bmap->bitmap[
					(int)(data_off >>
					      (HAMMER2_SEGRADIX - 3)) & 7];
			}
			if (how)
				*bitmap &= ~bmmask11;
			else
				*bitmap = (*bitmap & ~bmmask11) | bmmask10;
		} else if ((*bitmap & bmmask11) == bmmask10) {
			if (how) {
				if (!modified) {
					hammer2_chain_modify(trans, &chain, 0);
					modified = 1;
					bmap = &chain->data->bmdata[
					    (int)(data_off >>
						  HAMMER2_SEGRADIX) &
					    (HAMMER2_FREEMAP_COUNT - 1)];
					bitmap = &bmap->bitmap[
					    (int)(data_off >>
						  (HAMMER2_SEGRADIX - 3)) & 7];
				}
				*bitmap &= ~bmmask11;
			}
		} else if ((*bitmap & bmmask11) == bmmask01) {
			KKASSERT(0);
		}
		--count;
		bmmask01 <<= 2;
		bmmask10 <<= 2;
		bmmask11 <<= 2;
	}
	if (how && modified) {
		bmap->avail += 1 << radix;
		KKASSERT(bmap->avail <= HAMMER2_SEGSIZE);
		if (bmap->avail == HAMMER2_SEGSIZE &&
		    bmap->bitmap[0] == 0 &&
		    bmap->bitmap[1] == 0 &&
		    bmap->bitmap[2] == 0 &&
		    bmap->bitmap[3] == 0 &&
		    bmap->bitmap[4] == 0 &&
		    bmap->bitmap[5] == 0 &&
		    bmap->bitmap[6] == 0 &&
		    bmap->bitmap[7] == 0) {
			key = H2FMBASE(data_off, HAMMER2_FREEMAP_LEVEL0_RADIX);
			kprintf("Freeseg %016jx\n", (intmax_t)key);
			bmap->class = 0;
		}
	}

	/*
	 * chain->bref.check.freemap.bigmask (XXX)
	 */
	if (modified)
		chain->bref.check.freemap.bigmask |= 1 << radix;

	hammer2_chain_unlock(chain);
	hammer2_chain_unlock(parent);
}