Lines Matching +full:write +full:- +full:1 +full:- +full:bps

9  * or https://opensource.org/licenses/CDDL-1.0.
54 * - Deduplication is automatic and Block Cloning is not - one has to use a
56 * - Deduplication keeps all data blocks in its table, even those referenced
61 * - Deduplication needs data to work - one needs to pass real data to the
62 * write(2) syscall, so the hash can be calculated. Block Cloning doesn't require
64 * neither the cost of reading the data, nor the cost of writing the data -
66 * - If the D (dedup) bit is not set in the block pointer, it means that
74 * - The BRT entry is much smaller than the DDT entry - for BRT we only store
76 * - Dedup keys are cryptographic hashes, so two blocks that are close to each
78 * The BRT entry keys are offsets into a single top-level VDEV, so data blocks
80 * - Scrub will only do a single pass over a block that is referenced multiple
85 * - Deduplication requires a cryptographically strong hash as a checksum or
92 * is a small number of top-level VDEVs and a large number of blocks stored in
94 * maintaining one BRT for each top-level VDEV, so we can then have only offset
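
The per-vdev split described above is what lets a BRT entry shrink to just an offset and a reference counter. A minimal user-space sketch of that shape (names here are illustrative, not the actual brt.c types, which also carry in-core state):

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative only: just the two 64-bit fields the comment describes. */
    struct brt_entry_sketch {
        uint64_t offset;    /* DVA offset within one top-level VDEV */
        uint64_t refcount;  /* number of additional references */
    };

    /*
     * One table per top-level VDEV, so the vdev id lives outside the key.
     * Entries sort by offset alone, which is why blocks of one file end up
     * with BRT entries close to each other, unlike hash-keyed DDT entries.
     */
    struct brt_table_sketch {
        uint64_t vdevid;
        struct brt_entry_sketch *entries;   /* kept sorted by offset */
        size_t nentries;
    };

    static int
    brt_entry_cmp_sketch(const void *a, const void *b)
    {
        uint64_t oa = ((const struct brt_entry_sketch *)a)->offset;
        uint64_t ob = ((const struct brt_entry_sketch *)b)->offset;
        return ((oa > ob) - (oa < ob));
    }
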
107 * top-level VDEV into 16MB regions. For each region we maintain a counter that
109 * creates an entry-count array of 16-bit numbers for each top-level VDEV.
111 * same transaction group as the BRT updates to keep everything in sync. We can
113 * 1TB VDEV the array requires only 128kB of memory (we may decide to decrease
128 * is not yet implemented - for now we will update the entire array if there was
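
To make that accounting concrete, here is a small stand-alone sketch (assuming the 16MB region size and 16-bit counters the comment describes) of the region-index computation and the 1TB-VDEV memory math; the round-up mirrors the size calculation visible at file line 495:

    #include <stdint.h>
    #include <stdio.h>

    #define RANGESIZE_SKETCH (16ULL * 1024 * 1024)  /* 16MB regions */

    /* Region index for an on-vdev offset, as in offset / spa_brt_rangesize. */
    static uint64_t
    region_idx_sketch(uint64_t offset)
    {
        return (offset / RANGESIZE_SKETCH);
    }

    int
    main(void)
    {
        uint64_t vdev_size = 1ULL << 40;                 /* 1TB VDEV */
        uint64_t nregions =
            (vdev_size - 1) / RANGESIZE_SKETCH + 1;      /* round up */

        /* 65536 regions x 2 bytes per 16-bit counter = 128kB. */
        printf("regions: %llu, counter array: %llukB\n",
            (unsigned long long)nregions,
            (unsigned long long)(nregions * sizeof (uint16_t) / 1024));
        printf("offset 48MB -> region %llu\n",
            (unsigned long long)region_idx_sketch(48ULL * 1024 * 1024));
        return (0);
    }
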
141 * references? To avoid this dilemma the BRT cooperates with the DDT - if a given block
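
The pending-apply code further down the listing (file lines 1252-1259) shows the shape of that cooperation: dedup-bit blocks hand their new references to the DDT first, and only fall back to the BRT when the DDT no longer holds the entry. A hedged stand-alone rendering, with stand-ins for blkptr_t, BP_GET_DEDUP() and ddt_addref():

    #include <stdbool.h>
    #include <stdint.h>

    struct bp_sketch {
        bool     dedup;      /* the D bit from the block pointer */
        uint64_t offset;
    };

    static bool
    ddt_addref_sketch(const struct bp_sketch *bp)
    {
        (void)bp;
        return (true);       /* pretend the DDT still holds the entry */
    }

    static void brt_addref_sketch(const struct bp_sketch *bp) { (void)bp; }

    /*
     * Give the new reference to the DDT when the D bit is set and the DDT
     * still knows the block; otherwise account for it in the BRT.
     */
    static void
    clone_addref_sketch(const struct bp_sketch *bp)
    {
        if (bp->dedup && ddt_addref_sketch(bp))
            return;
        brt_addref_sketch(bp);
    }
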
176 * block-aligned or we will return an error so the upper layer can
178 * Using copy_file_range(2) will call the OS-independent zfs_clone_range() function.
181 * function from the source file. Once we have BPs from the source file we call
183 * allocates BPs for us. We iterate over all source BPs. If the given BP is
184 * a hole or an embedded block, we just copy the BP as-is. If it points to a real
188 * We use this pending list to keep track of all BPs that got new references
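
A schematic of that iteration, with hypothetical types standing in for blkptr_t and the pending list: holes and embedded blocks carry everything inside the BP itself, so copying the BP is the entire clone, while real blocks are additionally remembered for a later BRT refcount bump.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct bp_sketch {
        bool     hole_or_embedded;  /* no separate on-disk data block */
        uint64_t vdevid, offset;
    };

    /* Stand-in for brt_pending_add(): queue a refcount bump for sync time. */
    static void pending_add_sketch(const struct bp_sketch *bp) { (void)bp; }

    static void
    clone_bps_sketch(const struct bp_sketch *src, struct bp_sketch *dst,
        size_t nbps)
    {
        for (size_t i = 0; i < nbps; i++) {
            dst[i] = src[i];                /* copy the BP as-is */
            if (src[i].hole_or_embedded)
                continue;                   /* nothing else to reference */
            pending_add_sketch(&src[i]);    /* new reference to a real block */
        }
    }
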
192 * - The block we want to clone may have been created within the same
195 * - The block we want to clone may have been modified within the same
197 * - A block may be cloned multiple times during one transaction group (that's
198 * why the pending list is actually a tree and not an append-only list (see the sketch below) - this
201 * - A block may be cloned and freed within the same transaction group
203 * - A block may be cloned and within the same transaction group the clone
205 * - A file might have been deleted, but the caller still has a file descriptor
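
The cases above are what the per-txg pending trees absorb. A simplified model of that bookkeeping (a linked list here, where brt.c uses AVL trees, with TXG_SIZE as in ZFS): repeated clones of the same block in one txg bump a pending count, and a clone followed by a free in the same txg drops it again.

    #include <stdint.h>
    #include <stdlib.h>

    #define TXG_SIZE 4              /* transaction groups in flight */
    #define TXG_MASK (TXG_SIZE - 1)

    /* Hypothetical pending entry: one per (txg, offset). */
    struct pending_entry {
        uint64_t offset;
        uint64_t pcount;            /* pending references in this txg */
        struct pending_entry *next;
    };

    struct pending_vdev {
        struct pending_entry *tree[TXG_SIZE];   /* one tree per open txg */
    };

    /* Clone: bump the pending count, creating the entry on first clone. */
    static void
    pending_add(struct pending_vdev *pv, uint64_t txg, uint64_t offset)
    {
        struct pending_entry **head = &pv->tree[txg & TXG_MASK];
        for (struct pending_entry *pe = *head; pe != NULL; pe = pe->next) {
            if (pe->offset == offset) {
                pe->pcount++;       /* cloned again in the same txg */
                return;
            }
        }
        struct pending_entry *pe = calloc(1, sizeof (*pe));
        if (pe == NULL)
            return;                 /* sketch: no error path */
        pe->offset = offset;
        pe->pcount = 1;
        pe->next = *head;
        *head = pe;
    }

    /* Free of a just-cloned block in the same txg: drop the pending count. */
    static void
    pending_remove(struct pending_vdev *pv, uint64_t txg, uint64_t offset)
    {
        struct pending_entry **pp = &pv->tree[txg & TXG_MASK];
        while (*pp != NULL && (*pp)->offset != offset)
            pp = &(*pp)->next;
        if (*pp != NULL && --(*pp)->pcount == 0) {
            struct pending_entry *pe = *pp;
            *pp = pe->next;
            free(pe);
        }
    }
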
217 * all the new clones to the BRT table - we load BRT entries and update
219 * function. This function will sync all dirty per-top-level-vdev BRTs,
224 * Every clone operation is divided into chunks (similar to write) and each
226 * how many BPs we can fit into a single ZIL entry.
228 * as when we log clone operations we cannot use the source object - it may
229 * reside on a different dataset, so we log the BPs we want to clone.
236 * entries, we will bump reference counters for their BPs in the BRT. Then
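
At sync time the pending counts are folded into the on-disk entries. A hedged sketch of what brt_sync_entry() effectively does per entry (ZAP calls stubbed out), matching the bre_count/bre_pcount arithmetic visible around file lines 1306-1341: a zero total means the last reference went away and the entry is removed.

    #include <stdint.h>

    /* Hypothetical in-memory entry, echoing bre_count/bre_pcount above. */
    struct bre_sketch {
        uint64_t count;     /* on-disk reference count, once loaded */
        uint64_t pcount;    /* references added this txg */
    };

    /* Stand-ins for the ZAP update/remove done at sync time. */
    static void zap_update_sketch(uint64_t off, uint64_t cnt)
    {
        (void)off; (void)cnt;
    }
    static void zap_remove_sketch(uint64_t off) { (void)off; }

    static void
    sync_entry_sketch(uint64_t offset, struct bre_sketch *bre)
    {
        bre->count += bre->pcount;      /* fold in this txg's clones */
        bre->pcount = 0;
        if (bre->count == 0)
            zap_remove_sketch(offset);  /* last reference is gone */
        else
            zap_update_sketch(offset, bre->count);
    }
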
250 static int brt_zap_prefetch = 1;
306 #define BRTSTAT_BUMP(stat) wmsum_add(&brt_sums.stat, 1)
314 rw_enter(&spa->spa_brt_lock, RW_READER);
320 rw_enter(&spa->spa_brt_lock, RW_WRITER);
326 rw_exit(&spa->spa_brt_lock);
333 ASSERT3U(idx, <, brtvd->bv_size);
335 if (unlikely(brtvd->bv_need_byteswap)) {
336 return (BSWAP_16(brtvd->bv_entcount[idx]));
338 return (brtvd->bv_entcount[idx]);
346 ASSERT3U(idx, <, brtvd->bv_size);
348 if (unlikely(brtvd->bv_need_byteswap)) {
349 brtvd->bv_entcount[idx] = BSWAP_16(entcnt);
351 brtvd->bv_entcount[idx] = entcnt;
360 ASSERT3U(idx, <, brtvd->bv_size);
365 brt_vdev_entcount_set(brtvd, idx, entcnt + 1);
373 ASSERT3U(idx, <, brtvd->bv_size);
378 brt_vdev_entcount_set(brtvd, idx, entcnt - 1);
387 uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
390 (u_longlong_t)brtvd->bv_vdevid,
391 brtvd->bv_meta_dirty, brtvd->bv_entcount_dirty,
392 (u_longlong_t)brtvd->bv_size,
393 (u_longlong_t)brtvd->bv_totalcount,
396 if (brtvd->bv_totalcount > 0) {
398 for (idx = 0; idx < brtvd->bv_size; idx++) {
406 if (brtvd->bv_entcount_dirty) {
409 bitmap = kmem_alloc(nblocks + 1, KM_SLEEP);
412 BT_TEST(brtvd->bv_bitmap, idx) ? 'x' : '.';
416 kmem_free(bitmap, nblocks + 1);
427 if (vdevid < spa->spa_brt_nvdevs) {
428 brtvd = spa->spa_brt_vdevs[vdevid];
433 if (vdevid >= spa->spa_brt_nvdevs)
434 brt_vdevs_expand(spa, vdevid + 1);
435 brtvd = spa->spa_brt_vdevs[vdevid];
446 ASSERT(brtvd->bv_initiated);
447 ASSERT0(brtvd->bv_mos_brtvdev);
448 ASSERT0(brtvd->bv_mos_entries);
450 uint64_t mos_entries = zap_create_flags(spa->spa_meta_objset, 0,
454 VERIFY0(dnode_hold(spa->spa_meta_objset, mos_entries, brtvd,
455 &brtvd->bv_mos_entries_dnode));
456 rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER);
457 brtvd->bv_mos_entries = mos_entries;
458 rw_exit(&brtvd->bv_mos_entries_lock);
460 (u_longlong_t)brtvd->bv_mos_entries);
467 brtvd->bv_mos_brtvdev = dmu_object_alloc(spa->spa_meta_objset,
470 VERIFY(brtvd->bv_mos_brtvdev != 0);
472 (u_longlong_t)brtvd->bv_mos_brtvdev);
475 (u_longlong_t)brtvd->bv_vdevid);
476 VERIFY0(zap_add(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, name,
477 sizeof (uint64_t), 1, &brtvd->bv_mos_brtvdev, tx));
491 ASSERT(RW_WRITE_HELD(&brtvd->bv_lock));
494 vd = vdev_lookup_top(spa, brtvd->bv_vdevid);
495 size = (vdev_get_min_asize(vd) - 1) / spa->spa_brt_rangesize + 1;
502 if (!brtvd->bv_initiated) {
503 ASSERT0(brtvd->bv_size);
504 ASSERT0P(brtvd->bv_entcount);
505 ASSERT0P(brtvd->bv_bitmap);
507 ASSERT(brtvd->bv_size > 0);
508 ASSERT(brtvd->bv_entcount != NULL);
509 ASSERT(brtvd->bv_bitmap != NULL);
512 * shrinking the on-disk BRT VDEV object.
513 * dmu_free_range(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
516 ASSERT3U(brtvd->bv_size, <=, size);
518 memcpy(entcount, brtvd->bv_entcount,
519 sizeof (entcount[0]) * MIN(size, brtvd->bv_size));
520 vmem_free(brtvd->bv_entcount,
521 sizeof (entcount[0]) * brtvd->bv_size);
522 onblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
523 memcpy(bitmap, brtvd->bv_bitmap, MIN(BT_SIZEOFMAP(nblocks),
525 kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(onblocks));
528 brtvd->bv_size = size;
529 brtvd->bv_entcount = entcount;
530 brtvd->bv_bitmap = bitmap;
531 if (!brtvd->bv_initiated) {
532 brtvd->bv_need_byteswap = FALSE;
533 brtvd->bv_initiated = TRUE;
535 (u_longlong_t)brtvd->bv_vdevid);
546 ASSERT(!brtvd->bv_initiated);
547 ASSERT(brtvd->bv_mos_brtvdev != 0);
549 error = dmu_bonus_hold(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
554 bvphys = db->db_data;
555 if (spa->spa_brt_rangesize == 0) {
556 spa->spa_brt_rangesize = bvphys->bvp_rangesize;
558 ASSERT3U(spa->spa_brt_rangesize, ==, bvphys->bvp_rangesize);
564 ASSERT3U(bvphys->bvp_size, <=, brtvd->bv_size);
569 error = dmu_read(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0,
570 MIN(brtvd->bv_size, bvphys->bvp_size) * sizeof (uint16_t),
571 brtvd->bv_entcount, DMU_READ_NO_PREFETCH);
575 ASSERT(bvphys->bvp_mos_entries != 0);
576 VERIFY0(dnode_hold(spa->spa_meta_objset, bvphys->bvp_mos_entries, brtvd,
577 &brtvd->bv_mos_entries_dnode));
578 rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER);
579 brtvd->bv_mos_entries = bvphys->bvp_mos_entries;
580 rw_exit(&brtvd->bv_mos_entries_lock);
581 brtvd->bv_need_byteswap =
582 (bvphys->bvp_byteorder != BRT_NATIVE_BYTEORDER);
583 brtvd->bv_totalcount = bvphys->bvp_totalcount;
584 brtvd->bv_usedspace = bvphys->bvp_usedspace;
585 brtvd->bv_savedspace = bvphys->bvp_savedspace;
590 (u_longlong_t)brtvd->bv_vdevid,
591 (u_longlong_t)brtvd->bv_mos_brtvdev,
592 (u_longlong_t)brtvd->bv_mos_entries);
599 ASSERT(RW_WRITE_HELD(&brtvd->bv_lock));
600 ASSERT(brtvd->bv_initiated);
601 ASSERT0(avl_numnodes(&brtvd->bv_tree));
603 vmem_free(brtvd->bv_entcount, sizeof (uint16_t) * brtvd->bv_size);
604 brtvd->bv_entcount = NULL;
605 uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
606 kmem_free(brtvd->bv_bitmap, BT_SIZEOFMAP(nblocks));
607 brtvd->bv_bitmap = NULL;
609 brtvd->bv_size = 0;
611 brtvd->bv_initiated = FALSE;
612 BRT_DEBUG("BRT VDEV %llu deallocated.", (u_longlong_t)brtvd->bv_vdevid);
621 ASSERT(brtvd->bv_initiated);
622 ASSERT(brtvd->bv_mos_brtvdev != 0);
623 ASSERT(brtvd->bv_mos_entries != 0);
624 ASSERT0(brtvd->bv_totalcount);
625 ASSERT0(brtvd->bv_usedspace);
626 ASSERT0(brtvd->bv_savedspace);
628 uint64_t mos_entries = brtvd->bv_mos_entries;
629 rw_enter(&brtvd->bv_mos_entries_lock, RW_WRITER);
630 brtvd->bv_mos_entries = 0;
631 rw_exit(&brtvd->bv_mos_entries_lock);
632 dnode_rele(brtvd->bv_mos_entries_dnode, brtvd);
633 brtvd->bv_mos_entries_dnode = NULL;
634 ASSERT0(zap_count(spa->spa_meta_objset, mos_entries, &count));
636 VERIFY0(zap_destroy(spa->spa_meta_objset, mos_entries, tx));
640 VERIFY0(dmu_object_free(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
643 (u_longlong_t)brtvd->bv_mos_brtvdev);
644 brtvd->bv_mos_brtvdev = 0;
645 brtvd->bv_entcount_dirty = FALSE;
648 (u_longlong_t)brtvd->bv_vdevid);
649 VERIFY0(zap_remove(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
653 brtvd->bv_meta_dirty = FALSE;
655 rw_enter(&brtvd->bv_lock, RW_WRITER);
657 rw_exit(&brtvd->bv_lock);
667 ASSERT(RW_WRITE_HELD(&spa->spa_brt_lock));
668 ASSERT3U(nvdevs, >=, spa->spa_brt_nvdevs);
670 if (nvdevs == spa->spa_brt_nvdevs)
673 vdevs = kmem_zalloc(sizeof (*spa->spa_brt_vdevs) * nvdevs, KM_SLEEP);
674 if (spa->spa_brt_nvdevs > 0) {
675 ASSERT(spa->spa_brt_vdevs != NULL);
677 memcpy(vdevs, spa->spa_brt_vdevs,
678 sizeof (*spa->spa_brt_vdevs) * spa->spa_brt_nvdevs);
679 kmem_free(spa->spa_brt_vdevs,
680 sizeof (*spa->spa_brt_vdevs) * spa->spa_brt_nvdevs);
682 spa->spa_brt_vdevs = vdevs;
684 for (uint64_t vdevid = spa->spa_brt_nvdevs; vdevid < nvdevs; vdevid++) {
686 rw_init(&brtvd->bv_lock, NULL, RW_DEFAULT, NULL);
687 brtvd->bv_vdevid = vdevid;
688 brtvd->bv_initiated = FALSE;
689 rw_init(&brtvd->bv_mos_entries_lock, NULL, RW_DEFAULT, NULL);
690 avl_create(&brtvd->bv_tree, brt_entry_compare,
693 avl_create(&brtvd->bv_pending_tree[i],
697 mutex_init(&brtvd->bv_pending_lock, NULL, MUTEX_DEFAULT, NULL);
698 spa->spa_brt_vdevs[vdevid] = brtvd;
702 (u_longlong_t)spa->spa_brt_nvdevs, (u_longlong_t)nvdevs);
703 spa->spa_brt_nvdevs = nvdevs;
709 uint64_t idx = offset / spa->spa_brt_rangesize;
710 if (idx < brtvd->bv_size) {
723 ASSERT(brtvd->bv_initiated);
725 brtvd->bv_savedspace += dsize * count;
726 brtvd->bv_meta_dirty = TRUE;
728 if (bre->bre_count > 0)
731 brtvd->bv_usedspace += dsize;
733 idx = BRE_OFFSET(bre) / spa->spa_brt_rangesize;
734 if (idx >= brtvd->bv_size) {
736 rw_enter(&brtvd->bv_lock, RW_WRITER);
738 rw_exit(&brtvd->bv_lock);
741 ASSERT3U(idx, <, brtvd->bv_size);
743 brtvd->bv_totalcount++;
745 brtvd->bv_entcount_dirty = TRUE;
747 BT_SET(brtvd->bv_bitmap, idx);
756 ASSERT(RW_WRITE_HELD(&brtvd->bv_lock));
757 ASSERT(brtvd->bv_initiated);
759 brtvd->bv_savedspace -= dsize;
760 brtvd->bv_meta_dirty = TRUE;
762 if (bre->bre_count > 0)
765 brtvd->bv_usedspace -= dsize;
767 idx = BRE_OFFSET(bre) / spa->spa_brt_rangesize;
768 ASSERT3U(idx, <, brtvd->bv_size);
770 ASSERT(brtvd->bv_totalcount > 0);
771 brtvd->bv_totalcount--;
773 brtvd->bv_entcount_dirty = TRUE;
775 BT_SET(brtvd->bv_bitmap, idx);
784 ASSERT(brtvd->bv_meta_dirty);
785 ASSERT(brtvd->bv_mos_brtvdev != 0);
788 VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
791 if (brtvd->bv_entcount_dirty) {
793 * TODO: Walk brtvd->bv_bitmap and write only the dirty blocks.
795 dmu_write(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0,
796 brtvd->bv_size * sizeof (brtvd->bv_entcount[0]),
797 brtvd->bv_entcount, tx);
798 uint64_t nblocks = BRT_RANGESIZE_TO_NBLOCKS(brtvd->bv_size);
799 memset(brtvd->bv_bitmap, 0, BT_SIZEOFMAP(nblocks));
800 brtvd->bv_entcount_dirty = FALSE;
804 bvphys = db->db_data;
805 bvphys->bvp_mos_entries = brtvd->bv_mos_entries;
806 bvphys->bvp_size = brtvd->bv_size;
807 if (brtvd->bv_need_byteswap) {
808 bvphys->bvp_byteorder = BRT_NON_NATIVE_BYTEORDER;
810 bvphys->bvp_byteorder = BRT_NATIVE_BYTEORDER;
812 bvphys->bvp_totalcount = brtvd->bv_totalcount;
813 bvphys->bvp_rangesize = spa->spa_brt_rangesize;
814 bvphys->bvp_usedspace = brtvd->bv_usedspace;
815 bvphys->bvp_savedspace = brtvd->bv_savedspace;
818 brtvd->bv_meta_dirty = FALSE;
824 if (spa->spa_brt_vdevs == 0)
826 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
827 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
828 rw_enter(&brtvd->bv_lock, RW_WRITER);
829 if (brtvd->bv_initiated)
831 rw_exit(&brtvd->bv_lock);
832 rw_destroy(&brtvd->bv_lock);
833 if (brtvd->bv_mos_entries != 0)
834 dnode_rele(brtvd->bv_mos_entries_dnode, brtvd);
835 rw_destroy(&brtvd->bv_mos_entries_lock);
836 avl_destroy(&brtvd->bv_tree);
838 avl_destroy(&brtvd->bv_pending_tree[i]);
839 mutex_destroy(&brtvd->bv_pending_lock);
842 kmem_free(spa->spa_brt_vdevs, sizeof (*spa->spa_brt_vdevs) *
843 spa->spa_brt_nvdevs);
850 bre->bre_bp = *bp;
851 bre->bre_count = 0;
852 bre->bre_pcount = 0;
854 *vdevidp = DVA_GET_VDEV(&bp->blk_dva[0]);
862 if (brtvd->bv_mos_entries == 0)
865 return (zap_lookup_uint64_by_dnode(brtvd->bv_mos_entries_dnode,
866 &off, BRT_KEY_WORDS, 1, sizeof (bre->bre_count), &bre->bre_count));
878 if (spa->spa_brt_nvdevs == 0)
881 uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]);
883 if (brtvd == NULL || !brtvd->bv_initiated)
892 uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[0]);
899 if (spa->spa_brt_nvdevs == 0)
904 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++)
905 s += spa->spa_brt_vdevs[vdevid]->bv_savedspace;
913 if (spa->spa_brt_nvdevs == 0)
918 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++)
919 s += spa->spa_brt_vdevs[vdevid]->bv_usedspace;
942 brt_stats_t *bs = ksp->ks_data;
947 bs->brt_addref_entry_not_on_disk.value.ui64 =
949 bs->brt_addref_entry_on_disk.value.ui64 =
951 bs->brt_decref_entry_in_memory.value.ui64 =
953 bs->brt_decref_entry_loaded_from_disk.value.ui64 =
955 bs->brt_decref_entry_not_in_memory.value.ui64 =
957 bs->brt_decref_entry_read_lost_race.value.ui64 =
959 bs->brt_decref_entry_still_referenced.value.ui64 =
961 bs->brt_decref_free_data_later.value.ui64 =
963 bs->brt_decref_free_data_now.value.ui64 =
965 bs->brt_decref_no_entry.value.ui64 =
989 brt_ksp->ks_data = &brt_stats;
990 brt_ksp->ks_update = brt_kstats_update;
1047 rw_enter(&brtvd->bv_lock, RW_WRITER);
1048 ASSERT(brtvd->bv_initiated);
1049 bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
1056 rw_exit(&brtvd->bv_lock);
1066 rw_enter(&brtvd->bv_lock, RW_WRITER);
1067 racebre = avl_find(&brtvd->bv_tree, &bre_search, &where);
1077 bre->bre_bp = bre_search.bre_bp;
1078 bre->bre_count = bre_search.bre_count;
1079 bre->bre_pcount = 0;
1080 avl_insert(&brtvd->bv_tree, bre, where);
1083 if (bre->bre_count == 0) {
1084 rw_exit(&brtvd->bv_lock);
1089 bre->bre_pcount--;
1090 ASSERT(bre->bre_count > 0);
1091 bre->bre_count--;
1092 if (bre->bre_count == 0)
1098 rw_exit(&brtvd->bv_lock);
1115 rw_enter(&brtvd->bv_lock, RW_READER);
1116 ASSERT(brtvd->bv_initiated);
1117 bre = avl_find(&brtvd->bv_tree, &bre_search, NULL);
1119 rw_exit(&brtvd->bv_lock);
1128 refcnt = bre->bre_count;
1129 rw_exit(&brtvd->bv_lock);
1138 if (!brt_zap_prefetch || brtvd->bv_mos_entries == 0)
1141 uint64_t off = DVA_GET_OFFSET(&bp->blk_dva[0]);
1142 rw_enter(&brtvd->bv_mos_entries_lock, RW_READER);
1143 if (brtvd->bv_mos_entries != 0) {
1144 (void) zap_prefetch_uint64_by_dnode(brtvd->bv_mos_entries_dnode,
1147 rw_exit(&brtvd->bv_mos_entries_lock);
1154 const blkptr_t *bp1 = &bre1->bre_bp, *bp2 = &bre2->bre_bp;
1156 return (TREE_CMP(DVA_GET_OFFSET(&bp1->blk_dva[0]),
1157 DVA_GET_OFFSET(&bp2->blk_dva[0])));
1170 uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]);
1172 avl_tree_t *pending_tree = &brtvd->bv_pending_tree[txg & TXG_MASK];
1175 newbre->bre_bp = *bp;
1176 newbre->bre_count = 0;
1177 newbre->bre_pcount = 1;
1179 mutex_enter(&brtvd->bv_pending_lock);
1185 bre->bre_pcount++;
1187 mutex_exit(&brtvd->bv_pending_lock);
1210 uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[0]);
1213 avl_tree_t *pending_tree = &brtvd->bv_pending_tree[txg & TXG_MASK];
1217 mutex_enter(&brtvd->bv_pending_lock);
1220 ASSERT(bre->bre_pcount > 0);
1221 bre->bre_pcount--;
1222 if (bre->bre_pcount == 0)
1226 mutex_exit(&brtvd->bv_pending_lock);
1241 ASSERT(avl_is_empty(&brtvd->bv_tree));
1242 avl_swap(&brtvd->bv_tree, &brtvd->bv_pending_tree[txg & TXG_MASK]);
1244 for (bre = avl_first(&brtvd->bv_tree); bre; bre = nbre) {
1245 nbre = AVL_NEXT(&brtvd->bv_tree, bre);
1252 if (BP_GET_DEDUP(&bre->bre_bp)) {
1253 while (bre->bre_pcount > 0) {
1254 if (!ddt_addref(spa, &bre->bre_bp))
1256 bre->bre_pcount--;
1258 if (bre->bre_pcount == 0) {
1259 avl_remove(&brtvd->bv_tree, bre);
1270 if (brtvd->bv_mos_entries != 0 &&
1273 brtvd->bv_mos_entries_dnode, &off,
1274 BRT_KEY_WORDS, 1, sizeof (bre->bre_count),
1275 &bre->bre_count);
1289 if (avl_is_empty(&brtvd->bv_tree))
1292 if (!brtvd->bv_initiated) {
1293 rw_enter(&brtvd->bv_lock, RW_WRITER);
1295 rw_exit(&brtvd->bv_lock);
1303 for (bre = avl_first(&brtvd->bv_tree); bre;
1304 bre = AVL_NEXT(&brtvd->bv_tree, bre)) {
1306 bp_get_dsize(spa, &bre->bre_bp), bre->bre_pcount);
1307 bre->bre_count += bre->bre_pcount;
1316 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
1317 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
1332 if (bre->bre_pcount == 0) {
1334 } else if (bre->bre_count == 0) {
1340 BRT_KEY_WORDS, 1, sizeof (bre->bre_count),
1341 &bre->bre_count, tx));
1351 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
1352 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
1355 if (!brtvd->bv_meta_dirty) {
1356 ASSERT(!brtvd->bv_entcount_dirty);
1357 ASSERT0(avl_numnodes(&brtvd->bv_tree));
1362 ASSERT(!brtvd->bv_entcount_dirty ||
1363 avl_numnodes(&brtvd->bv_tree) != 0);
1365 if (brtvd->bv_mos_brtvdev == 0)
1369 while ((bre = avl_destroy_nodes(&brtvd->bv_tree, &c)) != NULL) {
1370 brt_sync_entry(brtvd->bv_mos_entries_dnode, bre, tx);
1378 if (brtvd->bv_totalcount == 0)
1396 for (vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
1397 if (spa->spa_brt_vdevs[vdevid]->bv_meta_dirty)
1400 if (vdevid >= spa->spa_brt_nvdevs) {
1406 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1414 rw_init(&spa->spa_brt_lock, NULL, RW_DEFAULT, NULL);
1415 spa->spa_brt_vdevs = NULL;
1416 spa->spa_brt_nvdevs = 0;
1417 spa->spa_brt_rangesize = 0;
1424 spa->spa_brt_rangesize = BRT_RANGESIZE;
1434 for (uint64_t vdevid = 0; vdevid < spa->spa_root_vdev->vdev_children;
1442 error = zap_lookup(spa->spa_meta_objset,
1443 DMU_POOL_DIRECTORY_OBJECT, name, sizeof (uint64_t), 1,
1453 brt_vdevs_expand(spa, spa->spa_root_vdev->vdev_children);
1454 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
1455 rw_enter(&brtvd->bv_lock, RW_WRITER);
1456 brtvd->bv_mos_brtvdev = mos_brtvdev;
1458 rw_exit(&brtvd->bv_lock);
1463 if (spa->spa_brt_rangesize == 0)
1464 spa->spa_brt_rangesize = BRT_RANGESIZE;
1472 if (spa->spa_brt_rangesize == 0)
1475 rw_destroy(&spa->spa_brt_lock);
1476 spa->spa_brt_rangesize = 0;