Lines Matching defs:spa

Cross-reference matches for the symbol spa in the OpenZFS block cloning code (the BRT, Block Reference Table); the leading number on each match is its line number in the source file.
27 #include <sys/spa.h>
309 static void brt_vdevs_expand(spa_t *spa, uint64_t nvdevs);
312 brt_rlock(spa_t *spa)
314 rw_enter(&spa->spa_brt_lock, RW_READER);
318 brt_wlock(spa_t *spa)
320 rw_enter(&spa->spa_brt_lock, RW_WRITER);
324 brt_unlock(spa_t *spa)
326 rw_exit(&spa->spa_brt_lock);
422 brt_vdev(spa_t *spa, uint64_t vdevid, boolean_t alloc)
426 brt_rlock(spa);
427 if (vdevid < spa->spa_brt_nvdevs) {
428 brtvd = spa->spa_brt_vdevs[vdevid];
431 brt_unlock(spa);
432 brt_wlock(spa);
433 if (vdevid >= spa->spa_brt_nvdevs)
434 brt_vdevs_expand(spa, vdevid + 1);
435 brtvd = spa->spa_brt_vdevs[vdevid];
437 brt_unlock(spa);
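
brt_vdev() above (lines 422-437) is a read-mostly lookup with a write-path fallback: probe the array under the reader lock, and only on a miss (with alloc set) drop it, take the writer lock, and re-check before expanding, since another thread may have grown the array in the unlocked window. brt_vdevs_expand() (lines 663-703 below) performs the growth itself with the zalloc/memcpy/free idiom rather than a realloc. A standalone pthreads sketch of both halves; all names here are hypothetical, not the ones in the listing:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static void **slots;    /* grow-only pointer array, like spa_brt_vdevs */
    static size_t nslots;   /* like spa_brt_nvdevs */

    /* Grow to at least n slots; caller holds the write lock. */
    static void
    grow(size_t n)
    {
        void **nv = calloc(n, sizeof (*nv));
        if (nslots > 0) {
            memcpy(nv, slots, nslots * sizeof (*nv));
            free(slots);
        }
        slots = nv;
        nslots = n;
    }

    /* Mirrors brt_vdev(): reader lock first; on a miss, upgrade and re-check. */
    static void *
    slot_get(size_t idx, int alloc)
    {
        void *v = NULL;
        pthread_rwlock_rdlock(&lock);
        if (idx < nslots) {
            v = slots[idx];
        } else if (alloc) {
            /* rwlocks cannot be upgraded atomically: drop, re-take,
             * then re-check, as someone may have expanded meanwhile. */
            pthread_rwlock_unlock(&lock);
            pthread_rwlock_wrlock(&lock);
            if (idx >= nslots)
                grow(idx + 1);
            v = slots[idx];
        }
        pthread_rwlock_unlock(&lock);
        return (v);
    }

    int
    main(void)
    {
        (void) slot_get(3, 1);          /* miss -> upgrade -> grow to 4 */
        return (nslots == 4 ? 0 : 1);
    }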
442 brt_vdev_create(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx)
450 uint64_t mos_entries = zap_create_flags(spa->spa_meta_objset, 0,
454 VERIFY0(dnode_hold(spa->spa_meta_objset, mos_entries, brtvd,
467 brtvd->bv_mos_brtvdev = dmu_object_alloc(spa->spa_meta_objset,
476 VERIFY0(zap_add(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT, name,
480 spa_feature_incr(spa, SPA_FEATURE_BLOCK_CLONING, tx);
484 brt_vdev_realloc(spa_t *spa, brt_vdev_t *brtvd)
493 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
494 vd = vdev_lookup_top(spa, brtvd->bv_vdevid);
495 size = (vdev_get_min_asize(vd) - 1) / spa->spa_brt_rangesize + 1;
496 spa_config_exit(spa, SCL_VDEV, FTAG);
513 * dmu_free_range(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
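
brt_vdev_realloc() (lines 484-513) sizes the per-vdev range-counter array at line 495 with an integer ceiling division: one counter per spa_brt_rangesize-sized slice of the vdev, rounded up so a final partial slice still gets a slot. The (x - 1) / y + 1 form avoids both floating point and the overflow that the x + y - 1 form could hit near UINT64_MAX, at the cost of requiring x > 0 (a vdev's min asize is always nonzero). A standalone sketch with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t asize = 100ULL << 30;     /* hypothetical vdev min asize, 100 GiB */
        uint64_t rangesize = 16ULL << 20;  /* hypothetical rangesize, 16 MiB */

        /* Line 495: ceil(asize / rangesize) in integer arithmetic. */
        uint64_t size = (asize - 1) / rangesize + 1;

        printf("%llu counters\n", (unsigned long long)size);  /* 6400 */
        return (0);
    }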
540 brt_vdev_load(spa_t *spa, brt_vdev_t *brtvd)
549 error = dmu_bonus_hold(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
555 if (spa->spa_brt_rangesize == 0) {
556 spa->spa_brt_rangesize = bvphys->bvp_rangesize;
558 ASSERT3U(spa->spa_brt_rangesize, ==, bvphys->bvp_rangesize);
561 brt_vdev_realloc(spa, brtvd);
569 error = dmu_read(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0,
576 VERIFY0(dnode_hold(spa->spa_meta_objset, bvphys->bvp_mos_entries, brtvd,
616 brt_vdev_destroy(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx)
634 ASSERT0(zap_count(spa->spa_meta_objset, mos_entries, &count));
636 VERIFY0(zap_destroy(spa->spa_meta_objset, mos_entries, tx));
640 VERIFY0(dmu_object_free(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
649 VERIFY0(zap_remove(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
659 spa_feature_decr(spa, SPA_FEATURE_BLOCK_CLONING, tx);
663 brt_vdevs_expand(spa_t *spa, uint64_t nvdevs)
667 ASSERT(RW_WRITE_HELD(&spa->spa_brt_lock));
668 ASSERT3U(nvdevs, >=, spa->spa_brt_nvdevs);
670 if (nvdevs == spa->spa_brt_nvdevs)
673 vdevs = kmem_zalloc(sizeof (*spa->spa_brt_vdevs) * nvdevs, KM_SLEEP);
674 if (spa->spa_brt_nvdevs > 0) {
675 ASSERT(spa->spa_brt_vdevs != NULL);
677 memcpy(vdevs, spa->spa_brt_vdevs,
678 sizeof (*spa->spa_brt_vdevs) * spa->spa_brt_nvdevs);
679 kmem_free(spa->spa_brt_vdevs,
680 sizeof (*spa->spa_brt_vdevs) * spa->spa_brt_nvdevs);
682 spa->spa_brt_vdevs = vdevs;
684 for (uint64_t vdevid = spa->spa_brt_nvdevs; vdevid < nvdevs; vdevid++) {
698 spa->spa_brt_vdevs[vdevid] = brtvd;
702 (u_longlong_t)spa->spa_brt_nvdevs, (u_longlong_t)nvdevs);
703 spa->spa_brt_nvdevs = nvdevs;
707 brt_vdev_lookup(spa_t *spa, brt_vdev_t *brtvd, uint64_t offset)
709 uint64_t idx = offset / spa->spa_brt_rangesize;
718 brt_vdev_addref(spa_t *spa, brt_vdev_t *brtvd, const brt_entry_t *bre,
733 idx = BRE_OFFSET(bre) / spa->spa_brt_rangesize;
737 brt_vdev_realloc(spa, brtvd);
751 brt_vdev_decref(spa_t *spa, brt_vdev_t *brtvd, const brt_entry_t *bre,
767 idx = BRE_OFFSET(bre) / spa->spa_brt_rangesize;
779 brt_vdev_sync(spa_t *spa, brt_vdev_t *brtvd, dmu_tx_t *tx)
788 VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, brtvd->bv_mos_brtvdev,
795 dmu_write(spa->spa_meta_objset, brtvd->bv_mos_brtvdev, 0,
813 bvphys->bvp_rangesize = spa->spa_brt_rangesize;
822 brt_vdevs_free(spa_t *spa)
824 if (spa->spa_brt_vdevs == NULL)
826 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
827 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
842 kmem_free(spa->spa_brt_vdevs, sizeof (*spa->spa_brt_vdevs) *
843 spa->spa_brt_nvdevs);
875 brt_maybe_exists(spa_t *spa, const blkptr_t *bp)
878 if (spa->spa_brt_nvdevs == 0)
882 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE);
893 return (brt_vdev_lookup(spa, brtvd, off));
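
brt_maybe_exists() (lines 875-893) is the cheap pre-check done when blocks are freed: it maps the block's DVA offset to a per-range entry counter using the same offset / spa_brt_rangesize indexing as brt_vdev_lookup() (line 709), and a nonzero counter means "a cloned block may live in this range". False positives only cost a lookup in the on-disk BRT; false negatives would corrupt space accounting, so the counters must never undercount. A portable sketch of the idea; the 16 MiB range size is an assumption, and none of these names come from the listing:

    #include <stdint.h>
    #include <stdio.h>

    #define RANGESIZE  (16ULL << 20)   /* assumed BRT_RANGESIZE, 16 MiB */

    static uint16_t entcount[1024];    /* one counter per vdev range */

    /* Nonzero counter => some block in this range may be cloned. */
    static int
    maybe_exists(uint64_t offset)
    {
        return (entcount[offset / RANGESIZE] != 0);
    }

    int
    main(void)
    {
        entcount[(200ULL << 20) / RANGESIZE]++;   /* clone at ~200 MiB */
        printf("%d %d\n",
            maybe_exists(201ULL << 20),    /* 1: same 16 MiB range */
            maybe_exists(10ULL << 20));    /* 0: untouched range */
        return (0);
    }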
897 brt_get_dspace(spa_t *spa)
899 if (spa->spa_brt_nvdevs == 0)
902 brt_rlock(spa);
904 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++)
905 s += spa->spa_brt_vdevs[vdevid]->bv_savedspace;
906 brt_unlock(spa);
911 brt_get_used(spa_t *spa)
913 if (spa->spa_brt_nvdevs == 0)
916 brt_rlock(spa);
918 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++)
919 s += spa->spa_brt_vdevs[vdevid]->bv_usedspace;
920 brt_unlock(spa);
925 brt_get_saved(spa_t *spa)
927 return (brt_get_dspace(spa));
931 brt_get_ratio(spa_t *spa)
933 uint64_t used = brt_get_used(spa);
936 return ((used + brt_get_saved(spa)) * 100 / used);
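
brt_get_ratio() (lines 931-936) reports block cloning efficiency as a percentage: (used + saved) * 100 / used, where used is space still allocated for cloned blocks and saved is space that cloning avoided allocating. For example, 1 GiB used with 3 GiB saved gives 400, i.e. a 4.00x ratio. Since the formula divides by used, the elided lines 934-935 presumably guard the zero case; that guard is an assumption in this sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Line 936's formula; the used == 0 guard is assumed, not quoted. */
    static uint64_t
    ratio(uint64_t used, uint64_t saved)
    {
        if (used == 0)
            return (100);
        return ((used + saved) * 100 / used);
    }

    int
    main(void)
    {
        /* 1 GiB still referenced, 3 GiB saved by cloning -> 4.00x. */
        printf("%llu\n", (unsigned long long)ratio(1ULL << 30, 3ULL << 30));
        return (0);
    }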
1034 brt_entry_decref(spa_t *spa, const blkptr_t *bp)
1044 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE);
1096 brt_vdev_decref(spa, brtvd, bre, bp_get_dsize_sync(spa, bp));
1104 brt_entry_get_refcount(spa_t *spa, const blkptr_t *bp)
1112 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE);
1161 brt_pending_add(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx)
1171 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_TRUE);
1202 brt_pending_remove(spa_t *spa, const blkptr_t *bp, dmu_tx_t *tx)
1211 brt_vdev_t *brtvd = brt_vdev(spa, vdevid, B_FALSE);
1233 brt_pending_apply_vdev(spa_t *spa, brt_vdev_t *brtvd, uint64_t txg)
1254 if (!ddt_addref(spa, &bre->bre_bp))
1271 brt_vdev_lookup(spa, brtvd, off)) {
1294 brt_vdev_realloc(spa, brtvd);
1305 brt_vdev_addref(spa, brtvd, bre,
1306 bp_get_dsize(spa, &bre->bre_bp), bre->bre_pcount);
1312 brt_pending_apply(spa_t *spa, uint64_t txg)
1315 brt_rlock(spa);
1316 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
1317 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
1318 brt_unlock(spa);
1320 brt_pending_apply_vdev(spa, brtvd, txg);
1322 brt_rlock(spa);
1324 brt_unlock(spa);
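
brt_pending_apply() (lines 1312-1324) walks spa_brt_vdevs under the reader lock but drops it around the per-vdev work (which, per line 1254, first offers each pending entry to the dedup table via ddt_addref() before touching the BRT). Dropping the lock mid-walk is safe only because the array never shrinks and published brt_vdev_t pointers stay valid, so a cached element can be used unlocked while other threads grow the array; brt_sync_table() below (lines 1346-1384) uses the identical pattern. The same drop-and-resume walk as a standalone pthreads sketch, with hypothetical names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static int *slots[8];       /* grow-only, like spa_brt_vdevs */
    static size_t nslots = 3;   /* like spa_brt_nvdevs */

    static void
    apply_one(int *slot)
    {
        if (slot != NULL)       /* per-element work, done unlocked */
            (*slot)++;
    }

    static void
    apply_all(void)
    {
        pthread_rwlock_rdlock(&lock);
        for (size_t i = 0; i < nslots; i++) {
            int *slot = slots[i];           /* pointer stays valid... */
            pthread_rwlock_unlock(&lock);   /* ...so drop the lock */
            apply_one(slot);
            pthread_rwlock_rdlock(&lock);   /* re-check bounds next pass */
        }
        pthread_rwlock_unlock(&lock);
    }

    int
    main(void)
    {
        static int a, b, c;
        slots[0] = &a; slots[1] = &b; slots[2] = &c;
        apply_all();
        printf("%d %d %d\n", a, b, c);      /* 1 1 1 */
        return (0);
    }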
1346 brt_sync_table(spa_t *spa, dmu_tx_t *tx)
1350 brt_rlock(spa);
1351 for (uint64_t vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
1352 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
1353 brt_unlock(spa);
1358 brt_rlock(spa);
1366 brt_vdev_create(spa, brtvd, tx);
1379 brt_vdev_destroy(spa, brtvd, tx);
1381 brt_vdev_sync(spa, brtvd, tx);
1382 brt_rlock(spa);
1384 brt_unlock(spa);
1388 brt_sync(spa_t *spa, uint64_t txg)
1393 ASSERT3U(spa_syncing_txg(spa), ==, txg);
1395 brt_rlock(spa);
1396 for (vdevid = 0; vdevid < spa->spa_brt_nvdevs; vdevid++) {
1397 if (spa->spa_brt_vdevs[vdevid]->bv_meta_dirty)
1400 if (vdevid >= spa->spa_brt_nvdevs) {
1401 brt_unlock(spa);
1404 brt_unlock(spa);
1406 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1407 brt_sync_table(spa, tx);
1412 brt_alloc(spa_t *spa)
1414 rw_init(&spa->spa_brt_lock, NULL, RW_DEFAULT, NULL);
1415 spa->spa_brt_vdevs = NULL;
1416 spa->spa_brt_nvdevs = 0;
1417 spa->spa_brt_rangesize = 0;
1421 brt_create(spa_t *spa)
1423 brt_alloc(spa);
1424 spa->spa_brt_rangesize = BRT_RANGESIZE;
1428 brt_load(spa_t *spa)
1432 brt_alloc(spa);
1433 brt_wlock(spa);
1434 for (uint64_t vdevid = 0; vdevid < spa->spa_root_vdev->vdev_children;
1442 error = zap_lookup(spa->spa_meta_objset,
1453 brt_vdevs_expand(spa, spa->spa_root_vdev->vdev_children);
1454 brt_vdev_t *brtvd = spa->spa_brt_vdevs[vdevid];
1457 error = brt_vdev_load(spa, brtvd);
1463 if (spa->spa_brt_rangesize == 0)
1464 spa->spa_brt_rangesize = BRT_RANGESIZE;
1465 brt_unlock(spa);
1470 brt_unload(spa_t *spa)
1472 if (spa->spa_brt_rangesize == 0)
1474 brt_vdevs_free(spa);
1475 rw_destroy(&spa->spa_brt_lock);
1476 spa->spa_brt_rangesize = 0;
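
The lifecycle functions (brt_alloc/brt_create/brt_load/brt_unload, lines 1412-1476) use spa_brt_rangesize == 0 as a "not yet initialized" sentinel: brt_alloc() clears it, brt_create() and brt_load() set it, and brt_unload() returns early at line 1472 while it is still zero, making teardown safe to call even when load never completed; resetting it at line 1476 makes repeated unloads no-ops. A minimal sketch of that sentinel-guarded teardown; all names are hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pool {
        uint64_t rangesize;     /* 0 means "BRT never initialized" */
        void **vdevs;
    };

    static void
    pool_brt_load(struct pool *p, uint64_t rangesize)
    {
        p->vdevs = calloc(4, sizeof (void *));
        p->rangesize = rangesize;   /* set last: marks "loaded" */
    }

    static void
    pool_brt_unload(struct pool *p)
    {
        if (p->rangesize == 0)      /* mirrors the line-1472 guard */
            return;
        free(p->vdevs);
        p->vdevs = NULL;
        p->rangesize = 0;           /* later unload calls are no-ops */
    }

    int
    main(void)
    {
        struct pool p = { 0 };
        pool_brt_unload(&p);        /* safe no-op before load */
        pool_brt_load(&p, 16ULL << 20);
        pool_brt_unload(&p);
        pool_brt_unload(&p);        /* idempotent */
        printf("ok\n");
        return (0);
    }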