Lines Matching defs:spa (each match is shown with its line number in the searched source file)
37 #include <sys/spa.h>
418 spa_t *spa = vd->vdev_spa;
419 objset_t *mos = spa->spa_meta_objset;
457 vdev_lookup_top(spa_t *spa, uint64_t vdev)
459 vdev_t *rvd = spa->spa_root_vdev;
461 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
502 vdev_count_leaves(spa_t *spa)
506 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
507 rc = vdev_count_leaves_impl(spa->spa_root_vdev);
508 spa_config_exit(spa, SCL_VDEV, FTAG);
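
The vdev_count_leaves() matches above (lines 502-508) show the usual bracket: take the pool config lock as reader, walk the vdev tree, drop the lock before returning the count. Below is a minimal userspace model of that discipline, not the ZFS API; the types and names (node_t, pool_t, count_leaves) are hypothetical stand-ins, and a pthread rwlock takes the place of spa_config_enter()/spa_config_exit().

```c
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for spa_t / vdev_t; not the ZFS structures. */
typedef struct node {
	struct node	*children;
	int		nchildren;
} node_t;

typedef struct pool {
	pthread_rwlock_t	config_lock;	/* models the SCL_VDEV config lock */
	node_t			*root;
} pool_t;

/* Walk the tree; a node with no children counts as a leaf. */
static int
count_leaves_impl(const node_t *n)
{
	if (n->nchildren == 0)
		return (1);
	int count = 0;
	for (int i = 0; i < n->nchildren; i++)
		count += count_leaves_impl(&n->children[i]);
	return (count);
}

/* Bracket the traversal with the reader lock, as vdev_count_leaves() does. */
static int
count_leaves(pool_t *p)
{
	pthread_rwlock_rdlock(&p->config_lock);
	int rc = count_leaves_impl(p->root);
	pthread_rwlock_unlock(&p->config_lock);
	return (rc);
}

int
main(void)
{
	node_t leaves[2] = {{ 0 }, { 0 }};
	node_t root = { .children = leaves, .nchildren = 2 };
	pool_t p = { .root = &root };

	pthread_rwlock_init(&p.config_lock, NULL);
	printf("leaves: %d\n", count_leaves(&p));
	pthread_rwlock_destroy(&p.config_lock);
	return (0);
}
```
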
586 spa_t *spa = cvd->vdev_spa;
587 list_remove(&spa->spa_leaf_list, cvd);
588 spa->spa_leaf_list_gen++;
639 vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
647 if (spa->spa_root_vdev == NULL) {
649 spa->spa_root_vdev = vd;
650 spa->spa_load_guid = spa_generate_load_guid();
654 if (spa->spa_root_vdev == vd) {
664 guid = spa_generate_guid(spa);
666 ASSERT(!spa_guid_exists(spa_guid(spa), guid));
669 vd->vdev_spa = spa;
739 txg_list_create(&vd->vdev_ms_list, spa,
741 txg_list_create(&vd->vdev_dtl_list, spa,
755 vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
768 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
803 if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
811 if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
814 if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
829 if (spa->spa_load_state != SPA_LOAD_CREATE &&
830 !spa_feature_is_enabled(spa,
838 spa->spa_load_state != SPA_LOAD_CREATE &&
839 !spa_feature_is_enabled(spa, SPA_FEATURE_DRAID)) {
851 rc = ops->vdev_op_init(spa, nv, &tsd);
857 vd = vdev_alloc_common(spa, id, guid, ops);
1026 if (spa_load_state(spa) == SPA_LOAD_OPEN ||
1027 spa_load_state(spa) == SPA_LOAD_IMPORT) {
1063 spa_t *spa = vd->vdev_spa;
1206 if (vd == spa->spa_root_vdev)
1207 spa->spa_root_vdev = NULL;
1218 spa_t *spa = svd->vdev_spa;
1304 if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
1305 (void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
1346 spa_t *spa = cvd->vdev_spa;
1350 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1352 mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
1449 vdev_spa_set_alloc(spa_t *spa, uint64_t min_alloc)
1451 if (min_alloc < spa->spa_min_alloc)
1452 spa->spa_min_alloc = min_alloc;
1453 if (spa->spa_gcd_alloc == INT_MAX) {
1454 spa->spa_gcd_alloc = min_alloc;
1456 spa->spa_gcd_alloc = vdev_gcd(min_alloc,
1457 spa->spa_gcd_alloc);
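
The vdev_spa_set_alloc() matches (lines 1449-1457) keep two running summaries of leaf vdev minimum allocation sizes: the smallest value seen (spa_min_alloc) and the GCD of all of them (spa_gcd_alloc), with INT_MAX acting as the "nothing recorded yet" sentinel. A standalone sketch of that arithmetic, using plain C with hypothetical names rather than the ZFS types:

```c
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Euclid's algorithm, mirroring what a vdev_gcd()-style helper computes. */
static uint64_t
gcd(uint64_t a, uint64_t b)
{
	while (b != 0) {
		uint64_t t = b;
		b = a % b;
		a = t;
	}
	return (a);
}

/* Running summaries; INT_MAX marks "no allocation size recorded yet". */
static uint64_t min_alloc = INT_MAX;
static uint64_t gcd_alloc = INT_MAX;

static void
set_alloc(uint64_t alloc)
{
	if (alloc < min_alloc)
		min_alloc = alloc;
	if (gcd_alloc == INT_MAX)
		gcd_alloc = alloc;	/* first device seen */
	else
		gcd_alloc = gcd(alloc, gcd_alloc);
}

int
main(void)
{
	/* e.g. a 4K-sector device followed by a 512B-sector device */
	set_alloc(4096);
	set_alloc(512);
	printf("min=%llu gcd=%llu\n",
	    (unsigned long long)min_alloc, (unsigned long long)gcd_alloc);
	return (0);
}
```

For these two inputs the sketch prints min=512 gcd=512; a mix of 4096 and 6144 would give gcd=2048, which is the kind of common divisor the pool-wide value tracks.
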
1464 spa_t *spa = vd->vdev_spa;
1480 mc = spa_log_class(spa);
1483 mc = spa_special_class(spa);
1486 mc = spa_dedup_class(spa);
1489 mc = spa_normal_class(spa);
1493 spa->spa_alloc_count);
1497 spa_embedded_log_class(spa), vd, 1);
1501 * The spa ashift min/max only apply for the normal metaslab
1506 mc == spa_normal_class(spa) && vd->vdev_aux == NULL) {
1507 if (vd->vdev_ashift > spa->spa_max_ashift)
1508 spa->spa_max_ashift = vd->vdev_ashift;
1509 if (vd->vdev_ashift < spa->spa_min_ashift)
1510 spa->spa_min_ashift = vd->vdev_ashift;
1513 vdev_spa_set_alloc(spa, min_alloc);
1521 spa_t *spa = vd->vdev_spa;
1528 ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
1558 error = dmu_read(spa->spa_meta_objset,
1583 if (vd->vdev_mg->mg_class == spa_normal_class(spa) &&
1620 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
1629 spa->spa_nonallocating_dspace += spa_deflate(spa) ?
1638 spa_config_exit(spa, SCL_ALLOC, FTAG);
1699 spa_t *spa = zio->io_spa;
1708 if (zio->io_error == 0 && spa_writeable(spa)) {
1730 (vdev_writeable(vd) || !spa_writeable(spa))) {
1736 spa, vd, NULL, NULL, 0);
1747 spa_async_request(spa, SPA_ASYNC_FAULT_VDEV);
1775 spa_t *spa = vd->vdev_spa;
1801 if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
1823 vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
2026 spa_t *spa = vd->vdev_spa;
2035 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2240 spa, vd, NULL, NULL, 0);
2262 (vd->vdev_expanding || spa->spa_autoexpand)) ||
2285 vdev_spa_set_alloc(spa, min_alloc);
2293 if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen)
2294 dsl_scan_assess_vdev(spa->spa_dsl_pool, vd);
2322 spa_t *spa = vd->vdev_spa;
2375 if (spa->spa_extreme_rewind || spa_last_synced_txg(spa) == 0 ||
2376 spa_config_held(spa, SCL_CONFIG, RW_WRITER) != SCL_CONFIG)
2379 txg = spa_last_synced_txg(spa);
2394 &aux_guid) == 0 && aux_guid == spa_guid(spa)) {
2412 * If config is not trusted then ignore the spa guid check. This is
2418 if (spa->spa_trust_config && guid != spa_guid(spa)) {
2424 (u_longlong_t)spa_guid(spa));
2469 if (spa->spa_trust_config && !spa->spa_extreme_rewind) {
2510 if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
2511 spa_load_state(spa) == SPA_LOAD_OPEN &&
2514 "for spa %s", (u_longlong_t)state, spa->spa_name);
2685 spa_t *spa __maybe_unused = vd->vdev_spa;
2689 spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2717 spa_t *spa = vd->vdev_spa;
2719 ASSERT(spa_is_root(spa));
2720 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
2750 spa_t *spa = vd->vdev_spa;
2752 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
2767 vd->vdev_aux == &spa->spa_l2cache) {
2776 l2arc_add_vdev(spa, vd);
2778 spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
2779 spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
2789 if (!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL) &&
2790 spa->spa_async_tasks & SPA_ASYNC_RESILVER) {
2791 mutex_enter(&spa->spa_async_lock);
2792 spa->spa_async_tasks &= ~SPA_ASYNC_RESILVER;
2793 mutex_exit(&spa->spa_async_lock);
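
The matches at 2789-2793 show the idiom for retracting a queued async task: once the check shows no resilver is needed, the task bit is cleared from spa_async_tasks while spa_async_lock is held. A small userspace model of a request/cancel flag word protected by a mutex; the names and flag values here are illustrative, not the ZFS API.

```c
#include <pthread.h>
#include <stdio.h>

#define	TASK_RESILVER	(1 << 0)
#define	TASK_REMOVE	(1 << 1)

static pthread_mutex_t	async_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int	async_tasks;

/* Queue a task bit, in the spirit of spa_async_request(). */
static void
async_request(unsigned int task)
{
	pthread_mutex_lock(&async_lock);
	async_tasks |= task;
	pthread_mutex_unlock(&async_lock);
}

/* Retract a task that turned out to be unnecessary. */
static void
async_cancel(unsigned int task)
{
	pthread_mutex_lock(&async_lock);
	async_tasks &= ~task;
	pthread_mutex_unlock(&async_lock);
}

int
main(void)
{
	async_request(TASK_RESILVER);
	/* ...later the check shows no resilver is actually needed... */
	async_cancel(TASK_RESILVER);
	printf("pending tasks: 0x%x\n", async_tasks);
	return (0);
}
```
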
3158 spa_t *spa = vd->vdev_spa;
3162 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
3168 if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
3172 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
3195 (u_longlong_t)scrub_txg, spa->spa_scrub_started,
3211 if (spa->spa_scrub_started ||
3375 spa_t *spa = vd->vdev_spa;
3376 objset_t *mos = spa->spa_meta_objset;
3386 if (spa->spa_mode == SPA_MODE_READ && !spa->spa_read_spacemaps)
3422 spa_t *spa = vd->vdev_spa;
3423 objset_t *mos = spa->spa_meta_objset;
3439 spa_activate_allocation_classes(spa, tx);
3446 spa_t *spa = vd->vdev_spa;
3448 VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
3449 VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3456 spa_t *spa = vd->vdev_spa;
3457 uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
3461 VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
3498 spa_t *spa = vd->vdev_spa;
3500 objset_t *mos = spa->spa_meta_objset;
3508 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
3560 vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
3561 "new object %llu", (u_longlong_t)txg, spa_name(spa),
3581 spa_t *spa = vd->vdev_spa;
3587 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
3589 if (vd == spa->spa_root_vdev || vd == tvd)
3731 spa_t *spa = vd->vdev_spa;
3734 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
3751 spa_t *spa = vd->vdev_spa;
3754 error = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
4022 spa_t *spa = vd->vdev_spa;
4026 ASSERT3U(txg, ==, spa_syncing_txg(spa));
4028 dmu_tx_t *tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
4061 spa_t *spa = vd->vdev_spa;
4065 ASSERT3U(txg, ==, spa->spa_syncing_txg);
4066 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
4091 vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
4112 (void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
4139 vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4143 spa_vdev_state_enter(spa, SCL_NONE);
4145 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4146 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4149 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4212 return (spa_vdev_state_exit(spa, vd, 0));
4221 vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
4225 spa_vdev_state_enter(spa, SCL_NONE);
4227 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4228 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4231 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4237 return (spa_vdev_state_exit(spa, NULL, 0));
4244 return (spa_vdev_state_exit(spa, vd, 0));
4248 vdev_remove_wanted(spa_t *spa, uint64_t guid)
4252 spa_vdev_state_enter(spa, SCL_NONE);
4254 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4255 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4262 return (spa_vdev_state_exit(spa, NULL, 0));
4268 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(EEXIST)));
4271 spa_async_request(spa, SPA_ASYNC_REMOVE);
4273 return (spa_vdev_state_exit(spa, vd, 0));
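
The vdev_fault()/vdev_degrade()/vdev_remove_wanted() matches (4139-4273) all follow the same shape: spa_vdev_state_enter() at the top, a GUID lookup, and every return path funneled through spa_vdev_state_exit() with the error code. The sketch below models that enter/exit idiom with a plain mutex; state_enter, state_exit, and fault_device are hypothetical stand-ins, not the ZFS functions.

```c
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Models spa_vdev_state_enter(): take the lock before touching device state. */
static void
state_enter(void)
{
	pthread_mutex_lock(&state_lock);
}

/*
 * Models spa_vdev_state_exit(): drop the lock and hand the error back, so
 * every return path can be written as "return (state_exit(err));".
 */
static int
state_exit(int err)
{
	pthread_mutex_unlock(&state_lock);
	return (err);
}

/* A fault-style operation: look up the device, validate, mutate, exit. */
static int
fault_device(unsigned long long guid)
{
	state_enter();

	if (guid == 0)			/* lookup failed */
		return (state_exit(ENODEV));
	if (guid == 1)			/* wrong kind of device */
		return (state_exit(ENOTSUP));

	/* ...mutate per-device state here, still under the lock... */
	return (state_exit(0));
}

int
main(void)
{
	printf("fault(0)  -> %d\n", fault_device(0));
	printf("fault(42) -> %d\n", fault_device(42));
	return (0);
}
```
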
4286 vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
4288 vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
4292 spa_vdev_state_enter(spa, SCL_NONE);
4294 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4295 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4310 spa->spa_autoexpand);
4330 if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
4334 return (spa_vdev_state_exit(spa, vd, ENOTSUP));
4335 spa->spa_ccw_fail_time = 0;
4336 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
4366 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
4373 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4374 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool) &&
4376 spa_async_request(spa, SPA_ASYNC_DETACH_SPARE);
4378 return (spa_vdev_state_exit(spa, vd, 0));
4382 vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
4390 spa_vdev_state_enter(spa, SCL_ALLOC);
4392 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
4393 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
4396 return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENOTSUP)));
4399 return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
4403 generation = spa->spa_config_generation + 1;
4416 return (spa_vdev_state_exit(spa, NULL,
4431 (void) spa_vdev_state_exit(spa, vd, 0);
4433 error = spa_reset_logs(spa);
4446 spa_vdev_state_enter(spa, SCL_ALLOC);
4451 if (error || generation != spa->spa_config_generation) {
4454 return (spa_vdev_state_exit(spa,
4456 (void) spa_vdev_state_exit(spa, vd, 0);
4475 return (spa_vdev_state_exit(spa, NULL,
4489 return (spa_vdev_state_exit(spa, vd, 0));
4493 vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
4497 mutex_enter(&spa->spa_vdev_top_lock);
4498 error = vdev_offline_locked(spa, guid, flags);
4499 mutex_exit(&spa->spa_vdev_top_lock);
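
The vdev_offline() matches (4493-4499) show the locked/unlocked wrapper pattern: the public entry point takes spa_vdev_top_lock, delegates to vdev_offline_locked(), and releases the lock around the result. A minimal sketch of that wrapper shape, with hypothetical names in place of the ZFS ones:

```c
#include <pthread.h>

static pthread_mutex_t top_lock = PTHREAD_MUTEX_INITIALIZER;

/* Does the real work; the caller must already hold top_lock. */
static int
offline_locked(unsigned long long guid, unsigned long long flags)
{
	(void) guid;
	(void) flags;
	/* ...may take and drop finer-grained locks internally... */
	return (0);
}

/* Public entry point: serialize top-level device operations, then delegate. */
static int
offline(unsigned long long guid, unsigned long long flags)
{
	int error;

	pthread_mutex_lock(&top_lock);
	error = offline_locked(guid, flags);
	pthread_mutex_unlock(&top_lock);

	return (error);
}

int
main(void)
{
	return (offline(42, 0));
}
```
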
4506 * vdev_offline(), we assume the spa config is locked. We also clear all
4510 vdev_clear(spa_t *spa, vdev_t *vd)
4512 vdev_t *rvd = spa->spa_root_vdev;
4514 ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
4526 vdev_clear(spa, vd->vdev_child[c]);
4563 !dsl_scan_resilvering(spa->spa_dsl_pool) &&
4564 !dsl_scan_resilver_scheduled(spa->spa_dsl_pool))
4565 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
4567 spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
4581 zfs_ereport_clear(spa, vd);
4887 spa_t *spa = zio->io_spa;
4888 vdev_t *rvd = spa->spa_root_vdev;
4940 dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
5060 spa->spa_claiming)) {
5068 * so we commit the DTL change in spa_syncing_txg(spa).
5069 * In the zil_claim() case, we commit in spa_first_txg(spa).
5080 ASSERT(spa_sync_pass(spa) == 1);
5082 commit_txg = spa_syncing_txg(spa);
5083 } else if (spa->spa_claiming) {
5085 commit_txg = spa_first_txg(spa);
5087 ASSERT(commit_txg >= spa_syncing_txg(spa));
5118 spa_t *spa = vd->vdev_spa;
5119 vdev_t *rvd = spa->spa_root_vdev;
5162 spa_t *spa = vd->vdev_spa;
5163 vdev_t *rvd = spa->spa_root_vdev;
5166 ASSERT(spa_writeable(spa));
5205 aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
5216 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5217 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5218 spa_config_held(spa, SCL_CONFIG, RW_READER)));
5228 list_insert_head(&spa->spa_config_dirty_list, vd);
5236 spa_t *spa = vd->vdev_spa;
5238 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
5239 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5240 spa_config_held(spa, SCL_CONFIG, RW_READER)));
5243 list_remove(&spa->spa_config_dirty_list, vd);
5255 spa_t *spa = vd->vdev_spa;
5257 ASSERT(spa_writeable(spa));
5266 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5267 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5268 spa_config_held(spa, SCL_STATE, RW_READER)));
5272 list_insert_head(&spa->spa_state_dirty_list, vd);
5278 spa_t *spa = vd->vdev_spa;
5280 ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
5281 (dsl_pool_sync_context(spa_get_dsl(spa)) &&
5282 spa_config_held(spa, SCL_STATE, RW_READER)));
5285 list_remove(&spa->spa_state_dirty_list, vd);
5294 spa_t *spa = vd->vdev_spa;
5295 vdev_t *rvd = spa->spa_root_vdev;
5312 (!vdev_writeable(child) && spa_writeable(spa))) {
5360 spa_t *spa = vd->vdev_spa;
5372 zfs_post_state_change(spa, vd, vd->vdev_prevstate);
5420 if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
5421 spa_load_state(spa) == SPA_LOAD_RECOVER) &&
5442 vd != spa->spa_root_vdev) {
5471 (void) zfs_ereport_post(class, spa, vd, NULL, NULL,
5494 zfs_post_state_change(spa, vd, save_state);
5625 spa_t *spa = vd->vdev_spa;
5639 if (delta > spa_deadman_synctime(spa))
5664 spa_t *spa = vd->vdev_spa;
5671 if (vd == spa->spa_root_vdev &&
5672 spa_feature_is_active(spa, SPA_FEATURE_RESILVER_DEFER)) {
5673 spa_feature_decr(spa, SPA_FEATURE_RESILVER_DEFER, tx);
5675 spa->spa_resilver_deferred = B_FALSE;
5840 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
5841 objset_t *mos = spa->spa_meta_objset;
5849 vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE);
5868 mutex_enter(&spa->spa_props_lock);
5889 spa_history_log_internal(spa, "vdev set", tx,
5905 spa_history_log_internal(spa, "vdev set", tx,
5919 spa_history_log_internal(spa, "vdev set", tx,
5931 mutex_exit(&spa->spa_props_lock);
5937 spa_t *spa = vd->vdev_spa;
5959 if ((vd = spa_lookup_by_guid(spa, vdev_guid, B_TRUE)) == NULL)
5994 error = spa_vdev_setpath(spa, vdev_guid, strval);
6004 error = spa_vdev_noalloc(spa, vdev_guid);
6006 error = spa_vdev_alloc(spa, vdev_guid);
6069 return (dsl_sync_task(spa->spa_name, NULL, vdev_props_set_sync,
6076 spa_t *spa = vd->vdev_spa;
6077 objset_t *mos = spa->spa_meta_objset;
6108 mutex_enter(&spa->spa_props_lock);
6517 mutex_exit(&spa->spa_props_lock);