Lines Matching +full:ps +full:- +full:hold
9 * or https://opensource.org/licenses/CDDL-1.0.
79 * - Lookup a spa_t by name
80 * - Add or remove a spa_t from the namespace
81 * - Increase spa_refcount from non-zero
82 * - Check if spa_refcount is zero
83 * - Rename a spa_t
84 * - add/remove/attach/detach devices
85 * - Held for the duration of create/destroy
86 * - Held at the start and end of import and export
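The operations listed above share a simple pattern: take spa_namespace_lock, act, drop it. A minimal sketch of a lookup under the namespace lock (hypothetical helper; spa_lookup() itself asserts the lock is held):

	static boolean_t
	example_pool_exists(const char *name)
	{
		boolean_t found;

		mutex_enter(&spa_namespace_lock);
		found = (spa_lookup(name) != NULL);
		mutex_exit(&spa_namespace_lock);

		return (found);
	}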
93 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
96 * spa_t cannot be destroyed or freed while this is non-zero. Internally,
97 * the refcount is never really 'zero' - opening a pool implicitly keeps
99 * present the image of a zero/non-zero value to consumers.
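The hold itself is taken and dropped with spa_open_ref() and spa_close(). A hedged sketch of the lifecycle, relying on the rule above that the refcount may only be increased from non-zero while the namespace lock protects the spa_t:

	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) != NULL)
		spa_open_ref(spa, FTAG);	/* bumps spa_refcount */
	mutex_exit(&spa_namespace_lock);

	if (spa != NULL) {
		/* the spa_t cannot be freed while this hold is outstanding */
		spa_close(spa, FTAG);
	}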
101 * spa_config_lock[] (per-spa array of rwlocks)
106 * - RW_READER to perform I/O to the spa
107 * - RW_WRITER to change the vdev config
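Illustrative use of the reader side (a sketch; the choice of SCL_VDEV here is arbitrary, any of the SCL_* locks can be held as reader to pin the configuration):

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/* ... traverse spa->spa_root_vdev, issue I/O, ... */
	spa_config_exit(spa, SCL_VDEV, FTAG);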
111 * spa_namespace_lock -> spa_refcount
116 * spa_refcount -> spa_config_lock[]
121 * spa_namespace_lock -> spa_config_lock[]
164 * To read the configuration, it suffices to hold one of these locks as reader.
165 * To modify the configuration, you must hold all locks as writer. To modify
167 * you must hold SCL_STATE and SCL_ZIO as writer.
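In code, the all-locks writer rule is normally expressed with SCL_ALL, and the leaf-state rule with SCL_STATE | SCL_ZIO (a sketch):

	/* Modify the vdev configuration. */
	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	/* ... add/remove/attach/detach ... */
	spa_config_exit(spa, SCL_ALL, FTAG);

	/* Modify the state of an existing leaf vdev. */
	spa_config_enter(spa, SCL_STATE | SCL_ZIO, FTAG, RW_WRITER);
	/* ... */
	spa_config_exit(spa, SCL_STATE | SCL_ZIO, FTAG);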
172 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
177 * They do, however, obey the usual write-wanted semantics to prevent
197 * Held by bp-level zios (those which have no io_vd upon entry)
198 * to prevent changes to the vdev tree. The bp-level zio implicitly
199 * protects all of its vdev child zios, which do not hold SCL_ZIO.
220 * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
221 * or zio_write_phys() -- the caller must ensure that the config cannot
235 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
264 * otherwise-fatal errors, typically caused by on-disk corruption. When
276 * all remaining space to free from the error-encountering filesystem is
289 * e.g. a top-level vdev going offline), or (b) have localized,
334 * wait - Wait for the "hung" I/O (default)
335 * continue - Attempt to recover from a "hung" I/O
336 * panic - Panic the system
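These are the accepted values of the zfs_deadman_failmode tunable (parsed by spa_set_deadman_failmode(), excerpted further down). A hedged sketch of how the deadman acts on the setting, using the ZIO_FAILURE_MODE_* constants from sys/zio.h; the real dispatch is in zio_deadman():

	switch (spa_get_deadman_failmode(spa)) {
	case ZIO_FAILURE_MODE_WAIT:
		/* "wait": log the hung I/O and keep waiting (default) */
		break;
	case ZIO_FAILURE_MODE_CONTINUE:
		/* "continue": attempt to resume the hung I/O */
		break;
	case ZIO_FAILURE_MODE_PANIC:
		/* "panic": take the system down so a dump can be captured */
		break;
	}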
341 * The worst case is single-sector max-parity RAID-Z blocks, in which
355 * to the MOS). It also limits the worst-case time to allocate space. If we
377 * completely out of space, causing it to be permanently read-only.
401 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>.
415 zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
416 spa->spa_trust_config ? "trusted" : "untrusted", buf);
429 zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
430 spa->spa_trust_config ? "trusted" : "untrusted", buf);
443 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
457 spa_config_lock_t *scl = &spa->spa_config_lock[i];
458 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
459 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
460 scl->scl_writer = NULL;
461 scl->scl_write_wanted = 0;
462 scl->scl_count = 0;
470 spa_config_lock_t *scl = &spa->spa_config_lock[i];
471 mutex_destroy(&scl->scl_lock);
472 cv_destroy(&scl->scl_cv);
473 ASSERT(scl->scl_writer == NULL);
474 ASSERT(scl->scl_write_wanted == 0);
475 ASSERT(scl->scl_count == 0);
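The two fragments above are the matching halves of the config-lock lifecycle, spa_config_lock_init() and spa_config_lock_destroy(): each of the SCL_LOCKS slots pairs a mutex with a condition variable, and the destructor's ASSERTs require that no holder, writer, or write-waiter remains when the pool is torn down.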
483 spa_config_lock_t *scl = &spa->spa_config_lock[i];
486 mutex_enter(&scl->scl_lock);
488 if (scl->scl_writer || scl->scl_write_wanted) {
489 mutex_exit(&scl->scl_lock);
490 spa_config_exit(spa, locks & ((1 << i) - 1),
495 ASSERT(scl->scl_writer != curthread);
496 if (scl->scl_count != 0) {
497 mutex_exit(&scl->scl_lock);
498 spa_config_exit(spa, locks & ((1 << i) - 1),
502 scl->scl_writer = curthread;
504 scl->scl_count++;
505 mutex_exit(&scl->scl_lock);
520 spa_config_lock_t *scl = &spa->spa_config_lock[i];
521 if (scl->scl_writer == curthread)
525 mutex_enter(&scl->scl_lock);
527 while (scl->scl_writer ||
528 (!mmp_flag && scl->scl_write_wanted)) {
529 cv_wait(&scl->scl_cv, &scl->scl_lock);
532 ASSERT(scl->scl_writer != curthread);
533 while (scl->scl_count != 0) {
534 scl->scl_write_wanted++;
535 cv_wait(&scl->scl_cv, &scl->scl_lock);
536 scl->scl_write_wanted--;
538 scl->scl_writer = curthread;
540 scl->scl_count++;
541 mutex_exit(&scl->scl_lock);
571 for (int i = SCL_LOCKS - 1; i >= 0; i--) {
572 spa_config_lock_t *scl = &spa->spa_config_lock[i];
575 mutex_enter(&scl->scl_lock);
576 ASSERT(scl->scl_count > 0);
577 if (--scl->scl_count == 0) {
578 ASSERT(scl->scl_writer == NULL ||
579 scl->scl_writer == curthread);
580 scl->scl_writer = NULL; /* OK in either case */
581 cv_broadcast(&scl->scl_cv);
583 mutex_exit(&scl->scl_lock);
593 spa_config_lock_t *scl = &spa->spa_config_lock[i];
596 if ((rw == RW_READER && scl->scl_count != 0) ||
597 (rw == RW_WRITER && scl->scl_writer == curthread))
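spa_config_held() returns the subset of the requested locks actually held in the given mode, so it is used almost exclusively in assertions. A common pattern (sketch):

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);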
640 * Avoid racing with import/export, which don't hold the namespace
643 if ((spa->spa_load_thread != NULL &&
644 spa->spa_load_thread != curthread) ||
645 (spa->spa_export_thread != NULL &&
646 spa->spa_export_thread != curthread)) {
669 (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
670 (u_longlong_t)++spa->spa_deadman_calls);
672 vdev_deadman(spa->spa_root_vdev, FTAG);
674 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
685 return (TREE_CMP(a->sls_txg, b->sls_txg));
703 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
704 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
705 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
706 mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
707 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
708 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
709 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
710 mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
711 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
712 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
713 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
714 mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
715 mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
716 mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
718 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
719 cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
720 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
721 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
722 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
723 cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
724 cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
727 bplist_create(&spa->spa_free_bplist[t]);
729 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
730 spa->spa_state = POOL_STATE_UNINITIALIZED;
731 spa->spa_freeze_txg = UINT64_MAX;
732 spa->spa_final_txg = UINT64_MAX;
733 spa->spa_load_max_txg = UINT64_MAX;
734 spa->spa_proc = &p0;
735 spa->spa_proc_state = SPA_PROC_NONE;
736 spa->spa_trust_config = B_TRUE;
737 spa->spa_hostid = zone_get_hostid(NULL);
739 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
740 spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
744 zfs_refcount_create(&spa->spa_refcount);
755 spa->spa_root = spa_strdup(altroot);
758 spa->spa_alloc_count = MAX(MIN(spa_num_allocators,
761 spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
763 for (int i = 0; i < spa->spa_alloc_count; i++) {
764 mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
766 avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
769 if (spa->spa_alloc_count > 1) {
770 spa->spa_allocs_use = kmem_zalloc(offsetof(spa_allocs_use_t,
771 sau_inuse[spa->spa_alloc_count]), KM_SLEEP);
772 mutex_init(&spa->spa_allocs_use->sau_lock, NULL, MUTEX_DEFAULT,
776 avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
778 avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
780 list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
786 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
790 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
791 list_insert_head(&spa->spa_config_list, dp);
793 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
801 VERIFY(nvlist_dup(features, &spa->spa_label_features,
805 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
808 if (spa->spa_label_features == NULL) {
809 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
813 spa->spa_min_ashift = INT_MAX;
814 spa->spa_max_ashift = 0;
815 spa->spa_min_alloc = INT_MAX;
816 spa->spa_gcd_alloc = INT_MAX;
819 spa->spa_dedup_dspace = ~0ULL;
827 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
830 list_create(&spa->spa_leaf_list, sizeof (vdev_t),
848 ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
849 ASSERT0(spa->spa_waiters);
851 nvlist_free(spa->spa_config_splitting);
855 if (spa->spa_root)
856 spa_strfree(spa->spa_root);
858 while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
859 if (dp->scd_path != NULL)
860 spa_strfree(dp->scd_path);
864 for (int i = 0; i < spa->spa_alloc_count; i++) {
865 avl_destroy(&spa->spa_allocs[i].spaa_tree);
866 mutex_destroy(&spa->spa_allocs[i].spaa_lock);
868 kmem_free(spa->spa_allocs, spa->spa_alloc_count *
870 if (spa->spa_alloc_count > 1) {
871 mutex_destroy(&spa->spa_allocs_use->sau_lock);
872 kmem_free(spa->spa_allocs_use, offsetof(spa_allocs_use_t,
873 sau_inuse[spa->spa_alloc_count]));
876 avl_destroy(&spa->spa_metaslabs_by_flushed);
877 avl_destroy(&spa->spa_sm_logs_by_txg);
878 list_destroy(&spa->spa_log_summary);
879 list_destroy(&spa->spa_config_list);
880 list_destroy(&spa->spa_leaf_list);
882 nvlist_free(spa->spa_label_features);
883 nvlist_free(spa->spa_load_info);
884 nvlist_free(spa->spa_feat_stats);
887 zfs_refcount_destroy(&spa->spa_refcount);
893 bplist_destroy(&spa->spa_free_bplist[t]);
897 cv_destroy(&spa->spa_async_cv);
898 cv_destroy(&spa->spa_evicting_os_cv);
899 cv_destroy(&spa->spa_proc_cv);
900 cv_destroy(&spa->spa_scrub_io_cv);
901 cv_destroy(&spa->spa_suspend_cv);
902 cv_destroy(&spa->spa_activities_cv);
903 cv_destroy(&spa->spa_waiters_cv);
905 mutex_destroy(&spa->spa_flushed_ms_lock);
906 mutex_destroy(&spa->spa_async_lock);
907 mutex_destroy(&spa->spa_errlist_lock);
908 mutex_destroy(&spa->spa_errlog_lock);
909 mutex_destroy(&spa->spa_evicting_os_lock);
910 mutex_destroy(&spa->spa_history_lock);
911 mutex_destroy(&spa->spa_proc_lock);
912 mutex_destroy(&spa->spa_props_lock);
913 mutex_destroy(&spa->spa_cksum_tmpls_lock);
914 mutex_destroy(&spa->spa_scrub_lock);
915 mutex_destroy(&spa->spa_suspend_lock);
916 mutex_destroy(&spa->spa_vdev_top_lock);
917 mutex_destroy(&spa->spa_feat_stats_lock);
918 mutex_destroy(&spa->spa_activities_lock);
951 ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
953 spa->spa_load_thread == curthread);
954 (void) zfs_refcount_add(&spa->spa_refcount, tag);
964 ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
966 spa->spa_load_thread == curthread ||
967 spa->spa_export_thread == curthread);
968 (void) zfs_refcount_remove(&spa->spa_refcount, tag);
975 * isn't held and the hold by the object being evicted may contribute to
982 (void) zfs_refcount_remove(&spa->spa_refcount, tag);
995 spa->spa_export_thread == curthread);
997 return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
1024 return (TREE_CMP(sa->aux_guid, sb->aux_guid));
1034 search.aux_guid = vd->vdev_guid;
1036 aux->aux_count++;
1039 aux->aux_guid = vd->vdev_guid;
1040 aux->aux_count = 1;
1052 search.aux_guid = vd->vdev_guid;
1057 if (--aux->aux_count == 0) {
1060 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
1061 aux->aux_pool = 0ULL;
1075 *pool = found->aux_pool;
1082 *refcnt = found->aux_count;
1096 search.aux_guid = vd->vdev_guid;
1099 ASSERT(found->aux_pool == 0ULL);
1101 found->aux_pool = spa_guid(vd->vdev_spa);
1107 * - A spare may be part of multiple pools.
1108 * - A spare may be added to a pool even if it's actively in use within
1110 * - A spare in use in any pool can only be the source of a replacement if
1136 ASSERT(!vd->vdev_isspare);
1138 vd->vdev_isspare = B_TRUE;
1146 ASSERT(vd->vdev_isspare);
1148 vd->vdev_isspare = B_FALSE;
1168 ASSERT(vd->vdev_isspare);
1189 ASSERT(!vd->vdev_isl2cache);
1191 vd->vdev_isl2cache = B_TRUE;
1199 ASSERT(vd->vdev_isl2cache);
1201 vd->vdev_isl2cache = B_FALSE;
1221 ASSERT(vd->vdev_isl2cache);
1240 mutex_enter(&spa->spa_vdev_top_lock);
1243 ASSERT0(spa->spa_export_thread);
1259 mutex_enter(&spa->spa_vdev_top_lock);
1262 ASSERT0(spa->spa_export_thread);
1269 vdev_rebuild_stop_wait(vd->vdev_top);
1305 spa->spa_pending_vdev = NULL;
1310 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);
1312 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
1314 spa->spa_config_generation++;
1342 txg_wait_synced(spa->spa_dsl_pool, txg);
1345 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1346 if (vd->vdev_ops->vdev_op_leaf) {
1347 mutex_enter(&vd->vdev_initialize_lock);
1350 mutex_exit(&vd->vdev_initialize_lock);
1352 mutex_enter(&vd->vdev_trim_lock);
1354 mutex_exit(&vd->vdev_trim_lock);
1358 * The vdev may be both a leaf and top-level device.
1388 mutex_exit(&spa->spa_vdev_top_lock);
1411 int low = locks & ~(SCL_ZIO - 1);
1415 vdev_hold(spa->spa_root_vdev);
1420 spa->spa_vdev_locks = locks;
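These lines are from spa_vdev_state_enter(). Because the SCL_* bits are assigned in lock order, locks & ~(SCL_ZIO - 1) isolates the requested locks at or above SCL_ZIO; for root pools those are taken only after vdev_hold(), so that opening the root vdev, which can itself trigger reads needing SCL_ZIO as reader, does not deadlock against our own SCL_ZIO writer hold.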
1429 if (vd == NULL || vd == spa->spa_root_vdev) {
1430 vdev_top = spa->spa_root_vdev;
1432 vdev_top = vd->vdev_top;
1439 if (vd != spa->spa_root_vdev)
1443 spa->spa_config_generation++;
1447 vdev_rele(spa->spa_root_vdev);
1449 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1450 spa_config_exit(spa, spa->spa_vdev_locks, spa);
1459 txg_wait_synced(spa->spa_dsl_pool, 0);
1482 if (!nvlist_exists(spa->spa_label_features, feature)) {
1483 fnvlist_add_boolean(spa->spa_label_features, feature);
1491 if (tx->tx_txg != TXG_INITIAL)
1492 vdev_config_dirty(spa->spa_root_vdev);
1499 if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
1500 vdev_config_dirty(spa->spa_root_vdev);
1505 * device_guid is non-zero, determine whether the pool exists *and* contains
1517 if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1519 if (spa->spa_root_vdev == NULL)
1525 if (vdev_lookup_by_guid(spa->spa_root_vdev,
1532 if (spa->spa_pending_vdev) {
1533 if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1655 if (spa->spa_freeze_txg == UINT64_MAX) {
1657 spa->spa_freeze_txg = freeze_txg;
1675 * This is a stripped-down version of strtoull, suitable only for converting
1687 digit = c - '0';
1689 digit = 10 + c - 'a';
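A hedged reconstruction of the enclosing loop (this is zfs_strtonum(); out-parameter handling elided): accumulate lowercase hex digits until the first non-digit character.

	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;	/* first non-hex character ends the number */

		val = val * 16 + digit;
		str++;
	}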
1724 return (spa->spa_async_suspended);
1730 return (spa->spa_dsl_pool);
1736 return (spa->spa_is_initializing);
1742 return (spa->spa_indirect_vdevs_loaded);
1748 return (&spa->spa_ubsync.ub_rootbp);
1754 spa->spa_uberblock.ub_rootbp = *bp;
1760 if (spa->spa_root == NULL)
1763 (void) strlcpy(buf, spa->spa_root, buflen);
1769 return (spa->spa_sync_pass);
1775 return (spa->spa_name);
1790 if (spa->spa_root_vdev == NULL)
1791 return (spa->spa_config_guid);
1793 guid = spa->spa_last_synced_guid != 0 ?
1794 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1801 return (spa->spa_root_vdev->vdev_guid);
1814 return (spa->spa_load_guid);
1820 return (spa->spa_ubsync.ub_txg);
1826 return (spa->spa_first_txg);
1832 return (spa->spa_syncing_txg);
1842 return (spa->spa_final_txg - TXG_DEFER_SIZE);
1848 return (spa->spa_state);
1854 return (spa->spa_load_state);
1860 return (spa->spa_freeze_txg);
1875 return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
1885 * default spa_slop_shift=5 and a non-tiny pool).
1898 if (spa->spa_dedup_dspace == ~0ULL)
1901 space = spa->spa_rdspace;
1912 slop -= MIN(embedded_log, slop >> 1);
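A worked example of the surrounding spa_get_slop_space() arithmetic, assuming the default tunables (spa_slop_shift=5, spa_max_slop=128 GiB, spa_min_slop=128 MiB): a 4 TiB pool starts with slop = space >> 5 = 128 GiB, which is also the spa_max_slop cap; the embedded log class's space is then subtracted, but never more than half the slop (the slop >> 1 bound above); and the result is floored at MIN(space >> 1, spa_min_slop).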
1925 return (spa->spa_dspace);
1931 return (spa->spa_checkpoint_info.sci_dspace);
1937 spa->spa_rdspace = metaslab_class_get_dspace(spa_normal_class(spa));
1938 if (spa->spa_nonallocating_dspace > 0) {
1940 * Subtract the space provided by all non-allocating vdevs that
1945 * non-allocating vdev, but the new blocks must be allocated on
1947 * the non-allocating vdevs (including allocated space), we
1957 ASSERT3U(spa->spa_rdspace, >=, spa->spa_nonallocating_dspace);
1958 spa->spa_rdspace -= spa->spa_nonallocating_dspace;
1960 spa->spa_dspace = spa->spa_rdspace + ddt_get_dedup_dspace(spa) +
1971 return (spa->spa_failmode);
1977 return (spa->spa_suspended != ZIO_SUSPEND_NONE);
1983 return (spa->spa_ubsync.ub_version);
1989 return (spa->spa_deflate);
1995 return (spa->spa_normal_class);
2001 return (spa->spa_log_class);
2007 return (spa->spa_embedded_log_class);
2013 return (spa->spa_special_class);
2019 return (spa->spa_dedup_class);
2026 spa->spa_special_class->mc_groups != 0);
2035 const zio_prop_t *zp = &zio->io_prop;
2044 zp->zp_storage_type == DMU_OT_NONE ?
2045 zp->zp_type : zp->zp_storage_type;
2052 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
2055 if (spa->spa_dedup_class->mc_groups != 0)
2064 if (zp->zp_level > 0 &&
2072 if (DMU_OT_IS_METADATA(objtype) || zp->zp_level > 0) {
2085 has_special_class && zio->io_size <= zp->zp_zpl_smallblk) {
2090 (space * (100 - zfs_special_class_metadata_reserve_pct))
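Putting the fragments above together, the small-file routing check is roughly the following (a sketch of the tail of spa_preferred_class(); names as in spa_misc.c):

	if (DMU_OT_IS_FILE(objtype) &&
	    has_special_class && zio->io_size <= zp->zp_zpl_smallblk) {
		metaslab_class_t *special = spa_special_class(spa);
		uint64_t alloc = metaslab_class_get_alloc(special);
		uint64_t space = metaslab_class_get_space(special);
		uint64_t limit =
		    (space * (100 - zfs_special_class_metadata_reserve_pct))
		    / 100;

		/* Accept file blocks only while the metadata reserve is free. */
		if (alloc < limit)
			return (special);
	}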
2103 mutex_enter(&spa->spa_evicting_os_lock);
2104 list_insert_head(&spa->spa_evicting_os_list, os);
2105 mutex_exit(&spa->spa_evicting_os_lock);
2111 mutex_enter(&spa->spa_evicting_os_lock);
2112 list_remove(&spa->spa_evicting_os_list, os);
2113 cv_broadcast(&spa->spa_evicting_os_cv);
2114 mutex_exit(&spa->spa_evicting_os_lock);
2120 mutex_enter(&spa->spa_evicting_os_lock);
2121 while (!list_is_empty(&spa->spa_evicting_os_list))
2122 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
2123 mutex_exit(&spa->spa_evicting_os_lock);
2144 return (spa->spa_prev_software_version);
2150 return (spa->spa_deadman_synctime);
2156 return (spa->spa_autotrim);
2162 return (spa->spa_deadman_ziotime);
2168 return (spa->spa_deadman_failmode);
2175 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
2177 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
2179 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
2181 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
2192 spa->spa_deadman_ziotime = ns;
2205 spa->spa_deadman_synctime = ns;
2218 if (asize != 0 && spa->spa_deflate) {
2222 vd->vdev_deflate_ratio;
2234 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2247 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
2257 return (spa->spa_dsl_pool->dp_dirty_total);
2281 seq_printf(f, "%-20s %-14s %-14s %-12s %-16s %s\n", "pool_guid",
2292 seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %-16s %s\n",
2293 (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
2294 (u_longlong_t)sip->mmp_sec_remaining,
2295 (u_longlong_t)sip->spa_load_max_txg,
2296 (sip->pool_name ? sip->pool_name : "-"),
2297 (sip->spa_load_notes ? sip->spa_load_notes : "-"));
2307 while (shl->size > size) {
2308 sip = list_remove_head(&shl->procfs_list.pl_list);
2309 if (sip->pool_name)
2310 spa_strfree(sip->pool_name);
2311 if (sip->spa_load_notes)
2312 kmem_strfree(sip->spa_load_notes);
2314 shl->size--;
2317 IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));
2326 spa_import_progress_list->size = 0;
2328 spa_import_progress_list->procfs_list.pl_private =
2335 &spa_import_progress_list->procfs_list,
2346 procfs_list_uninstall(&shl->procfs_list);
2348 procfs_list_destroy(&shl->procfs_list);
2360 if (shl->size == 0)
2363 mutex_enter(&shl->procfs_list.pl_lock);
2364 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2365 sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2366 if (sip->pool_guid == pool_guid) {
2367 sip->spa_load_state = load_state;
2368 if (sip->spa_load_notes != NULL) {
2369 kmem_strfree(sip->spa_load_notes);
2370 sip->spa_load_notes = NULL;
2376 mutex_exit(&shl->procfs_list.pl_lock);
2389 if (shl->size == 0)
2394 mutex_enter(&shl->procfs_list.pl_lock);
2395 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2396 sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2397 if (sip->pool_guid == pool_guid) {
2398 if (sip->spa_load_notes != NULL) {
2399 kmem_strfree(sip->spa_load_notes);
2400 sip->spa_load_notes = NULL;
2402 sip->spa_load_notes = notes;
2404 zfs_dbgmsg("'%s' %s", sip->pool_name, notes);
2409 mutex_exit(&shl->procfs_list.pl_lock);
2441 if (shl->size == 0)
2444 mutex_enter(&shl->procfs_list.pl_lock);
2445 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2446 sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2447 if (sip->pool_guid == pool_guid) {
2448 sip->spa_load_max_txg = load_max_txg;
2453 mutex_exit(&shl->procfs_list.pl_lock);
2466 if (shl->size == 0)
2469 mutex_enter(&shl->procfs_list.pl_lock);
2470 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2471 sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2472 if (sip->pool_guid == pool_guid) {
2473 sip->mmp_sec_remaining = mmp_sec_remaining;
2478 mutex_exit(&shl->procfs_list.pl_lock);
2494 sip->pool_guid = spa_guid(spa);
2496 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
2500 sip->pool_name = spa_strdup(poolname);
2501 sip->spa_load_state = spa_load_state(spa);
2502 sip->spa_load_notes = NULL;
2504 mutex_enter(&shl->procfs_list.pl_lock);
2505 procfs_list_add(&shl->procfs_list, sip);
2506 shl->size++;
2507 mutex_exit(&shl->procfs_list.pl_lock);
2516 mutex_enter(&shl->procfs_list.pl_lock);
2517 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
2518 sip = list_prev(&shl->procfs_list.pl_list, sip)) {
2519 if (sip->pool_guid == pool_guid) {
2520 if (sip->pool_name)
2521 spa_strfree(sip->pool_name);
2522 if (sip->spa_load_notes)
2523 spa_strfree(sip->spa_load_notes);
2524 list_remove(&shl->procfs_list.pl_list, sip);
2525 shl->size--;
2530 mutex_exit(&shl->procfs_list.pl_lock);
2546 s = strcmp(s1->spa_name, s2->spa_name);
2584 if (sigaction(SIGSEGV, &sa, NULL) == -1) {
2663 return (spa->spa_log_class->mc_groups != 0);
2669 return (spa->spa_log_state);
2675 spa->spa_log_state = state;
2681 return (spa->spa_is_root);
2687 return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);
2697 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
2698 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
2704 return (spa->spa_mode);
2710 return (spa->spa_scrubbed_last_txg);
2716 return (spa->spa_bootfs);
2722 return (spa->spa_delegation);
2728 return (spa->spa_meta_objset);
2734 return (spa->spa_dedup_checksum);
2744 spa->spa_scan_pass_start = gethrestime_sec();
2745 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2746 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2748 spa->spa_scan_pass_scrub_pause = 0;
2750 if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
2751 spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
2753 spa->spa_scan_pass_errorscrub_pause = 0;
2755 spa->spa_scan_pass_scrub_spent_paused = 0;
2756 spa->spa_scan_pass_exam = 0;
2757 spa->spa_scan_pass_issued = 0;
2760 spa->spa_scan_pass_errorscrub_spent_paused = 0;
2767 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2769 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2771 if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
2772 scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
2775 memset(ps, 0, sizeof (pool_scan_stat_t));
2778 ps->pss_func = scn->scn_phys.scn_func;
2779 ps->pss_state = scn->scn_phys.scn_state;
2780 ps->pss_start_time = scn->scn_phys.scn_start_time;
2781 ps->pss_end_time = scn->scn_phys.scn_end_time;
2782 ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2783 ps->pss_examined = scn->scn_phys.scn_examined;
2784 ps->pss_skipped = scn->scn_phys.scn_skipped;
2785 ps->pss_processed = scn->scn_phys.scn_processed;
2786 ps->pss_errors = scn->scn_phys.scn_errors;
2789 ps->pss_pass_exam = spa->spa_scan_pass_exam;
2790 ps->pss_pass_start = spa->spa_scan_pass_start;
2791 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2792 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2793 ps->pss_pass_issued = spa->spa_scan_pass_issued;
2794 ps->pss_issued =
2795 scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
2798 ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
2799 ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
2800 ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
2801 ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
2802 ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
2803 ps->pss_error_scrub_to_be_examined =
2804 scn->errorscrub_phys.dep_to_examine;
2807 ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;
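Typical consumption of the filled-in pool_scan_stat_t (a hedged sketch; spa_scan_get_stats() returns ENOENT when no scan or error scrub has ever run):

	pool_scan_stat_t ps;

	if (spa_scan_get_stats(spa, &ps) == 0 &&
	    ps.pss_func == POOL_SCAN_SCRUB && ps.pss_state == DSS_SCANNING) {
		zfs_dbgmsg("scrub: %llu of %llu bytes examined",
		    (u_longlong_t)ps.pss_examined,
		    (u_longlong_t)ps.pss_to_examine);
	}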
2830 uint64_t ret = -1ULL;
2835 * config locks, so it is sufficient to hold SCL_VDEV as reader when
2838 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
2840 while (vdevid != -1ULL) {
2842 vdev_indirect_births_t *vib = vd->vdev_indirect_births;
2844 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2854 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
2858 IMPLY(ret != -1ULL,
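This walk is from spa_get_last_removal_txg(): spa_removing_phys.sr_prev_indirect_vdev names the most recently removed top-level vdev, each indirect vdev's vic_prev_indirect_vdev points at the one removed before it, and -1ULL terminates the chain, hence the -1ULL sentinel in the IMPLY just above.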
2876 return (spa->spa_multihost ? B_TRUE : B_FALSE);
2882 return (spa->spa_hostid);
2888 return (spa->spa_trust_config);
2894 return (spa->spa_missing_tvds_allowed);
2900 return (spa->spa_syncing_log_sm);
2906 spa->spa_missing_tvds = missing;
2921 vdev_t *rvd = spa->spa_root_vdev;
2925 vdev_state_t state = rvd->vdev_state;
2926 vdev_aux_t aux = rvd->vdev_stat.vs_aux;
2960 vdev_t *rvd = spa->spa_root_vdev;
2961 for (uint64_t c = 0; c < rvd->vdev_children; c++) {
2962 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2971 return (spa->spa_checkpoint_txg != 0);
2977 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2978 spa->spa_mode == SPA_MODE_READ);
2984 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2989 return (spa->spa_first_txg);
3005 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
3006 uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;