/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2023, Klara Inc.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */
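
/*
 * Example (illustrative only, not part of the interfaces above): a typical
 * short-lived reader of the vdev tree takes one of the config locks around
 * the traversal and releases it with the same tag (FTAG, the conventional
 * tag naming the calling function):
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	vdev_t *rvd = spa->spa_root_vdev;
 *	... walk the tree, making no topology changes ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * Topology changes instead go through spa_vdev_enter()/spa_vdev_exit(),
 * which take spa_namespace_lock and all of the config locks as writer.
 */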

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
	ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
uint64_t zfs_deadman_synctime_ms = 600000UL;	/* 10 min. */

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO.  By default this is 300 seconds at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
uint64_t zfs_deadman_ziotime_ms = 300000UL;	/* 5 min. */

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 60000UL;	/* 1 min. */

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = B_TRUE;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 * wait     - Wait for the "hung" I/O (default)
 * continue - Attempt to recover from a "hung" I/O
 * panic    - Panic the system
 */
const char *zfs_deadman_failmode = "wait";
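
/*
 * Illustrative note (an assumption about the Linux build, not part of this
 * file): the deadman tunables above are also exposed as zfs module
 * parameters, so they can typically be adjusted at runtime, e.g.:
 *
 *	echo 120000 > /sys/module/zfs/parameters/zfs_deadman_synctime_ms
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *
 * The exact mechanism is platform-specific; see the zfs(4) manual page.
 */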

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
uint_t spa_asize_inflation = 24;

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS).  It also limits the worst-case time to allocate space.  If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
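
/*
 * Worked example (an illustrative sketch of the policy described above, not
 * the exact spa_get_slop_space() code).  With the default spa_slop_shift of
 * 5 the reserved fraction is 1/(2^5) = 1/32 = 3.125% -- the "3.2%" quoted
 * above -- clamped as follows:
 *
 *	slop = pool_size >> spa_slop_shift;
 *	slop = MAX(slop, MIN(spa_min_slop, pool_size / 2));
 *	slop = MIN(slop, spa_max_slop);
 *
 * So a 1TB pool reserves about 32GB, a 1GB pool reserves 128MB (spa_min_slop
 * dominates), a 200MB pool reserves 100MB (half the pool), and a 10PB pool
 * reserves 128GB (spa_max_slop) rather than ~320TB.
 */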

/*
 * Number of allocators to use, per spa instance
 */
static int spa_num_allocators = 4;

/*
 * Spa active allocator.
 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>.
 */
const char *zfs_active_allocator = "dynamic";

void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * By default dedup and user data indirects land in the special class
 */
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
 * let metadata into the class.
 */
static uint_t zfs_special_class_metadata_reserve_pct = 25;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
		scl->scl_count = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
		ASSERT(scl->scl_count == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (scl->scl_count != 0) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
    int mmp_flag)
{
	(void) tag;
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer ||
			    (!mmp_flag && scl->scl_write_wanted)) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (scl->scl_count != 0) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 0);
}

/*
 * The spa_config_enter_mmp() allows the mmp thread to cut in front of
 * outstanding write lock requests. This is needed since the mmp updates are
 * time sensitive and failure to service them promptly will result in a
 * suspended pool. This pool suspension has been seen in practice when there is
 * a single disk in a pool that is responding slowly and presumably about to
 * fail.
 */

void
spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 1);
}

void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
	(void) tag;
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(scl->scl_count > 0);
		if (--scl->scl_count == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && scl->scl_count != 0) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
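
/*
 * Example (illustrative only): spa_config_held() is typically used in
 * assertions to document the locking a call site expects, and
 * spa_config_tryenter() is the non-blocking variant of spa_config_enter():
 *
 *	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER));
 *
 *	if (spa_config_tryenter(spa, SCL_ZIO, FTAG, RW_READER)) {
 *		... fast path that must not block on the config lock ...
 *		spa_config_exit(spa, SCL_ZIO, FTAG);
 *	}
 */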

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
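
/*
 * Example (illustrative only): callers look up a pool with the namespace
 * lock held, by pool name or by the pool component of a full dataset name
 * ("tank/fs@snap" resolves to the spa_t for "tank"):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank/fs@snap");
 *	if (spa != NULL)
 *		... use the spa_t, or take a hold with spa_open_ref() ...
 *	mutex_exit(&spa_namespace_lock);
 */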

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;
	spa->spa_hostid = zone_get_hostid(NULL);

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
	spa_set_deadman_failmode(spa, zfs_deadman_failmode);
	spa_set_allocator(spa, zfs_active_allocator);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);
	spa_stats_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	/* Do not allow more allocators than CPUs. */
	spa->spa_alloc_count = MIN(MAX(spa_num_allocators, 1), boot_ncpus);

	spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (spa_alloc_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
	}

	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
	spa->spa_min_alloc = INT_MAX;
	spa->spa_gcd_alloc = INT_MAX;

	/* Reset cached value */
	spa->spa_dedup_dspace = ~0ULL;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
	ASSERT0(spa->spa_waiters);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_allocs[i].spaa_tree);
		mutex_destroy(&spa->spa_allocs[i].spaa_lock);
	}
	kmem_free(spa->spa_allocs, spa->spa_alloc_count *
	    sizeof (spa_alloc_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	nvlist_free(spa->spa_feat_stats);
	spa_config_set(spa, NULL);

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_stats_destroy(spa);
	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);
	cv_destroy(&spa->spa_activities_cv);
	cv_destroy(&spa->spa_waiters_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_feat_stats_lock);
	mutex_destroy(&spa->spa_activities_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
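
/*
 * Example (illustrative only): the usual way to iterate over every imported
 * pool is to walk the namespace with spa_next() while holding the namespace
 * lock:
 *
 *	spa_t *spa = NULL;
 *
 *	mutex_enter(&spa_namespace_lock);
 *	while ((spa = spa_next(spa)) != NULL) {
 *		... examine spa; take spa_open_ref() before dropping the
 *		    namespace lock if the spa_t must outlive this loop ...
 *	}
 *	mutex_exit(&spa_namespace_lock);
 */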

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, const void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
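
/*
 * Example (illustrative only): a consumer that needs the spa_t to remain
 * valid after dropping the namespace lock brackets its use with a hold,
 * using the same tag for the add and the remove:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	... use the spa_t ...
 *	spa_close(spa, FTAG);	(no lock is needed to drop the hold)
 */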

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
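
/*
 * Example (illustrative only): spa_spare_exists() is the query side of this
 * tracking.  Given a device guid it reports whether that device is known as
 * a spare anywhere on the system, which pool (if any) has activated it, and
 * how many pools reference it:
 *
 *	uint64_t pool;
 *	int refcnt;
 *
 *	if (spa_spare_exists(vd->vdev_guid, &pool, &refcnt) &&
 *	    pool != 0ULL && pool != spa_guid(spa))
 *		... the device is an active spare belonging to another pool ...
 */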
1088eda14cbcSMatt Macy */ 1089eda14cbcSMatt Macy 1090eda14cbcSMatt Macy static int 1091eda14cbcSMatt Macy spa_spare_compare(const void *a, const void *b) 1092eda14cbcSMatt Macy { 1093eda14cbcSMatt Macy return (spa_aux_compare(a, b)); 1094eda14cbcSMatt Macy } 1095eda14cbcSMatt Macy 1096eda14cbcSMatt Macy void 1097eda14cbcSMatt Macy spa_spare_add(vdev_t *vd) 1098eda14cbcSMatt Macy { 1099eda14cbcSMatt Macy mutex_enter(&spa_spare_lock); 1100eda14cbcSMatt Macy ASSERT(!vd->vdev_isspare); 1101eda14cbcSMatt Macy spa_aux_add(vd, &spa_spare_avl); 1102eda14cbcSMatt Macy vd->vdev_isspare = B_TRUE; 1103eda14cbcSMatt Macy mutex_exit(&spa_spare_lock); 1104eda14cbcSMatt Macy } 1105eda14cbcSMatt Macy 1106eda14cbcSMatt Macy void 1107eda14cbcSMatt Macy spa_spare_remove(vdev_t *vd) 1108eda14cbcSMatt Macy { 1109eda14cbcSMatt Macy mutex_enter(&spa_spare_lock); 1110eda14cbcSMatt Macy ASSERT(vd->vdev_isspare); 1111eda14cbcSMatt Macy spa_aux_remove(vd, &spa_spare_avl); 1112eda14cbcSMatt Macy vd->vdev_isspare = B_FALSE; 1113eda14cbcSMatt Macy mutex_exit(&spa_spare_lock); 1114eda14cbcSMatt Macy } 1115eda14cbcSMatt Macy 1116eda14cbcSMatt Macy boolean_t 1117eda14cbcSMatt Macy spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) 1118eda14cbcSMatt Macy { 1119eda14cbcSMatt Macy boolean_t found; 1120eda14cbcSMatt Macy 1121eda14cbcSMatt Macy mutex_enter(&spa_spare_lock); 1122eda14cbcSMatt Macy found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); 1123eda14cbcSMatt Macy mutex_exit(&spa_spare_lock); 1124eda14cbcSMatt Macy 1125eda14cbcSMatt Macy return (found); 1126eda14cbcSMatt Macy } 1127eda14cbcSMatt Macy 1128eda14cbcSMatt Macy void 1129eda14cbcSMatt Macy spa_spare_activate(vdev_t *vd) 1130eda14cbcSMatt Macy { 1131eda14cbcSMatt Macy mutex_enter(&spa_spare_lock); 1132eda14cbcSMatt Macy ASSERT(vd->vdev_isspare); 1133eda14cbcSMatt Macy spa_aux_activate(vd, &spa_spare_avl); 1134eda14cbcSMatt Macy mutex_exit(&spa_spare_lock); 1135eda14cbcSMatt Macy } 1136eda14cbcSMatt Macy 1137eda14cbcSMatt Macy /* 1138eda14cbcSMatt Macy * Level 2 ARC devices are tracked globally for the same reasons as spares. 1139eda14cbcSMatt Macy * Cache devices currently only support one pool per cache device, and so 1140eda14cbcSMatt Macy * for these devices the aux reference count is currently unused beyond 1. 
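 *
 * A sketch of the equivalent query for cache devices: spa_l2cache_exists(guid,
 * &pool) returns B_TRUE when the device is a known cache device, with 'pool'
 * set to the owning pool's guid (or 0), just as for spares above.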
1141eda14cbcSMatt Macy */ 1142eda14cbcSMatt Macy 1143eda14cbcSMatt Macy static int 1144eda14cbcSMatt Macy spa_l2cache_compare(const void *a, const void *b) 1145eda14cbcSMatt Macy { 1146eda14cbcSMatt Macy return (spa_aux_compare(a, b)); 1147eda14cbcSMatt Macy } 1148eda14cbcSMatt Macy 1149eda14cbcSMatt Macy void 1150eda14cbcSMatt Macy spa_l2cache_add(vdev_t *vd) 1151eda14cbcSMatt Macy { 1152eda14cbcSMatt Macy mutex_enter(&spa_l2cache_lock); 1153eda14cbcSMatt Macy ASSERT(!vd->vdev_isl2cache); 1154eda14cbcSMatt Macy spa_aux_add(vd, &spa_l2cache_avl); 1155eda14cbcSMatt Macy vd->vdev_isl2cache = B_TRUE; 1156eda14cbcSMatt Macy mutex_exit(&spa_l2cache_lock); 1157eda14cbcSMatt Macy } 1158eda14cbcSMatt Macy 1159eda14cbcSMatt Macy void 1160eda14cbcSMatt Macy spa_l2cache_remove(vdev_t *vd) 1161eda14cbcSMatt Macy { 1162eda14cbcSMatt Macy mutex_enter(&spa_l2cache_lock); 1163eda14cbcSMatt Macy ASSERT(vd->vdev_isl2cache); 1164eda14cbcSMatt Macy spa_aux_remove(vd, &spa_l2cache_avl); 1165eda14cbcSMatt Macy vd->vdev_isl2cache = B_FALSE; 1166eda14cbcSMatt Macy mutex_exit(&spa_l2cache_lock); 1167eda14cbcSMatt Macy } 1168eda14cbcSMatt Macy 1169eda14cbcSMatt Macy boolean_t 1170eda14cbcSMatt Macy spa_l2cache_exists(uint64_t guid, uint64_t *pool) 1171eda14cbcSMatt Macy { 1172eda14cbcSMatt Macy boolean_t found; 1173eda14cbcSMatt Macy 1174eda14cbcSMatt Macy mutex_enter(&spa_l2cache_lock); 1175eda14cbcSMatt Macy found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); 1176eda14cbcSMatt Macy mutex_exit(&spa_l2cache_lock); 1177eda14cbcSMatt Macy 1178eda14cbcSMatt Macy return (found); 1179eda14cbcSMatt Macy } 1180eda14cbcSMatt Macy 1181eda14cbcSMatt Macy void 1182eda14cbcSMatt Macy spa_l2cache_activate(vdev_t *vd) 1183eda14cbcSMatt Macy { 1184eda14cbcSMatt Macy mutex_enter(&spa_l2cache_lock); 1185eda14cbcSMatt Macy ASSERT(vd->vdev_isl2cache); 1186eda14cbcSMatt Macy spa_aux_activate(vd, &spa_l2cache_avl); 1187eda14cbcSMatt Macy mutex_exit(&spa_l2cache_lock); 1188eda14cbcSMatt Macy } 1189eda14cbcSMatt Macy 1190eda14cbcSMatt Macy /* 1191eda14cbcSMatt Macy * ========================================================================== 1192eda14cbcSMatt Macy * SPA vdev locking 1193eda14cbcSMatt Macy * ========================================================================== 1194eda14cbcSMatt Macy */ 1195eda14cbcSMatt Macy 1196eda14cbcSMatt Macy /* 1197eda14cbcSMatt Macy * Lock the given spa_t for the purpose of adding or removing a vdev. 1198eda14cbcSMatt Macy * Grabs the global spa_namespace_lock plus the spa config lock for writing. 1199eda14cbcSMatt Macy * It returns the next transaction group for the spa_t. 1200eda14cbcSMatt Macy */ 1201eda14cbcSMatt Macy uint64_t 1202eda14cbcSMatt Macy spa_vdev_enter(spa_t *spa) 1203eda14cbcSMatt Macy { 1204eda14cbcSMatt Macy mutex_enter(&spa->spa_vdev_top_lock); 1205eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 1206eda14cbcSMatt Macy 1207eda14cbcSMatt Macy vdev_autotrim_stop_all(spa); 1208eda14cbcSMatt Macy 1209eda14cbcSMatt Macy return (spa_vdev_config_enter(spa)); 1210eda14cbcSMatt Macy } 1211eda14cbcSMatt Macy 1212eda14cbcSMatt Macy /* 1213eda14cbcSMatt Macy * The same as spa_vdev_enter() above but additionally takes the guid of 1214eda14cbcSMatt Macy * the vdev being detached. When there is a rebuild in process it will be 1215eda14cbcSMatt Macy * suspended while the vdev tree is modified then resumed by spa_vdev_exit(). 1216eda14cbcSMatt Macy * The rebuild is canceled if only a single child remains after the detach. 
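 *
 * A typical detach path pairs this with spa_vdev_exit(), e.g. (minimal
 * sketch, error handling elided):
 *
 *     uint64_t txg = spa_vdev_detach_enter(spa, guid);
 *     ... detach the child and update the vdev tree ...
 *     return (spa_vdev_exit(spa, vd, txg, error));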
1217eda14cbcSMatt Macy */ 1218eda14cbcSMatt Macy uint64_t 1219eda14cbcSMatt Macy spa_vdev_detach_enter(spa_t *spa, uint64_t guid) 1220eda14cbcSMatt Macy { 1221eda14cbcSMatt Macy mutex_enter(&spa->spa_vdev_top_lock); 1222eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 1223eda14cbcSMatt Macy 1224eda14cbcSMatt Macy vdev_autotrim_stop_all(spa); 1225eda14cbcSMatt Macy 1226eda14cbcSMatt Macy if (guid != 0) { 1227eda14cbcSMatt Macy vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 1228eda14cbcSMatt Macy if (vd) { 1229eda14cbcSMatt Macy vdev_rebuild_stop_wait(vd->vdev_top); 1230eda14cbcSMatt Macy } 1231eda14cbcSMatt Macy } 1232eda14cbcSMatt Macy 1233eda14cbcSMatt Macy return (spa_vdev_config_enter(spa)); 1234eda14cbcSMatt Macy } 1235eda14cbcSMatt Macy 1236eda14cbcSMatt Macy /* 1237eda14cbcSMatt Macy * Internal implementation for spa_vdev_enter(). Used when a vdev 1238eda14cbcSMatt Macy * operation requires multiple syncs (i.e. removing a device) while 1239eda14cbcSMatt Macy * keeping the spa_namespace_lock held. 1240eda14cbcSMatt Macy */ 1241eda14cbcSMatt Macy uint64_t 1242eda14cbcSMatt Macy spa_vdev_config_enter(spa_t *spa) 1243eda14cbcSMatt Macy { 1244eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1245eda14cbcSMatt Macy 1246eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1247eda14cbcSMatt Macy 1248eda14cbcSMatt Macy return (spa_last_synced_txg(spa) + 1); 1249eda14cbcSMatt Macy } 1250eda14cbcSMatt Macy 1251eda14cbcSMatt Macy /* 1252eda14cbcSMatt Macy * Used in combination with spa_vdev_config_enter() to allow the syncing 1253eda14cbcSMatt Macy * of multiple transactions without releasing the spa_namespace_lock. 1254eda14cbcSMatt Macy */ 1255eda14cbcSMatt Macy void 1256a0b956f5SMartin Matuska spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, 1257a0b956f5SMartin Matuska const char *tag) 1258eda14cbcSMatt Macy { 1259eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1260eda14cbcSMatt Macy 1261eda14cbcSMatt Macy int config_changed = B_FALSE; 1262eda14cbcSMatt Macy 1263eda14cbcSMatt Macy ASSERT(txg > spa_last_synced_txg(spa)); 1264eda14cbcSMatt Macy 1265eda14cbcSMatt Macy spa->spa_pending_vdev = NULL; 1266eda14cbcSMatt Macy 1267eda14cbcSMatt Macy /* 1268eda14cbcSMatt Macy * Reassess the DTLs. 1269eda14cbcSMatt Macy */ 1270eda14cbcSMatt Macy vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE); 1271eda14cbcSMatt Macy 1272eda14cbcSMatt Macy if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { 1273eda14cbcSMatt Macy config_changed = B_TRUE; 1274eda14cbcSMatt Macy spa->spa_config_generation++; 1275eda14cbcSMatt Macy } 1276eda14cbcSMatt Macy 1277eda14cbcSMatt Macy /* 1278eda14cbcSMatt Macy * Verify the metaslab classes. 1279eda14cbcSMatt Macy */ 1280eda14cbcSMatt Macy ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0); 1281eda14cbcSMatt Macy ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0); 1282184c1b94SMartin Matuska ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0); 1283eda14cbcSMatt Macy ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0); 1284eda14cbcSMatt Macy ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0); 1285eda14cbcSMatt Macy 1286eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, spa); 1287eda14cbcSMatt Macy 1288eda14cbcSMatt Macy /* 1289eda14cbcSMatt Macy * Panic the system if the specified tag requires it. This 1290eda14cbcSMatt Macy * is useful for ensuring that configurations are updated 1291eda14cbcSMatt Macy * transactionally. 
1292eda14cbcSMatt Macy */ 1293eda14cbcSMatt Macy if (zio_injection_enabled) 1294eda14cbcSMatt Macy zio_handle_panic_injection(spa, tag, 0); 1295eda14cbcSMatt Macy 1296eda14cbcSMatt Macy /* 1297eda14cbcSMatt Macy * Note: this txg_wait_synced() is important because it ensures 1298eda14cbcSMatt Macy * that there won't be more than one config change per txg. 1299eda14cbcSMatt Macy * This allows us to use the txg as the generation number. 1300eda14cbcSMatt Macy */ 1301eda14cbcSMatt Macy if (error == 0) 1302eda14cbcSMatt Macy txg_wait_synced(spa->spa_dsl_pool, txg); 1303eda14cbcSMatt Macy 1304eda14cbcSMatt Macy if (vd != NULL) { 1305eda14cbcSMatt Macy ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); 1306eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_leaf) { 1307eda14cbcSMatt Macy mutex_enter(&vd->vdev_initialize_lock); 1308eda14cbcSMatt Macy vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, 1309eda14cbcSMatt Macy NULL); 1310eda14cbcSMatt Macy mutex_exit(&vd->vdev_initialize_lock); 1311eda14cbcSMatt Macy 1312eda14cbcSMatt Macy mutex_enter(&vd->vdev_trim_lock); 1313eda14cbcSMatt Macy vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL); 1314eda14cbcSMatt Macy mutex_exit(&vd->vdev_trim_lock); 1315eda14cbcSMatt Macy } 1316eda14cbcSMatt Macy 1317eda14cbcSMatt Macy /* 1318eda14cbcSMatt Macy * The vdev may be both a leaf and top-level device. 1319eda14cbcSMatt Macy */ 1320eda14cbcSMatt Macy vdev_autotrim_stop_wait(vd); 1321eda14cbcSMatt Macy 1322caed7b1cSMartin Matuska spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER); 1323eda14cbcSMatt Macy vdev_free(vd); 1324caed7b1cSMartin Matuska spa_config_exit(spa, SCL_STATE_ALL, spa); 1325eda14cbcSMatt Macy } 1326eda14cbcSMatt Macy 1327eda14cbcSMatt Macy /* 1328eda14cbcSMatt Macy * If the config changed, update the config cache. 1329eda14cbcSMatt Macy */ 1330eda14cbcSMatt Macy if (config_changed) 1331bb2d13b6SMartin Matuska spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE); 1332eda14cbcSMatt Macy } 1333eda14cbcSMatt Macy 1334eda14cbcSMatt Macy /* 1335eda14cbcSMatt Macy * Unlock the spa_t after adding or removing a vdev. Besides undoing the 1336eda14cbcSMatt Macy * locking of spa_vdev_enter(), we also want to make sure the transactions have 1337eda14cbcSMatt Macy * synced to disk, and then update the global configuration cache with the new 1338eda14cbcSMatt Macy * information. 1339eda14cbcSMatt Macy */ 1340eda14cbcSMatt Macy int 1341eda14cbcSMatt Macy spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) 1342eda14cbcSMatt Macy { 1343eda14cbcSMatt Macy vdev_autotrim_restart(spa); 1344eda14cbcSMatt Macy vdev_rebuild_restart(spa); 1345eda14cbcSMatt Macy 1346eda14cbcSMatt Macy spa_vdev_config_exit(spa, vd, txg, error, FTAG); 1347eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 1348eda14cbcSMatt Macy mutex_exit(&spa->spa_vdev_top_lock); 1349eda14cbcSMatt Macy 1350eda14cbcSMatt Macy return (error); 1351eda14cbcSMatt Macy } 1352eda14cbcSMatt Macy 1353eda14cbcSMatt Macy /* 1354eda14cbcSMatt Macy * Lock the given spa_t for the purpose of changing vdev state. 1355eda14cbcSMatt Macy */ 1356eda14cbcSMatt Macy void 1357eda14cbcSMatt Macy spa_vdev_state_enter(spa_t *spa, int oplocks) 1358eda14cbcSMatt Macy { 1359eda14cbcSMatt Macy int locks = SCL_STATE_ALL | oplocks; 1360eda14cbcSMatt Macy 1361eda14cbcSMatt Macy /* 1362eda14cbcSMatt Macy * Root pools may need to read from the underlying devfs filesystem 1363eda14cbcSMatt Macy * when opening up a vdev.
Unfortunately if we're holding the 1364eda14cbcSMatt Macy * SCL_ZIO lock it will result in a deadlock when we try to issue 1365eda14cbcSMatt Macy * the read from the root filesystem. Instead we "prefetch" 1366eda14cbcSMatt Macy * the associated vnodes that we need prior to opening the 1367eda14cbcSMatt Macy * underlying devices and cache them so that we can prevent 1368eda14cbcSMatt Macy * any I/O when we are doing the actual open. 1369eda14cbcSMatt Macy */ 1370eda14cbcSMatt Macy if (spa_is_root(spa)) { 1371eda14cbcSMatt Macy int low = locks & ~(SCL_ZIO - 1); 1372eda14cbcSMatt Macy int high = locks & ~low; 1373eda14cbcSMatt Macy 1374eda14cbcSMatt Macy spa_config_enter(spa, high, spa, RW_WRITER); 1375eda14cbcSMatt Macy vdev_hold(spa->spa_root_vdev); 1376eda14cbcSMatt Macy spa_config_enter(spa, low, spa, RW_WRITER); 1377eda14cbcSMatt Macy } else { 1378eda14cbcSMatt Macy spa_config_enter(spa, locks, spa, RW_WRITER); 1379eda14cbcSMatt Macy } 1380eda14cbcSMatt Macy spa->spa_vdev_locks = locks; 1381eda14cbcSMatt Macy } 1382eda14cbcSMatt Macy 1383eda14cbcSMatt Macy int 1384eda14cbcSMatt Macy spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) 1385eda14cbcSMatt Macy { 1386eda14cbcSMatt Macy boolean_t config_changed = B_FALSE; 1387eda14cbcSMatt Macy vdev_t *vdev_top; 1388eda14cbcSMatt Macy 1389eda14cbcSMatt Macy if (vd == NULL || vd == spa->spa_root_vdev) { 1390eda14cbcSMatt Macy vdev_top = spa->spa_root_vdev; 1391eda14cbcSMatt Macy } else { 1392eda14cbcSMatt Macy vdev_top = vd->vdev_top; 1393eda14cbcSMatt Macy } 1394eda14cbcSMatt Macy 1395eda14cbcSMatt Macy if (vd != NULL || error == 0) 1396eda14cbcSMatt Macy vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE); 1397eda14cbcSMatt Macy 1398eda14cbcSMatt Macy if (vd != NULL) { 1399eda14cbcSMatt Macy if (vd != spa->spa_root_vdev) 1400eda14cbcSMatt Macy vdev_state_dirty(vdev_top); 1401eda14cbcSMatt Macy 1402eda14cbcSMatt Macy config_changed = B_TRUE; 1403eda14cbcSMatt Macy spa->spa_config_generation++; 1404eda14cbcSMatt Macy } 1405eda14cbcSMatt Macy 1406eda14cbcSMatt Macy if (spa_is_root(spa)) 1407eda14cbcSMatt Macy vdev_rele(spa->spa_root_vdev); 1408eda14cbcSMatt Macy 1409eda14cbcSMatt Macy ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); 1410eda14cbcSMatt Macy spa_config_exit(spa, spa->spa_vdev_locks, spa); 1411eda14cbcSMatt Macy 1412eda14cbcSMatt Macy /* 1413eda14cbcSMatt Macy * If anything changed, wait for it to sync. This ensures that, 14147877fdebSMatt Macy * from the system administrator's perspective, zpool(8) commands 1415eda14cbcSMatt Macy * are synchronous. This is important for things like zpool offline: 1416eda14cbcSMatt Macy * when the command completes, you expect no further I/O from ZFS. 1417eda14cbcSMatt Macy */ 1418eda14cbcSMatt Macy if (vd != NULL) 1419eda14cbcSMatt Macy txg_wait_synced(spa->spa_dsl_pool, 0); 1420eda14cbcSMatt Macy 1421eda14cbcSMatt Macy /* 1422eda14cbcSMatt Macy * If the config changed, update the config cache. 
1423eda14cbcSMatt Macy */ 1424eda14cbcSMatt Macy if (config_changed) { 1425eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 1426be181ee2SMartin Matuska spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE); 1427eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 1428eda14cbcSMatt Macy } 1429eda14cbcSMatt Macy 1430eda14cbcSMatt Macy return (error); 1431eda14cbcSMatt Macy } 1432eda14cbcSMatt Macy 1433eda14cbcSMatt Macy /* 1434eda14cbcSMatt Macy * ========================================================================== 1435eda14cbcSMatt Macy * Miscellaneous functions 1436eda14cbcSMatt Macy * ========================================================================== 1437eda14cbcSMatt Macy */ 1438eda14cbcSMatt Macy 1439eda14cbcSMatt Macy void 1440eda14cbcSMatt Macy spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1441eda14cbcSMatt Macy { 1442eda14cbcSMatt Macy if (!nvlist_exists(spa->spa_label_features, feature)) { 1443eda14cbcSMatt Macy fnvlist_add_boolean(spa->spa_label_features, feature); 1444eda14cbcSMatt Macy /* 1445eda14cbcSMatt Macy * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 1446eda14cbcSMatt Macy * dirty the vdev config because lock SCL_CONFIG is not held. 1447eda14cbcSMatt Macy * Thankfully, in this case we don't need to dirty the config 1448eda14cbcSMatt Macy * because it will be written out anyway when we finish 1449eda14cbcSMatt Macy * creating the pool. 1450eda14cbcSMatt Macy */ 1451eda14cbcSMatt Macy if (tx->tx_txg != TXG_INITIAL) 1452eda14cbcSMatt Macy vdev_config_dirty(spa->spa_root_vdev); 1453eda14cbcSMatt Macy } 1454eda14cbcSMatt Macy } 1455eda14cbcSMatt Macy 1456eda14cbcSMatt Macy void 1457eda14cbcSMatt Macy spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1458eda14cbcSMatt Macy { 1459eda14cbcSMatt Macy if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 1460eda14cbcSMatt Macy vdev_config_dirty(spa->spa_root_vdev); 1461eda14cbcSMatt Macy } 1462eda14cbcSMatt Macy 1463eda14cbcSMatt Macy /* 1464eda14cbcSMatt Macy * Return the spa_t associated with given pool_guid, if it exists. If 1465eda14cbcSMatt Macy * device_guid is non-zero, determine whether the pool exists *and* contains 1466eda14cbcSMatt Macy * a device with the specified device_guid. 1467eda14cbcSMatt Macy */ 1468eda14cbcSMatt Macy spa_t * 1469eda14cbcSMatt Macy spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1470eda14cbcSMatt Macy { 1471eda14cbcSMatt Macy spa_t *spa; 1472eda14cbcSMatt Macy avl_tree_t *t = &spa_namespace_avl; 1473eda14cbcSMatt Macy 1474eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1475eda14cbcSMatt Macy 1476eda14cbcSMatt Macy for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1477eda14cbcSMatt Macy if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1478eda14cbcSMatt Macy continue; 1479eda14cbcSMatt Macy if (spa->spa_root_vdev == NULL) 1480eda14cbcSMatt Macy continue; 1481eda14cbcSMatt Macy if (spa_guid(spa) == pool_guid) { 1482eda14cbcSMatt Macy if (device_guid == 0) 1483eda14cbcSMatt Macy break; 1484eda14cbcSMatt Macy 1485eda14cbcSMatt Macy if (vdev_lookup_by_guid(spa->spa_root_vdev, 1486eda14cbcSMatt Macy device_guid) != NULL) 1487eda14cbcSMatt Macy break; 1488eda14cbcSMatt Macy 1489eda14cbcSMatt Macy /* 1490eda14cbcSMatt Macy * Check any devices we may be in the process of adding. 
1491eda14cbcSMatt Macy */ 1492eda14cbcSMatt Macy if (spa->spa_pending_vdev) { 1493eda14cbcSMatt Macy if (vdev_lookup_by_guid(spa->spa_pending_vdev, 1494eda14cbcSMatt Macy device_guid) != NULL) 1495eda14cbcSMatt Macy break; 1496eda14cbcSMatt Macy } 1497eda14cbcSMatt Macy } 1498eda14cbcSMatt Macy } 1499eda14cbcSMatt Macy 1500eda14cbcSMatt Macy return (spa); 1501eda14cbcSMatt Macy } 1502eda14cbcSMatt Macy 1503eda14cbcSMatt Macy /* 1504eda14cbcSMatt Macy * Determine whether a pool with the given pool_guid exists. 1505eda14cbcSMatt Macy */ 1506eda14cbcSMatt Macy boolean_t 1507eda14cbcSMatt Macy spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1508eda14cbcSMatt Macy { 1509eda14cbcSMatt Macy return (spa_by_guid(pool_guid, device_guid) != NULL); 1510eda14cbcSMatt Macy } 1511eda14cbcSMatt Macy 1512eda14cbcSMatt Macy char * 1513eda14cbcSMatt Macy spa_strdup(const char *s) 1514eda14cbcSMatt Macy { 1515eda14cbcSMatt Macy size_t len; 1516eda14cbcSMatt Macy char *new; 1517eda14cbcSMatt Macy 1518eda14cbcSMatt Macy len = strlen(s); 1519eda14cbcSMatt Macy new = kmem_alloc(len + 1, KM_SLEEP); 1520da5137abSMartin Matuska memcpy(new, s, len + 1); 1521eda14cbcSMatt Macy 1522eda14cbcSMatt Macy return (new); 1523eda14cbcSMatt Macy } 1524eda14cbcSMatt Macy 1525eda14cbcSMatt Macy void 1526eda14cbcSMatt Macy spa_strfree(char *s) 1527eda14cbcSMatt Macy { 1528eda14cbcSMatt Macy kmem_free(s, strlen(s) + 1); 1529eda14cbcSMatt Macy } 1530eda14cbcSMatt Macy 1531eda14cbcSMatt Macy uint64_t 1532eda14cbcSMatt Macy spa_generate_guid(spa_t *spa) 1533eda14cbcSMatt Macy { 153433b8c039SMartin Matuska uint64_t guid; 1535eda14cbcSMatt Macy 1536eda14cbcSMatt Macy if (spa != NULL) { 153733b8c039SMartin Matuska do { 153833b8c039SMartin Matuska (void) random_get_pseudo_bytes((void *)&guid, 153933b8c039SMartin Matuska sizeof (guid)); 154033b8c039SMartin Matuska } while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)); 1541eda14cbcSMatt Macy } else { 154233b8c039SMartin Matuska do { 154333b8c039SMartin Matuska (void) random_get_pseudo_bytes((void *)&guid, 154433b8c039SMartin Matuska sizeof (guid)); 154533b8c039SMartin Matuska } while (guid == 0 || spa_guid_exists(guid, 0)); 1546eda14cbcSMatt Macy } 1547eda14cbcSMatt Macy 1548eda14cbcSMatt Macy return (guid); 1549eda14cbcSMatt Macy } 1550eda14cbcSMatt Macy 1551eda14cbcSMatt Macy void 1552eda14cbcSMatt Macy snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1553eda14cbcSMatt Macy { 1554eda14cbcSMatt Macy char type[256]; 1555a0b956f5SMartin Matuska const char *checksum = NULL; 1556a0b956f5SMartin Matuska const char *compress = NULL; 1557eda14cbcSMatt Macy 1558eda14cbcSMatt Macy if (bp != NULL) { 1559eda14cbcSMatt Macy if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1560eda14cbcSMatt Macy dmu_object_byteswap_t bswap = 1561eda14cbcSMatt Macy DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1562eda14cbcSMatt Macy (void) snprintf(type, sizeof (type), "bswap %s %s", 1563eda14cbcSMatt Macy DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 
1564eda14cbcSMatt Macy "metadata" : "data", 1565eda14cbcSMatt Macy dmu_ot_byteswap[bswap].ob_name); 1566eda14cbcSMatt Macy } else { 1567eda14cbcSMatt Macy (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1568eda14cbcSMatt Macy sizeof (type)); 1569eda14cbcSMatt Macy } 1570eda14cbcSMatt Macy if (!BP_IS_EMBEDDED(bp)) { 1571eda14cbcSMatt Macy checksum = 1572eda14cbcSMatt Macy zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1573eda14cbcSMatt Macy } 1574eda14cbcSMatt Macy compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1575eda14cbcSMatt Macy } 1576eda14cbcSMatt Macy 1577dbd5678dSMartin Matuska SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum, 1578eda14cbcSMatt Macy compress); 1579eda14cbcSMatt Macy } 1580eda14cbcSMatt Macy 1581eda14cbcSMatt Macy void 1582eda14cbcSMatt Macy spa_freeze(spa_t *spa) 1583eda14cbcSMatt Macy { 1584eda14cbcSMatt Macy uint64_t freeze_txg = 0; 1585eda14cbcSMatt Macy 1586eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1587eda14cbcSMatt Macy if (spa->spa_freeze_txg == UINT64_MAX) { 1588eda14cbcSMatt Macy freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1589eda14cbcSMatt Macy spa->spa_freeze_txg = freeze_txg; 1590eda14cbcSMatt Macy } 1591eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALL, FTAG); 1592eda14cbcSMatt Macy if (freeze_txg != 0) 1593eda14cbcSMatt Macy txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1594eda14cbcSMatt Macy } 1595eda14cbcSMatt Macy 1596eda14cbcSMatt Macy void 1597eda14cbcSMatt Macy zfs_panic_recover(const char *fmt, ...) 1598eda14cbcSMatt Macy { 1599eda14cbcSMatt Macy va_list adx; 1600eda14cbcSMatt Macy 1601eda14cbcSMatt Macy va_start(adx, fmt); 1602eda14cbcSMatt Macy vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1603eda14cbcSMatt Macy va_end(adx); 1604eda14cbcSMatt Macy } 1605eda14cbcSMatt Macy 1606eda14cbcSMatt Macy /* 1607eda14cbcSMatt Macy * This is a stripped-down version of strtoull, suitable only for converting 1608eda14cbcSMatt Macy * lowercase hexadecimal numbers that don't overflow. 
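 *
 * For example (sketch): zfs_strtonum("1a2b", &end) returns 0x1a2b with
 * 'end' left at the terminating NUL, while zfs_strtonum("10q", &end)
 * stops at the first non-hex character and returns 0x10 with 'end'
 * pointing at 'q'. There is no "0x" prefix or uppercase handling.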
1609eda14cbcSMatt Macy */ 1610eda14cbcSMatt Macy uint64_t 1611eda14cbcSMatt Macy zfs_strtonum(const char *str, char **nptr) 1612eda14cbcSMatt Macy { 1613eda14cbcSMatt Macy uint64_t val = 0; 1614eda14cbcSMatt Macy char c; 1615eda14cbcSMatt Macy int digit; 1616eda14cbcSMatt Macy 1617eda14cbcSMatt Macy while ((c = *str) != '\0') { 1618eda14cbcSMatt Macy if (c >= '0' && c <= '9') 1619eda14cbcSMatt Macy digit = c - '0'; 1620eda14cbcSMatt Macy else if (c >= 'a' && c <= 'f') 1621eda14cbcSMatt Macy digit = 10 + c - 'a'; 1622eda14cbcSMatt Macy else 1623eda14cbcSMatt Macy break; 1624eda14cbcSMatt Macy 1625eda14cbcSMatt Macy val *= 16; 1626eda14cbcSMatt Macy val += digit; 1627eda14cbcSMatt Macy 1628eda14cbcSMatt Macy str++; 1629eda14cbcSMatt Macy } 1630eda14cbcSMatt Macy 1631eda14cbcSMatt Macy if (nptr) 1632eda14cbcSMatt Macy *nptr = (char *)str; 1633eda14cbcSMatt Macy 1634eda14cbcSMatt Macy return (val); 1635eda14cbcSMatt Macy } 1636eda14cbcSMatt Macy 1637eda14cbcSMatt Macy void 1638eda14cbcSMatt Macy spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx) 1639eda14cbcSMatt Macy { 1640eda14cbcSMatt Macy /* 1641eda14cbcSMatt Macy * We bump the feature refcount for each special vdev added to the pool 1642eda14cbcSMatt Macy */ 1643eda14cbcSMatt Macy ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)); 1644eda14cbcSMatt Macy spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx); 1645eda14cbcSMatt Macy } 1646eda14cbcSMatt Macy 1647eda14cbcSMatt Macy /* 1648eda14cbcSMatt Macy * ========================================================================== 1649eda14cbcSMatt Macy * Accessor functions 1650eda14cbcSMatt Macy * ========================================================================== 1651eda14cbcSMatt Macy */ 1652eda14cbcSMatt Macy 1653eda14cbcSMatt Macy boolean_t 1654eda14cbcSMatt Macy spa_shutting_down(spa_t *spa) 1655eda14cbcSMatt Macy { 1656eda14cbcSMatt Macy return (spa->spa_async_suspended); 1657eda14cbcSMatt Macy } 1658eda14cbcSMatt Macy 1659eda14cbcSMatt Macy dsl_pool_t * 1660eda14cbcSMatt Macy spa_get_dsl(spa_t *spa) 1661eda14cbcSMatt Macy { 1662eda14cbcSMatt Macy return (spa->spa_dsl_pool); 1663eda14cbcSMatt Macy } 1664eda14cbcSMatt Macy 1665eda14cbcSMatt Macy boolean_t 1666eda14cbcSMatt Macy spa_is_initializing(spa_t *spa) 1667eda14cbcSMatt Macy { 1668eda14cbcSMatt Macy return (spa->spa_is_initializing); 1669eda14cbcSMatt Macy } 1670eda14cbcSMatt Macy 1671eda14cbcSMatt Macy boolean_t 1672eda14cbcSMatt Macy spa_indirect_vdevs_loaded(spa_t *spa) 1673eda14cbcSMatt Macy { 1674eda14cbcSMatt Macy return (spa->spa_indirect_vdevs_loaded); 1675eda14cbcSMatt Macy } 1676eda14cbcSMatt Macy 1677eda14cbcSMatt Macy blkptr_t * 1678eda14cbcSMatt Macy spa_get_rootblkptr(spa_t *spa) 1679eda14cbcSMatt Macy { 1680eda14cbcSMatt Macy return (&spa->spa_ubsync.ub_rootbp); 1681eda14cbcSMatt Macy } 1682eda14cbcSMatt Macy 1683eda14cbcSMatt Macy void 1684eda14cbcSMatt Macy spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1685eda14cbcSMatt Macy { 1686eda14cbcSMatt Macy spa->spa_uberblock.ub_rootbp = *bp; 1687eda14cbcSMatt Macy } 1688eda14cbcSMatt Macy 1689eda14cbcSMatt Macy void 1690eda14cbcSMatt Macy spa_altroot(spa_t *spa, char *buf, size_t buflen) 1691eda14cbcSMatt Macy { 1692eda14cbcSMatt Macy if (spa->spa_root == NULL) 1693eda14cbcSMatt Macy buf[0] = '\0'; 1694eda14cbcSMatt Macy else 1695be181ee2SMartin Matuska (void) strlcpy(buf, spa->spa_root, buflen); 1696eda14cbcSMatt Macy } 1697eda14cbcSMatt Macy 1698be181ee2SMartin Matuska uint32_t 1699eda14cbcSMatt Macy spa_sync_pass(spa_t *spa) 
1700eda14cbcSMatt Macy { 1701eda14cbcSMatt Macy return (spa->spa_sync_pass); 1702eda14cbcSMatt Macy } 1703eda14cbcSMatt Macy 1704eda14cbcSMatt Macy char * 1705eda14cbcSMatt Macy spa_name(spa_t *spa) 1706eda14cbcSMatt Macy { 1707eda14cbcSMatt Macy return (spa->spa_name); 1708eda14cbcSMatt Macy } 1709eda14cbcSMatt Macy 1710eda14cbcSMatt Macy uint64_t 1711eda14cbcSMatt Macy spa_guid(spa_t *spa) 1712eda14cbcSMatt Macy { 1713eda14cbcSMatt Macy dsl_pool_t *dp = spa_get_dsl(spa); 1714eda14cbcSMatt Macy uint64_t guid; 1715eda14cbcSMatt Macy 1716eda14cbcSMatt Macy /* 1717eda14cbcSMatt Macy * If we fail to parse the config during spa_load(), we can go through 1718eda14cbcSMatt Macy * the error path (which posts an ereport) and end up here with no root 1719eda14cbcSMatt Macy * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1720eda14cbcSMatt Macy * this case. 1721eda14cbcSMatt Macy */ 1722eda14cbcSMatt Macy if (spa->spa_root_vdev == NULL) 1723eda14cbcSMatt Macy return (spa->spa_config_guid); 1724eda14cbcSMatt Macy 1725eda14cbcSMatt Macy guid = spa->spa_last_synced_guid != 0 ? 1726eda14cbcSMatt Macy spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1727eda14cbcSMatt Macy 1728eda14cbcSMatt Macy /* 1729eda14cbcSMatt Macy * Return the most recently synced out guid unless we're 1730eda14cbcSMatt Macy * in syncing context. 1731eda14cbcSMatt Macy */ 1732eda14cbcSMatt Macy if (dp && dsl_pool_sync_context(dp)) 1733eda14cbcSMatt Macy return (spa->spa_root_vdev->vdev_guid); 1734eda14cbcSMatt Macy else 1735eda14cbcSMatt Macy return (guid); 1736eda14cbcSMatt Macy } 1737eda14cbcSMatt Macy 1738eda14cbcSMatt Macy uint64_t 1739eda14cbcSMatt Macy spa_load_guid(spa_t *spa) 1740eda14cbcSMatt Macy { 1741eda14cbcSMatt Macy /* 1742eda14cbcSMatt Macy * This is a GUID that exists solely as a reference for the 1743eda14cbcSMatt Macy * purposes of the arc. It is generated at load time, and 1744eda14cbcSMatt Macy * is never written to persistent storage. 1745eda14cbcSMatt Macy */ 1746eda14cbcSMatt Macy return (spa->spa_load_guid); 1747eda14cbcSMatt Macy } 1748eda14cbcSMatt Macy 1749eda14cbcSMatt Macy uint64_t 1750eda14cbcSMatt Macy spa_last_synced_txg(spa_t *spa) 1751eda14cbcSMatt Macy { 1752eda14cbcSMatt Macy return (spa->spa_ubsync.ub_txg); 1753eda14cbcSMatt Macy } 1754eda14cbcSMatt Macy 1755eda14cbcSMatt Macy uint64_t 1756eda14cbcSMatt Macy spa_first_txg(spa_t *spa) 1757eda14cbcSMatt Macy { 1758eda14cbcSMatt Macy return (spa->spa_first_txg); 1759eda14cbcSMatt Macy } 1760eda14cbcSMatt Macy 1761eda14cbcSMatt Macy uint64_t 1762eda14cbcSMatt Macy spa_syncing_txg(spa_t *spa) 1763eda14cbcSMatt Macy { 1764eda14cbcSMatt Macy return (spa->spa_syncing_txg); 1765eda14cbcSMatt Macy } 1766eda14cbcSMatt Macy 1767eda14cbcSMatt Macy /* 1768eda14cbcSMatt Macy * Return the last txg where data can be dirtied. The final txgs 1769eda14cbcSMatt Macy * will be used to just clear out any deferred frees that remain. 
1770eda14cbcSMatt Macy */ 1771eda14cbcSMatt Macy uint64_t 1772eda14cbcSMatt Macy spa_final_dirty_txg(spa_t *spa) 1773eda14cbcSMatt Macy { 1774eda14cbcSMatt Macy return (spa->spa_final_txg - TXG_DEFER_SIZE); 1775eda14cbcSMatt Macy } 1776eda14cbcSMatt Macy 1777eda14cbcSMatt Macy pool_state_t 1778eda14cbcSMatt Macy spa_state(spa_t *spa) 1779eda14cbcSMatt Macy { 1780eda14cbcSMatt Macy return (spa->spa_state); 1781eda14cbcSMatt Macy } 1782eda14cbcSMatt Macy 1783eda14cbcSMatt Macy spa_load_state_t 1784eda14cbcSMatt Macy spa_load_state(spa_t *spa) 1785eda14cbcSMatt Macy { 1786eda14cbcSMatt Macy return (spa->spa_load_state); 1787eda14cbcSMatt Macy } 1788eda14cbcSMatt Macy 1789eda14cbcSMatt Macy uint64_t 1790eda14cbcSMatt Macy spa_freeze_txg(spa_t *spa) 1791eda14cbcSMatt Macy { 1792eda14cbcSMatt Macy return (spa->spa_freeze_txg); 1793eda14cbcSMatt Macy } 1794eda14cbcSMatt Macy 1795eda14cbcSMatt Macy /* 1796eda14cbcSMatt Macy * Return the inflated asize for a logical write in bytes. This is used by the 1797eda14cbcSMatt Macy * DMU to calculate the space a logical write will require on disk. 1798eda14cbcSMatt Macy * If lsize is smaller than the largest physical block size allocatable on this 1799eda14cbcSMatt Macy * pool we use its value instead, since the write will end up using the whole 1800eda14cbcSMatt Macy * block anyway. 1801eda14cbcSMatt Macy */ 1802eda14cbcSMatt Macy uint64_t 1803eda14cbcSMatt Macy spa_get_worst_case_asize(spa_t *spa, uint64_t lsize) 1804eda14cbcSMatt Macy { 1805eda14cbcSMatt Macy if (lsize == 0) 1806eda14cbcSMatt Macy return (0); /* No inflation needed */ 1807eda14cbcSMatt Macy return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation); 1808eda14cbcSMatt Macy } 1809eda14cbcSMatt Macy 1810eda14cbcSMatt Macy /* 1811184c1b94SMartin Matuska * Return the amount of slop space in bytes. It is typically 1/32 of the pool 1812184c1b94SMartin Matuska * (3.2%), minus the embedded log space. On very small pools, it may be 1813caed7b1cSMartin Matuska * slightly larger than this. On very large pools, it will be capped to 1814caed7b1cSMartin Matuska * the value of spa_max_slop. The embedded log space is not included in 1815184c1b94SMartin Matuska * spa_dspace. By subtracting it, the usable space (per "zfs list") is a 1816184c1b94SMartin Matuska * constant 97% of the total space, regardless of metaslab size (assuming the 1817184c1b94SMartin Matuska * default spa_slop_shift=5 and a non-tiny pool). 1818eda14cbcSMatt Macy * 1819184c1b94SMartin Matuska * See the comment above spa_slop_shift for more details. 1820eda14cbcSMatt Macy */ 1821eda14cbcSMatt Macy uint64_t 1822eda14cbcSMatt Macy spa_get_slop_space(spa_t *spa) 1823eda14cbcSMatt Macy { 18243f9d360cSMartin Matuska uint64_t space = 0; 18253f9d360cSMartin Matuska uint64_t slop = 0; 18263f9d360cSMartin Matuska 18273f9d360cSMartin Matuska /* 18283f9d360cSMartin Matuska * Make sure spa_dedup_dspace has been set. 18293f9d360cSMartin Matuska */ 18303f9d360cSMartin Matuska if (spa->spa_dedup_dspace == ~0ULL) 18313f9d360cSMartin Matuska spa_update_dspace(spa); 18323f9d360cSMartin Matuska 18333f9d360cSMartin Matuska /* 18343f9d360cSMartin Matuska * spa_get_dspace() includes the space only logically "used" by 18353f9d360cSMartin Matuska * deduplicated data, so since it's not useful to reserve more 18363f9d360cSMartin Matuska * space with more deduplicated data, we subtract that out here. 
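 *
 * Putting the steps below together, a rough worked example (assuming the
 * default spa_slop_shift of 5, no dedup and no embedded log space): a pool
 * with 1 TiB of dspace gets 1 TiB >> 5 = 32 GiB of slop, capped at
 * spa_max_slop for very large pools and floored at
 * MIN(space / 2, spa_min_slop) for very small ones.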
18373f9d360cSMartin Matuska */ 18383f9d360cSMartin Matuska space = spa_get_dspace(spa) - spa->spa_dedup_dspace; 18393f9d360cSMartin Matuska slop = MIN(space >> spa_slop_shift, spa_max_slop); 1840184c1b94SMartin Matuska 1841184c1b94SMartin Matuska /* 1842184c1b94SMartin Matuska * Subtract the embedded log space, but no more than half the (3.2%) 1843184c1b94SMartin Matuska * unusable space. Note, the "no more than half" is only relevant if 1844184c1b94SMartin Matuska * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by 1845184c1b94SMartin Matuska * default. 1846184c1b94SMartin Matuska */ 1847184c1b94SMartin Matuska uint64_t embedded_log = 1848184c1b94SMartin Matuska metaslab_class_get_dspace(spa_embedded_log_class(spa)); 1849184c1b94SMartin Matuska slop -= MIN(embedded_log, slop >> 1); 1850184c1b94SMartin Matuska 1851184c1b94SMartin Matuska /* 1852184c1b94SMartin Matuska * Slop space should be at least spa_min_slop, but no more than half 1853184c1b94SMartin Matuska * the entire pool. 1854184c1b94SMartin Matuska */ 1855184c1b94SMartin Matuska slop = MAX(slop, MIN(space >> 1, spa_min_slop)); 1856184c1b94SMartin Matuska return (slop); 1857eda14cbcSMatt Macy } 1858eda14cbcSMatt Macy 1859eda14cbcSMatt Macy uint64_t 1860eda14cbcSMatt Macy spa_get_dspace(spa_t *spa) 1861eda14cbcSMatt Macy { 1862eda14cbcSMatt Macy return (spa->spa_dspace); 1863eda14cbcSMatt Macy } 1864eda14cbcSMatt Macy 1865eda14cbcSMatt Macy uint64_t 1866eda14cbcSMatt Macy spa_get_checkpoint_space(spa_t *spa) 1867eda14cbcSMatt Macy { 1868eda14cbcSMatt Macy return (spa->spa_checkpoint_info.sci_dspace); 1869eda14cbcSMatt Macy } 1870eda14cbcSMatt Macy 1871eda14cbcSMatt Macy void 1872eda14cbcSMatt Macy spa_update_dspace(spa_t *spa) 1873eda14cbcSMatt Macy { 1874eda14cbcSMatt Macy spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 18752a58b312SMartin Matuska ddt_get_dedup_dspace(spa) + brt_get_dspace(spa); 1876681ce946SMartin Matuska if (spa->spa_nonallocating_dspace > 0) { 1877eda14cbcSMatt Macy /* 1878681ce946SMartin Matuska * Subtract the space provided by all non-allocating vdevs that 1879681ce946SMartin Matuska * contribute to dspace. If a file is overwritten, its old 1880681ce946SMartin Matuska * blocks are freed and new blocks are allocated. If there are 1881681ce946SMartin Matuska * no snapshots of the file, the available space should remain 1882681ce946SMartin Matuska * the same. The old blocks could be freed from the 1883681ce946SMartin Matuska * non-allocating vdev, but the new blocks must be allocated on 1884681ce946SMartin Matuska * other (allocating) vdevs. By reserving the entire size of 1885681ce946SMartin Matuska * the non-allocating vdevs (including allocated space), we 1886681ce946SMartin Matuska * ensure that there will be enough space on the allocating 1887681ce946SMartin Matuska * vdevs for this file overwrite to succeed. 1888eda14cbcSMatt Macy * 1889eda14cbcSMatt Macy * Note that the DMU/DSL doesn't actually know or care 1890eda14cbcSMatt Macy * how much space is allocated (it does its own tracking 1891eda14cbcSMatt Macy * of how much space has been logically used). So it 1892eda14cbcSMatt Macy * doesn't matter that the data we are moving may be 1893681ce946SMartin Matuska * allocated twice (on the old device and the new device). 
1894eda14cbcSMatt Macy */ 1895681ce946SMartin Matuska ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace); 1896681ce946SMartin Matuska spa->spa_dspace -= spa->spa_nonallocating_dspace; 1897eda14cbcSMatt Macy } 1898eda14cbcSMatt Macy } 1899eda14cbcSMatt Macy 1900eda14cbcSMatt Macy /* 1901eda14cbcSMatt Macy * Return the failure mode that has been set to this pool. The default 1902eda14cbcSMatt Macy * behavior will be to block all I/Os when a complete failure occurs. 1903eda14cbcSMatt Macy */ 1904eda14cbcSMatt Macy uint64_t 1905eda14cbcSMatt Macy spa_get_failmode(spa_t *spa) 1906eda14cbcSMatt Macy { 1907eda14cbcSMatt Macy return (spa->spa_failmode); 1908eda14cbcSMatt Macy } 1909eda14cbcSMatt Macy 1910eda14cbcSMatt Macy boolean_t 1911eda14cbcSMatt Macy spa_suspended(spa_t *spa) 1912eda14cbcSMatt Macy { 1913eda14cbcSMatt Macy return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1914eda14cbcSMatt Macy } 1915eda14cbcSMatt Macy 1916eda14cbcSMatt Macy uint64_t 1917eda14cbcSMatt Macy spa_version(spa_t *spa) 1918eda14cbcSMatt Macy { 1919eda14cbcSMatt Macy return (spa->spa_ubsync.ub_version); 1920eda14cbcSMatt Macy } 1921eda14cbcSMatt Macy 1922eda14cbcSMatt Macy boolean_t 1923eda14cbcSMatt Macy spa_deflate(spa_t *spa) 1924eda14cbcSMatt Macy { 1925eda14cbcSMatt Macy return (spa->spa_deflate); 1926eda14cbcSMatt Macy } 1927eda14cbcSMatt Macy 1928eda14cbcSMatt Macy metaslab_class_t * 1929eda14cbcSMatt Macy spa_normal_class(spa_t *spa) 1930eda14cbcSMatt Macy { 1931eda14cbcSMatt Macy return (spa->spa_normal_class); 1932eda14cbcSMatt Macy } 1933eda14cbcSMatt Macy 1934eda14cbcSMatt Macy metaslab_class_t * 1935eda14cbcSMatt Macy spa_log_class(spa_t *spa) 1936eda14cbcSMatt Macy { 1937eda14cbcSMatt Macy return (spa->spa_log_class); 1938eda14cbcSMatt Macy } 1939eda14cbcSMatt Macy 1940eda14cbcSMatt Macy metaslab_class_t * 1941184c1b94SMartin Matuska spa_embedded_log_class(spa_t *spa) 1942184c1b94SMartin Matuska { 1943184c1b94SMartin Matuska return (spa->spa_embedded_log_class); 1944184c1b94SMartin Matuska } 1945184c1b94SMartin Matuska 1946184c1b94SMartin Matuska metaslab_class_t * 1947eda14cbcSMatt Macy spa_special_class(spa_t *spa) 1948eda14cbcSMatt Macy { 1949eda14cbcSMatt Macy return (spa->spa_special_class); 1950eda14cbcSMatt Macy } 1951eda14cbcSMatt Macy 1952eda14cbcSMatt Macy metaslab_class_t * 1953eda14cbcSMatt Macy spa_dedup_class(spa_t *spa) 1954eda14cbcSMatt Macy { 1955eda14cbcSMatt Macy return (spa->spa_dedup_class); 1956eda14cbcSMatt Macy } 1957eda14cbcSMatt Macy 1958eda14cbcSMatt Macy /* 1959eda14cbcSMatt Macy * Locate an appropriate allocation class 1960eda14cbcSMatt Macy */ 1961eda14cbcSMatt Macy metaslab_class_t * 1962eda14cbcSMatt Macy spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype, 1963eda14cbcSMatt Macy uint_t level, uint_t special_smallblk) 1964eda14cbcSMatt Macy { 1965184c1b94SMartin Matuska /* 1966184c1b94SMartin Matuska * ZIL allocations determine their class in zio_alloc_zil(). 
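 *
 * For everything else, the checks below choose, in order: dedup (DDT) data
 * goes to the dedup class when it has groups, else to special when
 * zfs_ddt_data_is_special allows; indirect blocks of user data go to
 * special when zfs_user_indirect_is_special is set; other metadata goes to
 * special; small file blocks (size <= special_smallblk) may go to special,
 * subject to the metadata reserve; otherwise the normal class is used.
 * For example (sketch), an 8K DMU_OT_PLAIN_FILE_CONTENTS data block with a
 * special_smallblk of 32K would normally land in the special class on a
 * pool that has one.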
1967184c1b94SMartin Matuska */ 1968184c1b94SMartin Matuska ASSERT(objtype != DMU_OT_INTENT_LOG); 1969eda14cbcSMatt Macy 1970eda14cbcSMatt Macy boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 1971eda14cbcSMatt Macy 1972eda14cbcSMatt Macy if (DMU_OT_IS_DDT(objtype)) { 1973eda14cbcSMatt Macy if (spa->spa_dedup_class->mc_groups != 0) 1974eda14cbcSMatt Macy return (spa_dedup_class(spa)); 1975eda14cbcSMatt Macy else if (has_special_class && zfs_ddt_data_is_special) 1976eda14cbcSMatt Macy return (spa_special_class(spa)); 1977eda14cbcSMatt Macy else 1978eda14cbcSMatt Macy return (spa_normal_class(spa)); 1979eda14cbcSMatt Macy } 1980eda14cbcSMatt Macy 1981eda14cbcSMatt Macy /* Indirect blocks for user data can land in special if allowed */ 1982eda14cbcSMatt Macy if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 1983eda14cbcSMatt Macy if (has_special_class && zfs_user_indirect_is_special) 1984eda14cbcSMatt Macy return (spa_special_class(spa)); 1985eda14cbcSMatt Macy else 1986eda14cbcSMatt Macy return (spa_normal_class(spa)); 1987eda14cbcSMatt Macy } 1988eda14cbcSMatt Macy 1989eda14cbcSMatt Macy if (DMU_OT_IS_METADATA(objtype) || level > 0) { 1990eda14cbcSMatt Macy if (has_special_class) 1991eda14cbcSMatt Macy return (spa_special_class(spa)); 1992eda14cbcSMatt Macy else 1993eda14cbcSMatt Macy return (spa_normal_class(spa)); 1994eda14cbcSMatt Macy } 1995eda14cbcSMatt Macy 1996eda14cbcSMatt Macy /* 1997eda14cbcSMatt Macy * Allow small file blocks in special class in some cases (like 1998eda14cbcSMatt Macy * for the dRAID vdev feature). But always leave a reserve of 1999eda14cbcSMatt Macy * zfs_special_class_metadata_reserve_pct exclusively for metadata. 2000eda14cbcSMatt Macy */ 2001eda14cbcSMatt Macy if (DMU_OT_IS_FILE(objtype) && 2002eda14cbcSMatt Macy has_special_class && size <= special_smallblk) { 2003eda14cbcSMatt Macy metaslab_class_t *special = spa_special_class(spa); 2004eda14cbcSMatt Macy uint64_t alloc = metaslab_class_get_alloc(special); 2005eda14cbcSMatt Macy uint64_t space = metaslab_class_get_space(special); 2006eda14cbcSMatt Macy uint64_t limit = 2007eda14cbcSMatt Macy (space * (100 - zfs_special_class_metadata_reserve_pct)) 2008eda14cbcSMatt Macy / 100; 2009eda14cbcSMatt Macy 2010eda14cbcSMatt Macy if (alloc < limit) 2011eda14cbcSMatt Macy return (special); 2012eda14cbcSMatt Macy } 2013eda14cbcSMatt Macy 2014eda14cbcSMatt Macy return (spa_normal_class(spa)); 2015eda14cbcSMatt Macy } 2016eda14cbcSMatt Macy 2017eda14cbcSMatt Macy void 2018eda14cbcSMatt Macy spa_evicting_os_register(spa_t *spa, objset_t *os) 2019eda14cbcSMatt Macy { 2020eda14cbcSMatt Macy mutex_enter(&spa->spa_evicting_os_lock); 2021eda14cbcSMatt Macy list_insert_head(&spa->spa_evicting_os_list, os); 2022eda14cbcSMatt Macy mutex_exit(&spa->spa_evicting_os_lock); 2023eda14cbcSMatt Macy } 2024eda14cbcSMatt Macy 2025eda14cbcSMatt Macy void 2026eda14cbcSMatt Macy spa_evicting_os_deregister(spa_t *spa, objset_t *os) 2027eda14cbcSMatt Macy { 2028eda14cbcSMatt Macy mutex_enter(&spa->spa_evicting_os_lock); 2029eda14cbcSMatt Macy list_remove(&spa->spa_evicting_os_list, os); 2030eda14cbcSMatt Macy cv_broadcast(&spa->spa_evicting_os_cv); 2031eda14cbcSMatt Macy mutex_exit(&spa->spa_evicting_os_lock); 2032eda14cbcSMatt Macy } 2033eda14cbcSMatt Macy 2034eda14cbcSMatt Macy void 2035eda14cbcSMatt Macy spa_evicting_os_wait(spa_t *spa) 2036eda14cbcSMatt Macy { 2037eda14cbcSMatt Macy mutex_enter(&spa->spa_evicting_os_lock); 2038eda14cbcSMatt Macy while (!list_is_empty(&spa->spa_evicting_os_list)) 
2039eda14cbcSMatt Macy cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 2040eda14cbcSMatt Macy mutex_exit(&spa->spa_evicting_os_lock); 2041eda14cbcSMatt Macy 2042eda14cbcSMatt Macy dmu_buf_user_evict_wait(); 2043eda14cbcSMatt Macy } 2044eda14cbcSMatt Macy 2045eda14cbcSMatt Macy int 2046eda14cbcSMatt Macy spa_max_replication(spa_t *spa) 2047eda14cbcSMatt Macy { 2048eda14cbcSMatt Macy /* 2049eda14cbcSMatt Macy * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 2050eda14cbcSMatt Macy * handle BPs with more than one DVA allocated. Set our max 2051eda14cbcSMatt Macy * replication level accordingly. 2052eda14cbcSMatt Macy */ 2053eda14cbcSMatt Macy if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 2054eda14cbcSMatt Macy return (1); 2055eda14cbcSMatt Macy return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 2056eda14cbcSMatt Macy } 2057eda14cbcSMatt Macy 2058eda14cbcSMatt Macy int 2059eda14cbcSMatt Macy spa_prev_software_version(spa_t *spa) 2060eda14cbcSMatt Macy { 2061eda14cbcSMatt Macy return (spa->spa_prev_software_version); 2062eda14cbcSMatt Macy } 2063eda14cbcSMatt Macy 2064eda14cbcSMatt Macy uint64_t 2065eda14cbcSMatt Macy spa_deadman_synctime(spa_t *spa) 2066eda14cbcSMatt Macy { 2067eda14cbcSMatt Macy return (spa->spa_deadman_synctime); 2068eda14cbcSMatt Macy } 2069eda14cbcSMatt Macy 2070eda14cbcSMatt Macy spa_autotrim_t 2071eda14cbcSMatt Macy spa_get_autotrim(spa_t *spa) 2072eda14cbcSMatt Macy { 2073eda14cbcSMatt Macy return (spa->spa_autotrim); 2074eda14cbcSMatt Macy } 2075eda14cbcSMatt Macy 2076eda14cbcSMatt Macy uint64_t 2077eda14cbcSMatt Macy spa_deadman_ziotime(spa_t *spa) 2078eda14cbcSMatt Macy { 2079eda14cbcSMatt Macy return (spa->spa_deadman_ziotime); 2080eda14cbcSMatt Macy } 2081eda14cbcSMatt Macy 2082eda14cbcSMatt Macy uint64_t 2083eda14cbcSMatt Macy spa_get_deadman_failmode(spa_t *spa) 2084eda14cbcSMatt Macy { 2085eda14cbcSMatt Macy return (spa->spa_deadman_failmode); 2086eda14cbcSMatt Macy } 2087eda14cbcSMatt Macy 2088eda14cbcSMatt Macy void 2089eda14cbcSMatt Macy spa_set_deadman_failmode(spa_t *spa, const char *failmode) 2090eda14cbcSMatt Macy { 2091eda14cbcSMatt Macy if (strcmp(failmode, "wait") == 0) 2092eda14cbcSMatt Macy spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2093eda14cbcSMatt Macy else if (strcmp(failmode, "continue") == 0) 2094eda14cbcSMatt Macy spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE; 2095eda14cbcSMatt Macy else if (strcmp(failmode, "panic") == 0) 2096eda14cbcSMatt Macy spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; 2097eda14cbcSMatt Macy else 2098eda14cbcSMatt Macy spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2099eda14cbcSMatt Macy } 2100eda14cbcSMatt Macy 2101eda14cbcSMatt Macy void 2102eda14cbcSMatt Macy spa_set_deadman_ziotime(hrtime_t ns) 2103eda14cbcSMatt Macy { 2104eda14cbcSMatt Macy spa_t *spa = NULL; 2105eda14cbcSMatt Macy 2106eda14cbcSMatt Macy if (spa_mode_global != SPA_MODE_UNINIT) { 2107eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 2108eda14cbcSMatt Macy while ((spa = spa_next(spa)) != NULL) 2109eda14cbcSMatt Macy spa->spa_deadman_ziotime = ns; 2110eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 2111eda14cbcSMatt Macy } 2112eda14cbcSMatt Macy } 2113eda14cbcSMatt Macy 2114eda14cbcSMatt Macy void 2115eda14cbcSMatt Macy spa_set_deadman_synctime(hrtime_t ns) 2116eda14cbcSMatt Macy { 2117eda14cbcSMatt Macy spa_t *spa = NULL; 2118eda14cbcSMatt Macy 2119eda14cbcSMatt Macy if (spa_mode_global != SPA_MODE_UNINIT) { 2120eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 
2121eda14cbcSMatt Macy while ((spa = spa_next(spa)) != NULL) 2122eda14cbcSMatt Macy spa->spa_deadman_synctime = ns; 2123eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 2124eda14cbcSMatt Macy } 2125eda14cbcSMatt Macy } 2126eda14cbcSMatt Macy 2127eda14cbcSMatt Macy uint64_t 2128eda14cbcSMatt Macy dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 2129eda14cbcSMatt Macy { 2130eda14cbcSMatt Macy uint64_t asize = DVA_GET_ASIZE(dva); 2131eda14cbcSMatt Macy uint64_t dsize = asize; 2132eda14cbcSMatt Macy 2133eda14cbcSMatt Macy ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 2134eda14cbcSMatt Macy 2135eda14cbcSMatt Macy if (asize != 0 && spa->spa_deflate) { 2136eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2137eda14cbcSMatt Macy if (vd != NULL) 2138eda14cbcSMatt Macy dsize = (asize >> SPA_MINBLOCKSHIFT) * 2139eda14cbcSMatt Macy vd->vdev_deflate_ratio; 2140eda14cbcSMatt Macy } 2141eda14cbcSMatt Macy 2142eda14cbcSMatt Macy return (dsize); 2143eda14cbcSMatt Macy } 2144eda14cbcSMatt Macy 2145eda14cbcSMatt Macy uint64_t 2146eda14cbcSMatt Macy bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 2147eda14cbcSMatt Macy { 2148eda14cbcSMatt Macy uint64_t dsize = 0; 2149eda14cbcSMatt Macy 2150eda14cbcSMatt Macy for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2151eda14cbcSMatt Macy dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2152eda14cbcSMatt Macy 2153eda14cbcSMatt Macy return (dsize); 2154eda14cbcSMatt Macy } 2155eda14cbcSMatt Macy 2156eda14cbcSMatt Macy uint64_t 2157eda14cbcSMatt Macy bp_get_dsize(spa_t *spa, const blkptr_t *bp) 2158eda14cbcSMatt Macy { 2159eda14cbcSMatt Macy uint64_t dsize = 0; 2160eda14cbcSMatt Macy 2161eda14cbcSMatt Macy spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2162eda14cbcSMatt Macy 2163eda14cbcSMatt Macy for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2164eda14cbcSMatt Macy dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2165eda14cbcSMatt Macy 2166eda14cbcSMatt Macy spa_config_exit(spa, SCL_VDEV, FTAG); 2167eda14cbcSMatt Macy 2168eda14cbcSMatt Macy return (dsize); 2169eda14cbcSMatt Macy } 2170eda14cbcSMatt Macy 2171eda14cbcSMatt Macy uint64_t 2172eda14cbcSMatt Macy spa_dirty_data(spa_t *spa) 2173eda14cbcSMatt Macy { 2174eda14cbcSMatt Macy return (spa->spa_dsl_pool->dp_dirty_total); 2175eda14cbcSMatt Macy } 2176eda14cbcSMatt Macy 2177eda14cbcSMatt Macy /* 2178eda14cbcSMatt Macy * ========================================================================== 2179eda14cbcSMatt Macy * SPA Import Progress Routines 2180eda14cbcSMatt Macy * ========================================================================== 2181eda14cbcSMatt Macy */ 2182eda14cbcSMatt Macy 2183eda14cbcSMatt Macy typedef struct spa_import_progress { 2184eda14cbcSMatt Macy uint64_t pool_guid; /* unique id for updates */ 2185eda14cbcSMatt Macy char *pool_name; 2186eda14cbcSMatt Macy spa_load_state_t spa_load_state; 2187eda14cbcSMatt Macy uint64_t mmp_sec_remaining; /* MMP activity check */ 2188eda14cbcSMatt Macy uint64_t spa_load_max_txg; /* rewind txg */ 2189eda14cbcSMatt Macy procfs_list_node_t smh_node; 2190eda14cbcSMatt Macy } spa_import_progress_t; 2191eda14cbcSMatt Macy 2192eda14cbcSMatt Macy spa_history_list_t *spa_import_progress_list = NULL; 2193eda14cbcSMatt Macy 2194eda14cbcSMatt Macy static int 2195eda14cbcSMatt Macy spa_import_progress_show_header(struct seq_file *f) 2196eda14cbcSMatt Macy { 2197eda14cbcSMatt Macy seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid", 2198eda14cbcSMatt Macy "load_state", "multihost_secs", "max_txg", 2199eda14cbcSMatt Macy "pool_name"); 
2200eda14cbcSMatt Macy return (0); 2201eda14cbcSMatt Macy } 2202eda14cbcSMatt Macy 2203eda14cbcSMatt Macy static int 2204eda14cbcSMatt Macy spa_import_progress_show(struct seq_file *f, void *data) 2205eda14cbcSMatt Macy { 2206eda14cbcSMatt Macy spa_import_progress_t *sip = (spa_import_progress_t *)data; 2207eda14cbcSMatt Macy 2208eda14cbcSMatt Macy seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n", 2209eda14cbcSMatt Macy (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state, 2210eda14cbcSMatt Macy (u_longlong_t)sip->mmp_sec_remaining, 2211eda14cbcSMatt Macy (u_longlong_t)sip->spa_load_max_txg, 2212eda14cbcSMatt Macy (sip->pool_name ? sip->pool_name : "-")); 2213eda14cbcSMatt Macy 2214eda14cbcSMatt Macy return (0); 2215eda14cbcSMatt Macy } 2216eda14cbcSMatt Macy 2217eda14cbcSMatt Macy /* Remove oldest elements from list until there are no more than 'size' left */ 2218eda14cbcSMatt Macy static void 2219eda14cbcSMatt Macy spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size) 2220eda14cbcSMatt Macy { 2221eda14cbcSMatt Macy spa_import_progress_t *sip; 2222eda14cbcSMatt Macy while (shl->size > size) { 2223eda14cbcSMatt Macy sip = list_remove_head(&shl->procfs_list.pl_list); 2224eda14cbcSMatt Macy if (sip->pool_name) 2225eda14cbcSMatt Macy spa_strfree(sip->pool_name); 2226eda14cbcSMatt Macy kmem_free(sip, sizeof (spa_import_progress_t)); 2227eda14cbcSMatt Macy shl->size--; 2228eda14cbcSMatt Macy } 2229eda14cbcSMatt Macy 2230eda14cbcSMatt Macy IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list)); 2231eda14cbcSMatt Macy } 2232eda14cbcSMatt Macy 2233eda14cbcSMatt Macy static void 2234eda14cbcSMatt Macy spa_import_progress_init(void) 2235eda14cbcSMatt Macy { 2236eda14cbcSMatt Macy spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t), 2237eda14cbcSMatt Macy KM_SLEEP); 2238eda14cbcSMatt Macy 2239eda14cbcSMatt Macy spa_import_progress_list->size = 0; 2240eda14cbcSMatt Macy 2241eda14cbcSMatt Macy spa_import_progress_list->procfs_list.pl_private = 2242eda14cbcSMatt Macy spa_import_progress_list; 2243eda14cbcSMatt Macy 2244eda14cbcSMatt Macy procfs_list_install("zfs", 2245c40487d4SMatt Macy NULL, 2246eda14cbcSMatt Macy "import_progress", 2247eda14cbcSMatt Macy 0644, 2248eda14cbcSMatt Macy &spa_import_progress_list->procfs_list, 2249eda14cbcSMatt Macy spa_import_progress_show, 2250eda14cbcSMatt Macy spa_import_progress_show_header, 2251eda14cbcSMatt Macy NULL, 2252eda14cbcSMatt Macy offsetof(spa_import_progress_t, smh_node)); 2253eda14cbcSMatt Macy } 2254eda14cbcSMatt Macy 2255eda14cbcSMatt Macy static void 2256eda14cbcSMatt Macy spa_import_progress_destroy(void) 2257eda14cbcSMatt Macy { 2258eda14cbcSMatt Macy spa_history_list_t *shl = spa_import_progress_list; 2259eda14cbcSMatt Macy procfs_list_uninstall(&shl->procfs_list); 2260eda14cbcSMatt Macy spa_import_progress_truncate(shl, 0); 2261eda14cbcSMatt Macy procfs_list_destroy(&shl->procfs_list); 2262eda14cbcSMatt Macy kmem_free(shl, sizeof (spa_history_list_t)); 2263eda14cbcSMatt Macy } 2264eda14cbcSMatt Macy 2265eda14cbcSMatt Macy int 2266eda14cbcSMatt Macy spa_import_progress_set_state(uint64_t pool_guid, 2267eda14cbcSMatt Macy spa_load_state_t load_state) 2268eda14cbcSMatt Macy { 2269eda14cbcSMatt Macy spa_history_list_t *shl = spa_import_progress_list; 2270eda14cbcSMatt Macy spa_import_progress_t *sip; 2271eda14cbcSMatt Macy int error = ENOENT; 2272eda14cbcSMatt Macy 2273eda14cbcSMatt Macy if (shl->size == 0) 2274eda14cbcSMatt Macy return (0); 2275eda14cbcSMatt Macy 2276eda14cbcSMatt Macy 
mutex_enter(&shl->procfs_list.pl_lock); 2277eda14cbcSMatt Macy for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2278eda14cbcSMatt Macy sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2279eda14cbcSMatt Macy if (sip->pool_guid == pool_guid) { 2280eda14cbcSMatt Macy sip->spa_load_state = load_state; 2281eda14cbcSMatt Macy error = 0; 2282eda14cbcSMatt Macy break; 2283eda14cbcSMatt Macy } 2284eda14cbcSMatt Macy } 2285eda14cbcSMatt Macy mutex_exit(&shl->procfs_list.pl_lock); 2286eda14cbcSMatt Macy 2287eda14cbcSMatt Macy return (error); 2288eda14cbcSMatt Macy } 2289eda14cbcSMatt Macy 2290eda14cbcSMatt Macy int 2291eda14cbcSMatt Macy spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg) 2292eda14cbcSMatt Macy { 2293eda14cbcSMatt Macy spa_history_list_t *shl = spa_import_progress_list; 2294eda14cbcSMatt Macy spa_import_progress_t *sip; 2295eda14cbcSMatt Macy int error = ENOENT; 2296eda14cbcSMatt Macy 2297eda14cbcSMatt Macy if (shl->size == 0) 2298eda14cbcSMatt Macy return (0); 2299eda14cbcSMatt Macy 2300eda14cbcSMatt Macy mutex_enter(&shl->procfs_list.pl_lock); 2301eda14cbcSMatt Macy for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2302eda14cbcSMatt Macy sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2303eda14cbcSMatt Macy if (sip->pool_guid == pool_guid) { 2304eda14cbcSMatt Macy sip->spa_load_max_txg = load_max_txg; 2305eda14cbcSMatt Macy error = 0; 2306eda14cbcSMatt Macy break; 2307eda14cbcSMatt Macy } 2308eda14cbcSMatt Macy } 2309eda14cbcSMatt Macy mutex_exit(&shl->procfs_list.pl_lock); 2310eda14cbcSMatt Macy 2311eda14cbcSMatt Macy return (error); 2312eda14cbcSMatt Macy } 2313eda14cbcSMatt Macy 2314eda14cbcSMatt Macy int 2315eda14cbcSMatt Macy spa_import_progress_set_mmp_check(uint64_t pool_guid, 2316eda14cbcSMatt Macy uint64_t mmp_sec_remaining) 2317eda14cbcSMatt Macy { 2318eda14cbcSMatt Macy spa_history_list_t *shl = spa_import_progress_list; 2319eda14cbcSMatt Macy spa_import_progress_t *sip; 2320eda14cbcSMatt Macy int error = ENOENT; 2321eda14cbcSMatt Macy 2322eda14cbcSMatt Macy if (shl->size == 0) 2323eda14cbcSMatt Macy return (0); 2324eda14cbcSMatt Macy 2325eda14cbcSMatt Macy mutex_enter(&shl->procfs_list.pl_lock); 2326eda14cbcSMatt Macy for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2327eda14cbcSMatt Macy sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2328eda14cbcSMatt Macy if (sip->pool_guid == pool_guid) { 2329eda14cbcSMatt Macy sip->mmp_sec_remaining = mmp_sec_remaining; 2330eda14cbcSMatt Macy error = 0; 2331eda14cbcSMatt Macy break; 2332eda14cbcSMatt Macy } 2333eda14cbcSMatt Macy } 2334eda14cbcSMatt Macy mutex_exit(&shl->procfs_list.pl_lock); 2335eda14cbcSMatt Macy 2336eda14cbcSMatt Macy return (error); 2337eda14cbcSMatt Macy } 2338eda14cbcSMatt Macy 2339eda14cbcSMatt Macy /* 2340eda14cbcSMatt Macy * A new import is in progress, add an entry. 
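 *
 * Sketch of the expected lifecycle: spa_import_progress_add(spa) when the
 * import or load starts, then spa_import_progress_set_state(),
 * spa_import_progress_set_max_txg() and spa_import_progress_set_mmp_check()
 * updates keyed by the pool guid, and finally
 * spa_import_progress_remove(spa_guid(spa)) once the load finishes.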
2341eda14cbcSMatt Macy */ 2342eda14cbcSMatt Macy void 2343eda14cbcSMatt Macy spa_import_progress_add(spa_t *spa) 2344eda14cbcSMatt Macy { 2345eda14cbcSMatt Macy spa_history_list_t *shl = spa_import_progress_list; 2346eda14cbcSMatt Macy spa_import_progress_t *sip; 23472a58b312SMartin Matuska const char *poolname = NULL; 2348eda14cbcSMatt Macy 2349eda14cbcSMatt Macy sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP); 2350eda14cbcSMatt Macy sip->pool_guid = spa_guid(spa); 2351eda14cbcSMatt Macy 2352eda14cbcSMatt Macy (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME, 2353eda14cbcSMatt Macy &poolname); 2354eda14cbcSMatt Macy if (poolname == NULL) 2355eda14cbcSMatt Macy poolname = spa_name(spa); 2356eda14cbcSMatt Macy sip->pool_name = spa_strdup(poolname); 2357eda14cbcSMatt Macy sip->spa_load_state = spa_load_state(spa); 2358eda14cbcSMatt Macy 2359eda14cbcSMatt Macy mutex_enter(&shl->procfs_list.pl_lock); 2360eda14cbcSMatt Macy procfs_list_add(&shl->procfs_list, sip); 2361eda14cbcSMatt Macy shl->size++; 2362eda14cbcSMatt Macy mutex_exit(&shl->procfs_list.pl_lock); 2363eda14cbcSMatt Macy } 2364eda14cbcSMatt Macy 2365eda14cbcSMatt Macy void 2366eda14cbcSMatt Macy spa_import_progress_remove(uint64_t pool_guid) 2367eda14cbcSMatt Macy { 2368eda14cbcSMatt Macy spa_history_list_t *shl = spa_import_progress_list; 2369eda14cbcSMatt Macy spa_import_progress_t *sip; 2370eda14cbcSMatt Macy 2371eda14cbcSMatt Macy mutex_enter(&shl->procfs_list.pl_lock); 2372eda14cbcSMatt Macy for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2373eda14cbcSMatt Macy sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2374eda14cbcSMatt Macy if (sip->pool_guid == pool_guid) { 2375eda14cbcSMatt Macy if (sip->pool_name) 2376eda14cbcSMatt Macy spa_strfree(sip->pool_name); 2377eda14cbcSMatt Macy list_remove(&shl->procfs_list.pl_list, sip); 2378eda14cbcSMatt Macy shl->size--; 2379eda14cbcSMatt Macy kmem_free(sip, sizeof (spa_import_progress_t)); 2380eda14cbcSMatt Macy break; 2381eda14cbcSMatt Macy } 2382eda14cbcSMatt Macy } 2383eda14cbcSMatt Macy mutex_exit(&shl->procfs_list.pl_lock); 2384eda14cbcSMatt Macy } 2385eda14cbcSMatt Macy 2386eda14cbcSMatt Macy /* 2387eda14cbcSMatt Macy * ========================================================================== 2388eda14cbcSMatt Macy * Initialization and Termination 2389eda14cbcSMatt Macy * ========================================================================== 2390eda14cbcSMatt Macy */ 2391eda14cbcSMatt Macy 2392eda14cbcSMatt Macy static int 2393eda14cbcSMatt Macy spa_name_compare(const void *a1, const void *a2) 2394eda14cbcSMatt Macy { 2395eda14cbcSMatt Macy const spa_t *s1 = a1; 2396eda14cbcSMatt Macy const spa_t *s2 = a2; 2397eda14cbcSMatt Macy int s; 2398eda14cbcSMatt Macy 2399eda14cbcSMatt Macy s = strcmp(s1->spa_name, s2->spa_name); 2400eda14cbcSMatt Macy 2401eda14cbcSMatt Macy return (TREE_ISIGN(s)); 2402eda14cbcSMatt Macy } 2403eda14cbcSMatt Macy 2404eda14cbcSMatt Macy void 2405eda14cbcSMatt Macy spa_boot_init(void) 2406eda14cbcSMatt Macy { 2407eda14cbcSMatt Macy spa_config_load(); 2408eda14cbcSMatt Macy } 2409eda14cbcSMatt Macy 2410eda14cbcSMatt Macy void 2411eda14cbcSMatt Macy spa_init(spa_mode_t mode) 2412eda14cbcSMatt Macy { 2413eda14cbcSMatt Macy mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 2414eda14cbcSMatt Macy mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 2415eda14cbcSMatt Macy mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 2416eda14cbcSMatt Macy cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 
2417eda14cbcSMatt Macy 2418eda14cbcSMatt Macy avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 2419eda14cbcSMatt Macy offsetof(spa_t, spa_avl)); 2420eda14cbcSMatt Macy 2421eda14cbcSMatt Macy avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 2422eda14cbcSMatt Macy offsetof(spa_aux_t, aux_avl)); 2423eda14cbcSMatt Macy 2424eda14cbcSMatt Macy avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 2425eda14cbcSMatt Macy offsetof(spa_aux_t, aux_avl)); 2426eda14cbcSMatt Macy 2427eda14cbcSMatt Macy spa_mode_global = mode; 2428eda14cbcSMatt Macy 2429eda14cbcSMatt Macy #ifndef _KERNEL 2430eda14cbcSMatt Macy if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) { 2431eda14cbcSMatt Macy struct sigaction sa; 2432eda14cbcSMatt Macy 2433eda14cbcSMatt Macy sa.sa_flags = SA_SIGINFO; 2434eda14cbcSMatt Macy sigemptyset(&sa.sa_mask); 2435eda14cbcSMatt Macy sa.sa_sigaction = arc_buf_sigsegv; 2436eda14cbcSMatt Macy 2437eda14cbcSMatt Macy if (sigaction(SIGSEGV, &sa, NULL) == -1) { 2438eda14cbcSMatt Macy perror("could not enable watchpoints: " 2439eda14cbcSMatt Macy "sigaction(SIGSEGV, ...) = "); 2440eda14cbcSMatt Macy } else { 2441eda14cbcSMatt Macy arc_watch = B_TRUE; 2442eda14cbcSMatt Macy } 2443eda14cbcSMatt Macy } 2444eda14cbcSMatt Macy #endif 2445eda14cbcSMatt Macy 2446eda14cbcSMatt Macy fm_init(); 2447eda14cbcSMatt Macy zfs_refcount_init(); 2448eda14cbcSMatt Macy unique_init(); 2449eda14cbcSMatt Macy zfs_btree_init(); 2450eda14cbcSMatt Macy metaslab_stat_init(); 24512a58b312SMartin Matuska brt_init(); 2452eda14cbcSMatt Macy ddt_init(); 2453eda14cbcSMatt Macy zio_init(); 2454eda14cbcSMatt Macy dmu_init(); 2455eda14cbcSMatt Macy zil_init(); 2456eda14cbcSMatt Macy vdev_mirror_stat_init(); 2457eda14cbcSMatt Macy vdev_raidz_math_init(); 2458eda14cbcSMatt Macy vdev_file_init(); 2459eda14cbcSMatt Macy zfs_prop_init(); 24601f1e2261SMartin Matuska chksum_init(); 2461eda14cbcSMatt Macy zpool_prop_init(); 2462eda14cbcSMatt Macy zpool_feature_init(); 2463eda14cbcSMatt Macy spa_config_load(); 2464681ce946SMartin Matuska vdev_prop_init(); 2465eda14cbcSMatt Macy l2arc_start(); 2466eda14cbcSMatt Macy scan_init(); 2467eda14cbcSMatt Macy qat_init(); 2468eda14cbcSMatt Macy spa_import_progress_init(); 2469eda14cbcSMatt Macy } 2470eda14cbcSMatt Macy 2471eda14cbcSMatt Macy void 2472eda14cbcSMatt Macy spa_fini(void) 2473eda14cbcSMatt Macy { 2474eda14cbcSMatt Macy l2arc_stop(); 2475eda14cbcSMatt Macy 2476eda14cbcSMatt Macy spa_evict_all(); 2477eda14cbcSMatt Macy 2478eda14cbcSMatt Macy vdev_file_fini(); 2479eda14cbcSMatt Macy vdev_mirror_stat_fini(); 2480eda14cbcSMatt Macy vdev_raidz_math_fini(); 24811f1e2261SMartin Matuska chksum_fini(); 2482eda14cbcSMatt Macy zil_fini(); 2483eda14cbcSMatt Macy dmu_fini(); 2484eda14cbcSMatt Macy zio_fini(); 2485eda14cbcSMatt Macy ddt_fini(); 24862a58b312SMartin Matuska brt_fini(); 2487eda14cbcSMatt Macy metaslab_stat_fini(); 2488eda14cbcSMatt Macy zfs_btree_fini(); 2489eda14cbcSMatt Macy unique_fini(); 2490eda14cbcSMatt Macy zfs_refcount_fini(); 2491eda14cbcSMatt Macy fm_fini(); 2492eda14cbcSMatt Macy scan_fini(); 2493eda14cbcSMatt Macy qat_fini(); 2494eda14cbcSMatt Macy spa_import_progress_destroy(); 2495eda14cbcSMatt Macy 2496eda14cbcSMatt Macy avl_destroy(&spa_namespace_avl); 2497eda14cbcSMatt Macy avl_destroy(&spa_spare_avl); 2498eda14cbcSMatt Macy avl_destroy(&spa_l2cache_avl); 2499eda14cbcSMatt Macy 2500eda14cbcSMatt Macy cv_destroy(&spa_namespace_cv); 2501eda14cbcSMatt Macy mutex_destroy(&spa_namespace_lock); 2502eda14cbcSMatt 
Macy mutex_destroy(&spa_spare_lock); 2503eda14cbcSMatt Macy mutex_destroy(&spa_l2cache_lock); 2504eda14cbcSMatt Macy } 2505eda14cbcSMatt Macy 2506eda14cbcSMatt Macy /* 2507184c1b94SMartin Matuska * Return whether this pool has a dedicated slog device. No locking needed. 2508eda14cbcSMatt Macy * It's not a problem if the wrong answer is returned as it's only for 2509184c1b94SMartin Matuska * performance and not correctness. 2510eda14cbcSMatt Macy */ 2511eda14cbcSMatt Macy boolean_t 2512eda14cbcSMatt Macy spa_has_slogs(spa_t *spa) 2513eda14cbcSMatt Macy { 25147877fdebSMatt Macy return (spa->spa_log_class->mc_groups != 0); 2515eda14cbcSMatt Macy } 2516eda14cbcSMatt Macy 2517eda14cbcSMatt Macy spa_log_state_t 2518eda14cbcSMatt Macy spa_get_log_state(spa_t *spa) 2519eda14cbcSMatt Macy { 2520eda14cbcSMatt Macy return (spa->spa_log_state); 2521eda14cbcSMatt Macy } 2522eda14cbcSMatt Macy 2523eda14cbcSMatt Macy void 2524eda14cbcSMatt Macy spa_set_log_state(spa_t *spa, spa_log_state_t state) 2525eda14cbcSMatt Macy { 2526eda14cbcSMatt Macy spa->spa_log_state = state; 2527eda14cbcSMatt Macy } 2528eda14cbcSMatt Macy 2529eda14cbcSMatt Macy boolean_t 2530eda14cbcSMatt Macy spa_is_root(spa_t *spa) 2531eda14cbcSMatt Macy { 2532eda14cbcSMatt Macy return (spa->spa_is_root); 2533eda14cbcSMatt Macy } 2534eda14cbcSMatt Macy 2535eda14cbcSMatt Macy boolean_t 2536eda14cbcSMatt Macy spa_writeable(spa_t *spa) 2537eda14cbcSMatt Macy { 2538eda14cbcSMatt Macy return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config); 2539eda14cbcSMatt Macy } 2540eda14cbcSMatt Macy 2541eda14cbcSMatt Macy /* 2542eda14cbcSMatt Macy * Returns true if there is a pending sync task in any of the current 2543eda14cbcSMatt Macy * syncing txg, the current quiescing txg, or the current open txg. 2544eda14cbcSMatt Macy */ 2545eda14cbcSMatt Macy boolean_t 2546eda14cbcSMatt Macy spa_has_pending_synctask(spa_t *spa) 2547eda14cbcSMatt Macy { 2548eda14cbcSMatt Macy return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) || 2549eda14cbcSMatt Macy !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks)); 2550eda14cbcSMatt Macy } 2551eda14cbcSMatt Macy 2552eda14cbcSMatt Macy spa_mode_t 2553eda14cbcSMatt Macy spa_mode(spa_t *spa) 2554eda14cbcSMatt Macy { 2555eda14cbcSMatt Macy return (spa->spa_mode); 2556eda14cbcSMatt Macy } 2557eda14cbcSMatt Macy 2558eda14cbcSMatt Macy uint64_t 2559eda14cbcSMatt Macy spa_bootfs(spa_t *spa) 2560eda14cbcSMatt Macy { 2561eda14cbcSMatt Macy return (spa->spa_bootfs); 2562eda14cbcSMatt Macy } 2563eda14cbcSMatt Macy 2564eda14cbcSMatt Macy uint64_t 2565eda14cbcSMatt Macy spa_delegation(spa_t *spa) 2566eda14cbcSMatt Macy { 2567eda14cbcSMatt Macy return (spa->spa_delegation); 2568eda14cbcSMatt Macy } 2569eda14cbcSMatt Macy 2570eda14cbcSMatt Macy objset_t * 2571eda14cbcSMatt Macy spa_meta_objset(spa_t *spa) 2572eda14cbcSMatt Macy { 2573eda14cbcSMatt Macy return (spa->spa_meta_objset); 2574eda14cbcSMatt Macy } 2575eda14cbcSMatt Macy 2576eda14cbcSMatt Macy enum zio_checksum 2577eda14cbcSMatt Macy spa_dedup_checksum(spa_t *spa) 2578eda14cbcSMatt Macy { 2579eda14cbcSMatt Macy return (spa->spa_dedup_checksum); 2580eda14cbcSMatt Macy } 2581eda14cbcSMatt Macy 2582eda14cbcSMatt Macy /* 2583eda14cbcSMatt Macy * Reset pool scan stat per scan pass (or reboot). 
2584eda14cbcSMatt Macy  */
2585eda14cbcSMatt Macy void
2586eda14cbcSMatt Macy spa_scan_stat_init(spa_t *spa)
2587eda14cbcSMatt Macy {
2588eda14cbcSMatt Macy 	/* data not stored on disk */
2589eda14cbcSMatt Macy 	spa->spa_scan_pass_start = gethrestime_sec();
2590eda14cbcSMatt Macy 	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
2591eda14cbcSMatt Macy 		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
2592eda14cbcSMatt Macy 	else
2593eda14cbcSMatt Macy 		spa->spa_scan_pass_scrub_pause = 0;
2594c0a83fe0SMartin Matuska 
2595c0a83fe0SMartin Matuska 	if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
2596c0a83fe0SMartin Matuska 		spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
2597c0a83fe0SMartin Matuska 	else
2598c0a83fe0SMartin Matuska 		spa->spa_scan_pass_errorscrub_pause = 0;
2599c0a83fe0SMartin Matuska 
2600eda14cbcSMatt Macy 	spa->spa_scan_pass_scrub_spent_paused = 0;
2601eda14cbcSMatt Macy 	spa->spa_scan_pass_exam = 0;
2602eda14cbcSMatt Macy 	spa->spa_scan_pass_issued = 0;
2603c0a83fe0SMartin Matuska 
2604c0a83fe0SMartin Matuska 	/* error scrub stats */
2605c0a83fe0SMartin Matuska 	spa->spa_scan_pass_errorscrub_spent_paused = 0;
2606eda14cbcSMatt Macy }
2607eda14cbcSMatt Macy 
2608eda14cbcSMatt Macy /*
2609eda14cbcSMatt Macy  * Get scan stats for zpool status reports
2610eda14cbcSMatt Macy  */
2611eda14cbcSMatt Macy int
2612eda14cbcSMatt Macy spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2613eda14cbcSMatt Macy {
2614eda14cbcSMatt Macy 	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2615eda14cbcSMatt Macy 
2616c0a83fe0SMartin Matuska 	if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
2617c0a83fe0SMartin Matuska 	    scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))
2618eda14cbcSMatt Macy 		return (SET_ERROR(ENOENT));
2619c0a83fe0SMartin Matuska 
2620da5137abSMartin Matuska 	memset(ps, 0, sizeof (pool_scan_stat_t));
2621eda14cbcSMatt Macy 
2622eda14cbcSMatt Macy 	/* data stored on disk */
2623eda14cbcSMatt Macy 	ps->pss_func = scn->scn_phys.scn_func;
2624eda14cbcSMatt Macy 	ps->pss_state = scn->scn_phys.scn_state;
2625eda14cbcSMatt Macy 	ps->pss_start_time = scn->scn_phys.scn_start_time;
2626eda14cbcSMatt Macy 	ps->pss_end_time = scn->scn_phys.scn_end_time;
2627eda14cbcSMatt Macy 	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2628eda14cbcSMatt Macy 	ps->pss_examined = scn->scn_phys.scn_examined;
26290a97523dSMartin Matuska 	ps->pss_skipped = scn->scn_phys.scn_skipped;
2630eda14cbcSMatt Macy 	ps->pss_processed = scn->scn_phys.scn_processed;
2631eda14cbcSMatt Macy 	ps->pss_errors = scn->scn_phys.scn_errors;
2632eda14cbcSMatt Macy 
2633eda14cbcSMatt Macy 	/* data not stored on disk */
2634eda14cbcSMatt Macy 	ps->pss_pass_exam = spa->spa_scan_pass_exam;
2635eda14cbcSMatt Macy 	ps->pss_pass_start = spa->spa_scan_pass_start;
2636eda14cbcSMatt Macy 	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
2637eda14cbcSMatt Macy 	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
2638eda14cbcSMatt Macy 	ps->pss_pass_issued = spa->spa_scan_pass_issued;
2639eda14cbcSMatt Macy 	ps->pss_issued =
2640eda14cbcSMatt Macy 	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
2641eda14cbcSMatt Macy 
2642c0a83fe0SMartin Matuska 	/* error scrub data stored on disk */
2643c0a83fe0SMartin Matuska 	ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
2644c0a83fe0SMartin Matuska 	ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
2645c0a83fe0SMartin Matuska 	ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
2646c0a83fe0SMartin
Matuska ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time; 2647c0a83fe0SMartin Matuska ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined; 2648c0a83fe0SMartin Matuska ps->pss_error_scrub_to_be_examined = 2649c0a83fe0SMartin Matuska scn->errorscrub_phys.dep_to_examine; 2650c0a83fe0SMartin Matuska 2651c0a83fe0SMartin Matuska /* error scrub data not stored on disk */ 2652c0a83fe0SMartin Matuska ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause; 2653c0a83fe0SMartin Matuska 2654eda14cbcSMatt Macy return (0); 2655eda14cbcSMatt Macy } 2656eda14cbcSMatt Macy 2657eda14cbcSMatt Macy int 2658eda14cbcSMatt Macy spa_maxblocksize(spa_t *spa) 2659eda14cbcSMatt Macy { 2660eda14cbcSMatt Macy if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2661eda14cbcSMatt Macy return (SPA_MAXBLOCKSIZE); 2662eda14cbcSMatt Macy else 2663eda14cbcSMatt Macy return (SPA_OLD_MAXBLOCKSIZE); 2664eda14cbcSMatt Macy } 2665eda14cbcSMatt Macy 2666eda14cbcSMatt Macy 2667eda14cbcSMatt Macy /* 2668eda14cbcSMatt Macy * Returns the txg that the last device removal completed. No indirect mappings 2669eda14cbcSMatt Macy * have been added since this txg. 2670eda14cbcSMatt Macy */ 2671eda14cbcSMatt Macy uint64_t 2672eda14cbcSMatt Macy spa_get_last_removal_txg(spa_t *spa) 2673eda14cbcSMatt Macy { 2674eda14cbcSMatt Macy uint64_t vdevid; 2675eda14cbcSMatt Macy uint64_t ret = -1ULL; 2676eda14cbcSMatt Macy 2677eda14cbcSMatt Macy spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2678eda14cbcSMatt Macy /* 2679eda14cbcSMatt Macy * sr_prev_indirect_vdev is only modified while holding all the 2680eda14cbcSMatt Macy * config locks, so it is sufficient to hold SCL_VDEV as reader when 2681eda14cbcSMatt Macy * examining it. 2682eda14cbcSMatt Macy */ 2683eda14cbcSMatt Macy vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; 2684eda14cbcSMatt Macy 2685eda14cbcSMatt Macy while (vdevid != -1ULL) { 2686eda14cbcSMatt Macy vdev_t *vd = vdev_lookup_top(spa, vdevid); 2687eda14cbcSMatt Macy vdev_indirect_births_t *vib = vd->vdev_indirect_births; 2688eda14cbcSMatt Macy 2689eda14cbcSMatt Macy ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2690eda14cbcSMatt Macy 2691eda14cbcSMatt Macy /* 2692eda14cbcSMatt Macy * If the removal did not remap any data, we don't care. 2693eda14cbcSMatt Macy */ 2694eda14cbcSMatt Macy if (vdev_indirect_births_count(vib) != 0) { 2695eda14cbcSMatt Macy ret = vdev_indirect_births_last_entry_txg(vib); 2696eda14cbcSMatt Macy break; 2697eda14cbcSMatt Macy } 2698eda14cbcSMatt Macy 2699eda14cbcSMatt Macy vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; 2700eda14cbcSMatt Macy } 2701eda14cbcSMatt Macy spa_config_exit(spa, SCL_VDEV, FTAG); 2702eda14cbcSMatt Macy 2703eda14cbcSMatt Macy IMPLY(ret != -1ULL, 2704eda14cbcSMatt Macy spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 2705eda14cbcSMatt Macy 2706eda14cbcSMatt Macy return (ret); 2707eda14cbcSMatt Macy } 2708eda14cbcSMatt Macy 2709eda14cbcSMatt Macy int 2710eda14cbcSMatt Macy spa_maxdnodesize(spa_t *spa) 2711eda14cbcSMatt Macy { 2712eda14cbcSMatt Macy if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) 2713eda14cbcSMatt Macy return (DNODE_MAX_SIZE); 2714eda14cbcSMatt Macy else 2715eda14cbcSMatt Macy return (DNODE_MIN_SIZE); 2716eda14cbcSMatt Macy } 2717eda14cbcSMatt Macy 2718eda14cbcSMatt Macy boolean_t 2719eda14cbcSMatt Macy spa_multihost(spa_t *spa) 2720eda14cbcSMatt Macy { 2721eda14cbcSMatt Macy return (spa->spa_multihost ? 
B_TRUE : B_FALSE); 2722eda14cbcSMatt Macy } 2723eda14cbcSMatt Macy 2724eda14cbcSMatt Macy uint32_t 2725eda14cbcSMatt Macy spa_get_hostid(spa_t *spa) 2726eda14cbcSMatt Macy { 2727eda14cbcSMatt Macy return (spa->spa_hostid); 2728eda14cbcSMatt Macy } 2729eda14cbcSMatt Macy 2730eda14cbcSMatt Macy boolean_t 2731eda14cbcSMatt Macy spa_trust_config(spa_t *spa) 2732eda14cbcSMatt Macy { 2733eda14cbcSMatt Macy return (spa->spa_trust_config); 2734eda14cbcSMatt Macy } 2735eda14cbcSMatt Macy 2736eda14cbcSMatt Macy uint64_t 2737eda14cbcSMatt Macy spa_missing_tvds_allowed(spa_t *spa) 2738eda14cbcSMatt Macy { 2739eda14cbcSMatt Macy return (spa->spa_missing_tvds_allowed); 2740eda14cbcSMatt Macy } 2741eda14cbcSMatt Macy 2742eda14cbcSMatt Macy space_map_t * 2743eda14cbcSMatt Macy spa_syncing_log_sm(spa_t *spa) 2744eda14cbcSMatt Macy { 2745eda14cbcSMatt Macy return (spa->spa_syncing_log_sm); 2746eda14cbcSMatt Macy } 2747eda14cbcSMatt Macy 2748eda14cbcSMatt Macy void 2749eda14cbcSMatt Macy spa_set_missing_tvds(spa_t *spa, uint64_t missing) 2750eda14cbcSMatt Macy { 2751eda14cbcSMatt Macy spa->spa_missing_tvds = missing; 2752eda14cbcSMatt Macy } 2753eda14cbcSMatt Macy 2754eda14cbcSMatt Macy /* 2755eda14cbcSMatt Macy * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc). 2756eda14cbcSMatt Macy */ 2757eda14cbcSMatt Macy const char * 2758eda14cbcSMatt Macy spa_state_to_name(spa_t *spa) 2759eda14cbcSMatt Macy { 2760eda14cbcSMatt Macy ASSERT3P(spa, !=, NULL); 2761eda14cbcSMatt Macy 2762eda14cbcSMatt Macy /* 2763eda14cbcSMatt Macy * it is possible for the spa to exist, without root vdev 2764eda14cbcSMatt Macy * as the spa transitions during import/export 2765eda14cbcSMatt Macy */ 2766eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 2767eda14cbcSMatt Macy if (rvd == NULL) { 2768eda14cbcSMatt Macy return ("TRANSITIONING"); 2769eda14cbcSMatt Macy } 2770eda14cbcSMatt Macy vdev_state_t state = rvd->vdev_state; 2771eda14cbcSMatt Macy vdev_aux_t aux = rvd->vdev_stat.vs_aux; 2772eda14cbcSMatt Macy 2773cbfe9975SMartin Matuska if (spa_suspended(spa)) 2774eda14cbcSMatt Macy return ("SUSPENDED"); 2775eda14cbcSMatt Macy 2776eda14cbcSMatt Macy switch (state) { 2777eda14cbcSMatt Macy case VDEV_STATE_CLOSED: 2778eda14cbcSMatt Macy case VDEV_STATE_OFFLINE: 2779eda14cbcSMatt Macy return ("OFFLINE"); 2780eda14cbcSMatt Macy case VDEV_STATE_REMOVED: 2781eda14cbcSMatt Macy return ("REMOVED"); 2782eda14cbcSMatt Macy case VDEV_STATE_CANT_OPEN: 2783eda14cbcSMatt Macy if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 2784eda14cbcSMatt Macy return ("FAULTED"); 2785eda14cbcSMatt Macy else if (aux == VDEV_AUX_SPLIT_POOL) 2786eda14cbcSMatt Macy return ("SPLIT"); 2787eda14cbcSMatt Macy else 2788eda14cbcSMatt Macy return ("UNAVAIL"); 2789eda14cbcSMatt Macy case VDEV_STATE_FAULTED: 2790eda14cbcSMatt Macy return ("FAULTED"); 2791eda14cbcSMatt Macy case VDEV_STATE_DEGRADED: 2792eda14cbcSMatt Macy return ("DEGRADED"); 2793eda14cbcSMatt Macy case VDEV_STATE_HEALTHY: 2794eda14cbcSMatt Macy return ("ONLINE"); 2795eda14cbcSMatt Macy default: 2796eda14cbcSMatt Macy break; 2797eda14cbcSMatt Macy } 2798eda14cbcSMatt Macy 2799eda14cbcSMatt Macy return ("UNKNOWN"); 2800eda14cbcSMatt Macy } 2801eda14cbcSMatt Macy 2802eda14cbcSMatt Macy boolean_t 2803eda14cbcSMatt Macy spa_top_vdevs_spacemap_addressable(spa_t *spa) 2804eda14cbcSMatt Macy { 2805eda14cbcSMatt Macy vdev_t *rvd = spa->spa_root_vdev; 2806eda14cbcSMatt Macy for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2807eda14cbcSMatt Macy if 
(!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
2808eda14cbcSMatt Macy 			return (B_FALSE);
2809eda14cbcSMatt Macy 	}
2810eda14cbcSMatt Macy 	return (B_TRUE);
2811eda14cbcSMatt Macy }
2812eda14cbcSMatt Macy 
2813eda14cbcSMatt Macy boolean_t
2814eda14cbcSMatt Macy spa_has_checkpoint(spa_t *spa)
2815eda14cbcSMatt Macy {
2816eda14cbcSMatt Macy 	return (spa->spa_checkpoint_txg != 0);
2817eda14cbcSMatt Macy }
2818eda14cbcSMatt Macy 
2819eda14cbcSMatt Macy boolean_t
2820eda14cbcSMatt Macy spa_importing_readonly_checkpoint(spa_t *spa)
2821eda14cbcSMatt Macy {
2822eda14cbcSMatt Macy 	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
2823eda14cbcSMatt Macy 	    spa->spa_mode == SPA_MODE_READ);
2824eda14cbcSMatt Macy }
2825eda14cbcSMatt Macy 
2826eda14cbcSMatt Macy uint64_t
2827eda14cbcSMatt Macy spa_min_claim_txg(spa_t *spa)
2828eda14cbcSMatt Macy {
2829eda14cbcSMatt Macy 	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;
2830eda14cbcSMatt Macy 
2831eda14cbcSMatt Macy 	if (checkpoint_txg != 0)
2832eda14cbcSMatt Macy 		return (checkpoint_txg + 1);
2833eda14cbcSMatt Macy 
2834eda14cbcSMatt Macy 	return (spa->spa_first_txg);
2835eda14cbcSMatt Macy }
2836eda14cbcSMatt Macy 
2837eda14cbcSMatt Macy /*
2838eda14cbcSMatt Macy  * If there is a checkpoint, async destroys may consume more space from
2839eda14cbcSMatt Macy  * the pool instead of freeing it. In an attempt to save the pool from
2840eda14cbcSMatt Macy  * getting suspended when it is about to run out of space, we stop
2841eda14cbcSMatt Macy  * processing async destroys.
2842eda14cbcSMatt Macy  */
2843eda14cbcSMatt Macy boolean_t
2844eda14cbcSMatt Macy spa_suspend_async_destroy(spa_t *spa)
2845eda14cbcSMatt Macy {
2846eda14cbcSMatt Macy 	dsl_pool_t *dp = spa_get_dsl(spa);
2847eda14cbcSMatt Macy 
2848eda14cbcSMatt Macy 	uint64_t unreserved = dsl_pool_unreserved_space(dp,
2849eda14cbcSMatt Macy 	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
2850eda14cbcSMatt Macy 	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
2851eda14cbcSMatt Macy 	uint64_t avail = (unreserved > used) ?
(unreserved - used) : 0; 2852eda14cbcSMatt Macy 2853eda14cbcSMatt Macy if (spa_has_checkpoint(spa) && avail == 0) 2854eda14cbcSMatt Macy return (B_TRUE); 2855eda14cbcSMatt Macy 2856eda14cbcSMatt Macy return (B_FALSE); 2857eda14cbcSMatt Macy } 2858eda14cbcSMatt Macy 2859eda14cbcSMatt Macy #if defined(_KERNEL) 2860eda14cbcSMatt Macy 2861eda14cbcSMatt Macy int 2862eda14cbcSMatt Macy param_set_deadman_failmode_common(const char *val) 2863eda14cbcSMatt Macy { 2864eda14cbcSMatt Macy spa_t *spa = NULL; 2865eda14cbcSMatt Macy char *p; 2866eda14cbcSMatt Macy 2867eda14cbcSMatt Macy if (val == NULL) 2868eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 2869eda14cbcSMatt Macy 2870eda14cbcSMatt Macy if ((p = strchr(val, '\n')) != NULL) 2871eda14cbcSMatt Macy *p = '\0'; 2872eda14cbcSMatt Macy 2873eda14cbcSMatt Macy if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && 2874eda14cbcSMatt Macy strcmp(val, "panic")) 2875eda14cbcSMatt Macy return (SET_ERROR(EINVAL)); 2876eda14cbcSMatt Macy 2877eda14cbcSMatt Macy if (spa_mode_global != SPA_MODE_UNINIT) { 2878eda14cbcSMatt Macy mutex_enter(&spa_namespace_lock); 2879eda14cbcSMatt Macy while ((spa = spa_next(spa)) != NULL) 2880eda14cbcSMatt Macy spa_set_deadman_failmode(spa, val); 2881eda14cbcSMatt Macy mutex_exit(&spa_namespace_lock); 2882eda14cbcSMatt Macy } 2883eda14cbcSMatt Macy 2884eda14cbcSMatt Macy return (0); 2885eda14cbcSMatt Macy } 2886eda14cbcSMatt Macy #endif 2887eda14cbcSMatt Macy 2888eda14cbcSMatt Macy /* Namespace manipulation */ 2889eda14cbcSMatt Macy EXPORT_SYMBOL(spa_lookup); 2890eda14cbcSMatt Macy EXPORT_SYMBOL(spa_add); 2891eda14cbcSMatt Macy EXPORT_SYMBOL(spa_remove); 2892eda14cbcSMatt Macy EXPORT_SYMBOL(spa_next); 2893eda14cbcSMatt Macy 2894eda14cbcSMatt Macy /* Refcount functions */ 2895eda14cbcSMatt Macy EXPORT_SYMBOL(spa_open_ref); 2896eda14cbcSMatt Macy EXPORT_SYMBOL(spa_close); 2897eda14cbcSMatt Macy EXPORT_SYMBOL(spa_refcount_zero); 2898eda14cbcSMatt Macy 2899eda14cbcSMatt Macy /* Pool configuration lock */ 2900eda14cbcSMatt Macy EXPORT_SYMBOL(spa_config_tryenter); 2901eda14cbcSMatt Macy EXPORT_SYMBOL(spa_config_enter); 2902eda14cbcSMatt Macy EXPORT_SYMBOL(spa_config_exit); 2903eda14cbcSMatt Macy EXPORT_SYMBOL(spa_config_held); 2904eda14cbcSMatt Macy 2905eda14cbcSMatt Macy /* Pool vdev add/remove lock */ 2906eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_enter); 2907eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_exit); 2908eda14cbcSMatt Macy 2909eda14cbcSMatt Macy /* Pool vdev state change lock */ 2910eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_state_enter); 2911eda14cbcSMatt Macy EXPORT_SYMBOL(spa_vdev_state_exit); 2912eda14cbcSMatt Macy 2913eda14cbcSMatt Macy /* Accessor functions */ 2914eda14cbcSMatt Macy EXPORT_SYMBOL(spa_shutting_down); 2915eda14cbcSMatt Macy EXPORT_SYMBOL(spa_get_dsl); 2916eda14cbcSMatt Macy EXPORT_SYMBOL(spa_get_rootblkptr); 2917eda14cbcSMatt Macy EXPORT_SYMBOL(spa_set_rootblkptr); 2918eda14cbcSMatt Macy EXPORT_SYMBOL(spa_altroot); 2919eda14cbcSMatt Macy EXPORT_SYMBOL(spa_sync_pass); 2920eda14cbcSMatt Macy EXPORT_SYMBOL(spa_name); 2921eda14cbcSMatt Macy EXPORT_SYMBOL(spa_guid); 2922eda14cbcSMatt Macy EXPORT_SYMBOL(spa_last_synced_txg); 2923eda14cbcSMatt Macy EXPORT_SYMBOL(spa_first_txg); 2924eda14cbcSMatt Macy EXPORT_SYMBOL(spa_syncing_txg); 2925eda14cbcSMatt Macy EXPORT_SYMBOL(spa_version); 2926eda14cbcSMatt Macy EXPORT_SYMBOL(spa_state); 2927eda14cbcSMatt Macy EXPORT_SYMBOL(spa_load_state); 2928eda14cbcSMatt Macy EXPORT_SYMBOL(spa_freeze_txg); 2929eda14cbcSMatt Macy EXPORT_SYMBOL(spa_get_dspace); 2930eda14cbcSMatt 
Macy EXPORT_SYMBOL(spa_update_dspace); 2931eda14cbcSMatt Macy EXPORT_SYMBOL(spa_deflate); 2932eda14cbcSMatt Macy EXPORT_SYMBOL(spa_normal_class); 2933eda14cbcSMatt Macy EXPORT_SYMBOL(spa_log_class); 2934eda14cbcSMatt Macy EXPORT_SYMBOL(spa_special_class); 2935eda14cbcSMatt Macy EXPORT_SYMBOL(spa_preferred_class); 2936eda14cbcSMatt Macy EXPORT_SYMBOL(spa_max_replication); 2937eda14cbcSMatt Macy EXPORT_SYMBOL(spa_prev_software_version); 2938eda14cbcSMatt Macy EXPORT_SYMBOL(spa_get_failmode); 2939eda14cbcSMatt Macy EXPORT_SYMBOL(spa_suspended); 2940eda14cbcSMatt Macy EXPORT_SYMBOL(spa_bootfs); 2941eda14cbcSMatt Macy EXPORT_SYMBOL(spa_delegation); 2942eda14cbcSMatt Macy EXPORT_SYMBOL(spa_meta_objset); 2943eda14cbcSMatt Macy EXPORT_SYMBOL(spa_maxblocksize); 2944eda14cbcSMatt Macy EXPORT_SYMBOL(spa_maxdnodesize); 2945eda14cbcSMatt Macy 2946eda14cbcSMatt Macy /* Miscellaneous support routines */ 2947eda14cbcSMatt Macy EXPORT_SYMBOL(spa_guid_exists); 2948eda14cbcSMatt Macy EXPORT_SYMBOL(spa_strdup); 2949eda14cbcSMatt Macy EXPORT_SYMBOL(spa_strfree); 2950eda14cbcSMatt Macy EXPORT_SYMBOL(spa_generate_guid); 2951eda14cbcSMatt Macy EXPORT_SYMBOL(snprintf_blkptr); 2952eda14cbcSMatt Macy EXPORT_SYMBOL(spa_freeze); 2953eda14cbcSMatt Macy EXPORT_SYMBOL(spa_upgrade); 2954eda14cbcSMatt Macy EXPORT_SYMBOL(spa_evict_all); 2955eda14cbcSMatt Macy EXPORT_SYMBOL(spa_lookup_by_guid); 2956eda14cbcSMatt Macy EXPORT_SYMBOL(spa_has_spare); 2957eda14cbcSMatt Macy EXPORT_SYMBOL(dva_get_dsize_sync); 2958eda14cbcSMatt Macy EXPORT_SYMBOL(bp_get_dsize_sync); 2959eda14cbcSMatt Macy EXPORT_SYMBOL(bp_get_dsize); 2960eda14cbcSMatt Macy EXPORT_SYMBOL(spa_has_slogs); 2961eda14cbcSMatt Macy EXPORT_SYMBOL(spa_is_root); 2962eda14cbcSMatt Macy EXPORT_SYMBOL(spa_writeable); 2963eda14cbcSMatt Macy EXPORT_SYMBOL(spa_mode); 2964eda14cbcSMatt Macy EXPORT_SYMBOL(spa_namespace_lock); 2965eda14cbcSMatt Macy EXPORT_SYMBOL(spa_trust_config); 2966eda14cbcSMatt Macy EXPORT_SYMBOL(spa_missing_tvds_allowed); 2967eda14cbcSMatt Macy EXPORT_SYMBOL(spa_set_missing_tvds); 2968eda14cbcSMatt Macy EXPORT_SYMBOL(spa_state_to_name); 2969eda14cbcSMatt Macy EXPORT_SYMBOL(spa_importing_readonly_checkpoint); 2970eda14cbcSMatt Macy EXPORT_SYMBOL(spa_min_claim_txg); 2971eda14cbcSMatt Macy EXPORT_SYMBOL(spa_suspend_async_destroy); 2972eda14cbcSMatt Macy EXPORT_SYMBOL(spa_has_checkpoint); 2973eda14cbcSMatt Macy EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable); 2974eda14cbcSMatt Macy 2975eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW, 2976eda14cbcSMatt Macy "Set additional debugging flags"); 2977eda14cbcSMatt Macy 2978eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW, 2979eda14cbcSMatt Macy "Set to attempt to recover from fatal errors"); 2980eda14cbcSMatt Macy 2981eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW, 2982eda14cbcSMatt Macy "Set to ignore IO errors during free and permanently leak the space"); 2983eda14cbcSMatt Macy 2984dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW, 2985eda14cbcSMatt Macy "Dead I/O check interval in milliseconds"); 2986eda14cbcSMatt Macy 29879db44a8eSMartin Matuska ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW, 2988eda14cbcSMatt Macy "Enable deadman timer"); 2989eda14cbcSMatt Macy 2990be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW, 2991eda14cbcSMatt Macy "SPA size estimate multiplication factor"); 2992eda14cbcSMatt Macy 2993eda14cbcSMatt Macy 
ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
2994eda14cbcSMatt Macy 	"Place DDT data into the special class");
2995eda14cbcSMatt Macy 
2996eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
2997eda14cbcSMatt Macy 	"Place user data indirect blocks into the special class");
2998eda14cbcSMatt Macy 
2999eda14cbcSMatt Macy /* BEGIN CSTYLED */
3000eda14cbcSMatt Macy ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
3001eda14cbcSMatt Macy 	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
3002eda14cbcSMatt Macy 	"Failmode for deadman timer");
3003eda14cbcSMatt Macy 
3004eda14cbcSMatt Macy ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
3005dbd5678dSMartin Matuska 	param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
3006eda14cbcSMatt Macy 	"Pool sync expiration time in milliseconds");
3007eda14cbcSMatt Macy 
3008eda14cbcSMatt Macy ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
3009dbd5678dSMartin Matuska 	param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
3010eda14cbcSMatt Macy 	"IO expiration time in milliseconds");
3011eda14cbcSMatt Macy 
3012be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
3013eda14cbcSMatt Macy 	"Small file blocks in special vdevs depend on this much "
3014eda14cbcSMatt Macy 	"free space being available");
3015eda14cbcSMatt Macy /* END CSTYLED */
3016eda14cbcSMatt Macy 
3017eda14cbcSMatt Macy ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
3018be181ee2SMartin Matuska 	param_get_uint, ZMOD_RW, "Reserved free space in pool");
3019*14c2e0a0SMartin Matuska 
3020*14c2e0a0SMartin Matuska ZFS_MODULE_PARAM(zfs, spa_, num_allocators, INT, ZMOD_RW,
3021*14c2e0a0SMartin Matuska 	"Number of allocators per spa, capped by ncpus");
3022
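
/*
 * Illustration only (not built as part of this file): the procfs list
 * registered by spa_import_progress_init() exposes the five-column text
 * table produced by spa_import_progress_show() and
 * spa_import_progress_show_header() above. The userland sketch below
 * assumes the usual Linux path /proc/spl/kstat/zfs/import_progress for
 * that list; adjust the path if the platform differs.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	/* Dump the in-flight import table: guid, state, mmp, max txg, name. */
	FILE *fp = fopen("/proc/spl/kstat/zfs/import_progress", "r");
	char line[256];

	if (fp == NULL) {
		perror("import_progress");
		return (1);
	}
	while (fgets(line, sizeof (line), fp) != NULL)
		fputs(line, stdout);
	(void) fclose(fp);
	return (0);
}
#endif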
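
/*
 * Illustration only (not built as part of this file):
 * param_set_deadman_failmode_common() above accepts exactly "wait",
 * "continue" or "panic" and applies the chosen failmode to every imported
 * pool. The sketch below mirrors that validation from userland before
 * writing the module parameter; it assumes the usual Linux sysfs path
 * /sys/module/zfs/parameters/zfs_deadman_failmode.
 */
#if 0
#include <stdio.h>
#include <string.h>

static int
set_deadman_failmode(const char *mode)
{
	/* Same accepted values as the kernel-side validation. */
	if (strcmp(mode, "wait") != 0 && strcmp(mode, "continue") != 0 &&
	    strcmp(mode, "panic") != 0)
		return (-1);

	FILE *fp = fopen("/sys/module/zfs/parameters/zfs_deadman_failmode",
	    "w");
	if (fp == NULL)
		return (-1);
	(void) fprintf(fp, "%s\n", mode);
	return (fclose(fp));
}

int
main(void)
{
	/* Example: let hung I/O continue while delay events are logged. */
	return (set_deadman_failmode("continue") == 0 ? 0 : 1);
}
#endif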