/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_zfs.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data.
 * As operations change data, the amount of dirty data increases; as ZFS syncs
 * out data, the amount of dirty data decreases. When the amount of dirty data
 * exceeds a predetermined threshold, further modifications are blocked until
 * the amount of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified dsl_pool_dirty_space() increments both the per-txg
 * (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of dirty
 * space used; dsl_pool_undirty_space() decrements those values as data is
 * synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;

/*
 * If there's at least this much dirty data (as a percentage of
 * zfs_dirty_data_max), push out a txg. This should be less than
 * zfs_vdev_async_write_active_min_dirty_percent.
 */
int zfs_dirty_data_sync_percent = 20;

/*
 * Once there is this amount of dirty data, dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
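
/*
 * Worked example with the defaults above (the numbers are illustrative
 * only): on a system where zfs_dirty_data_max resolves to 4 GiB, a txg
 * sync is ensured once ~819 MiB (20%) is dirty, and dmu_tx_delay()
 * starts delaying writers at ~2.4 GiB (60%). The default
 * zfs_delay_scale of 1000000000 / 2000 = 500000 targets a backend
 * limit of roughly 2000 operations per second, smoothly handling
 * roughly 200 to 20000 ops/sec per the note above.
 */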

/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea being, we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;

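/*
 * Look up the named special directory (e.g. $MOS or $FREE) in the root
 * dsl_dir's child-directory ZAP and take a hold on it, returning the
 * held dsl_dir in *ddp.
 */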
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

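/*
 * Allocate and partially initialize the in-core dsl_pool_t: the txg
 * machinery, the per-txg dirty lists, and the taskqs used during sync.
 * The MOS itself is opened (or created) by our callers.
 */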
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));
	txg_list_create(&dp->dp_early_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_zrele_taskq = taskq_create("z_zrele", boot_ncpus, defclsyspri,
	    boot_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
	dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
	    boot_ncpus, defclsyspri, boot_ncpus, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset. This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

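/*
 * Open the on-disk state of an existing pool: the root dsl_dir, the
 * special dirs ($MOS, $FREE, $ORIGIN, $LEAK), the free and obsolete
 * bpobjs, feature-dependent objects, and the scan state.
 */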
int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err == 0) {
			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
			    dp->dp_meta_objset, obj));
		} else if (err == ENOENT) {
			/*
			 * We might not have created the remap bpobj yet.
			 */
			err = 0;
		} else {
			goto out;
		}
	}

	/*
	 * Note: errors ignored, because these special dirs, used for
	 * space accounting, are only created on demand.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_unlinked_drain_taskq);
	taskq_destroy(dp->dp_zrele_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}

void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;
	/*
	 * Currently, we only create the obsolete_bpobj where there are
	 * indirect vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
	/* create and open the obsolete_bpobj */
	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}

void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	VERIFY0(zap_remove(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, tx));
	bpobj_free(dp->dp_meta_objset,
	    dp->dp_obsolete_bpobj.bpo_object, tx);
	bpobj_close(&dp->dp_obsolete_bpobj);
}

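/*
 * Create the on-disk state for a brand new pool in the given txg: the
 * MOS, the pool directory, the root and special dsl_dirs, the $ORIGIN
 * snapshot (if the SPA version supports it), and the root dataset with
 * its ZPL objset.
 */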
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
#ifdef _KERNEL
	objset_t *os;
#else
	objset_t *os __attribute__((unused));
#endif
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

#ifdef ZFS_DEBUG
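/*
 * Verify that no metaslab in the txg being cleaned still has pending
 * frees or checkpoint blocks; early sync tasks run before any dirty
 * blocks are written and rely on this (see dsl_early_sync_task()).
 */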
static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
	spa_t *spa = dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		txg_list_t *tl = &vd->vdev_ms_list;
		metaslab_t *ms;

		for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
		    ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
			VERIFY(range_tree_is_empty(ms->ms_freeing));
			VERIFY(range_tree_is_empty(ms->ms_checkpointing));
		}
	}

	return (B_TRUE);
}
#endif

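/*
 * Sync out dirty state for the given txg: run early sync tasks, write
 * out dirty datasets (twice, to pick up userquota updates), sync dirty
 * dsl_dirs and the MOS, then run the remaining sync tasks.
 */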
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Run all early sync tasks before writing out any dirty blocks.
	 * For more info on early sync tasks see block comment in
	 * dsl_early_sync_task().
	 */
	if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
		dsl_sync_task_t *dst;

		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst =
		    txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
			ASSERT(dsl_early_sync_task_verify(dp, txg));
			dsl_sync_task_sync(dst, tx);
		}
		ASSERT(dsl_early_sync_task_verify(dp, txg));
	}

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Update the long range free counter after
	 * we're done syncing user data
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group/project space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		objset_t *os = ds->ds_objset;

		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);

		/*
		 * Release any key mappings created by calls to
		 * dsl_dataset_dirty() from the userquota accounting
		 * code paths.
		 */
		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist and livelists
	 *    to the on-disk versions
	 *  - release hold from dsl_dataset_dirty()
	 *  - release key mapping hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		objset_t *os = ds->ds_objset;

		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}

		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (dmu_objset_is_dirty(mos, txg)) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty()). Shore up
	 * the accounting of any dirtied space now.
	 *
	 * Note that, besides any dirty data from datasets, the amount of
	 * dirty data in the MOS is also accounted by the pool. Therefore,
	 * we want to do this cleanup after dsl_pool_sync_mos() so we don't
	 * attempt to update the accounting for the same dirty data twice.
	 * (i.e. at this point we only update the accounting for the space
	 * that we know we "leaked").
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread, a member of the
 * dp_sync_taskq, or if we are being called from SPA context during pool
 * initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

/*
 * This function returns the amount of allocatable space in the pool
 * minus whatever space is currently reserved by ZFS for specific
 * purposes. Specifically:
 *
 * 1] Any reserved SLOP space
 * 2] Any space used by the checkpoint
 * 3] Any space used for deferred frees
 *
 * The latter 2 are especially important because they are needed to
 * rectify the SPA's and DMU's different understanding of how much space
 * is used. Now the DMU is aware of that extra space tracked by the SPA
 * without having to maintain a separate special dir (e.g. similar to
 * $MOS, $FREEING, and $LEAKED).
 *
 * Note: By deferred frees here, we mean the frees that were deferred
 * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
 * segments placed in ms_defer trees during metaslab_sync_done().
 */
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	spa_t *spa = dp->dp_spa;
	uint64_t space, resv, adjustedsize;
	uint64_t spa_deferred_frees =
	    spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;

	space = spa_get_dspace(spa)
	    - spa_get_checkpoint_space(spa) - spa_deferred_frees;
	resv = spa_get_slop_space(spa);

	switch (slop_policy) {
	case ZFS_SPACE_CHECK_NORMAL:
		break;
	case ZFS_SPACE_CHECK_RESERVED:
		resv >>= 1;
		break;
	case ZFS_SPACE_CHECK_EXTRA_RESERVED:
		resv >>= 2;
		break;
	case ZFS_SPACE_CHECK_NONE:
		resv = 0;
		break;
	default:
		panic("invalid slop policy value: %d", slop_policy);
		break;
	}
	adjustedsize = (space >= resv) ? (space - resv) : 0;

	return (adjustedsize);
}

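/*
 * Like dsl_pool_adjustedsize(), but also excludes the space deferred
 * for freeing into the normal class's ms_defer trees during
 * metaslab_sync_done().
 */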
uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
	uint64_t deferred =
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
	return (quota);
}

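/*
 * Returns TRUE if the caller should delay this transaction (see
 * dmu_tx_delay()). As a side effect, kicks off a txg sync if the
 * amount of dirty data has crossed the sync threshold.
 */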
boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
	uint64_t dirty;

	mutex_enter(&dp->dp_lock);
	dirty = dp->dp_dirty_total;
	mutex_exit(&dp->dp_lock);
	if (dirty > dirty_min_bytes)
		txg_kick(dp);
	return (dirty > delay_min_bytes);
}

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

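/*
 * Callback for dsl_pool_upgrade_clones(): walk each dataset's snapshot
 * chain back to the snapshot it was cloned from (or to $ORIGIN if it
 * is not a clone), and record the dataset in that snapshot's
 * ds_next_clones_obj ZAP, creating the ZAP if necessary.
 */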
/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_zrele_taskq(dsl_pool_t *dp)
{
	return (dp->dp_zrele_taskq);
}
taskq_t *
dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp)
{
	return (dp->dp_unlinked_drain_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}
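/*
 * For illustration: each ZAP entry above is a flat name of the form
 * "<dsobj in hex>-<tag>" (see dsl_pool_user_hold_rele_impl() below), and the
 * loop regroups those flat entries into the two-level nvlist shape that
 * dsl_dataset_user_release_tmp() consumes. E.g. two holds on dataset object
 * 0x36 and one on 0x4a (tag names here are hypothetical) would yield:
 *
 *	holds = {
 *		"36" = { "recv-tag" = boolean, "other-tag" = boolean },
 *		"4a" = { "recv-tag" = boolean }
 *	}
 */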
/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
static void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	kmem_strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}
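/*
 * For illustration: the hold name built above must round-trip through the
 * split performed by dsl_pool_clean_tmp_userrefs(). A minimal, non-compiled
 * sketch with a hypothetical object number and tag:
 */
#if 0
static void
example_hold_name_round_trip(void)
{
	char buf[32], *htag;

	/* encode, as dsl_pool_user_hold_rele_impl() does (dsobj 0x36) */
	(void) snprintf(buf, sizeof (buf), "%llx-%s",
	    (u_longlong_t)0x36ULL, "my-tag");	/* buf is "36-my-tag" */

	/* decode, as dsl_pool_clean_tmp_userrefs() does */
	htag = strchr(buf, '-');	/* first '-' ends the object number */
	*htag++ = '\0';			/* buf is "36", htag is "my-tag" */
}
#endif
/*
 * Because the object number is rendered in hex (which never contains '-'),
 * the first '-' always separates object number from tag, so tags themselves
 * may safely contain '-' characters.
 */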
/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) are long-running, cancelable
 * tasks whose presence should cause "zfs destroy" to fail. This includes DMU
 * consumers (i.e. a ZPL filesystem being mounted or a ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote" and zil_suspend()).
 *
 * The usual formula for long-holding is:
 *	dsl_pool_hold()
 *	dsl_dataset_hold()
 *	... perform checks ...
 *	dsl_dataset_long_hold()
 *	dsl_pool_rele()
 *	... perform long-running task ...
 *	dsl_dataset_long_rele()
 *	dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele. (An illustrative sketch of the manual read-only pattern appears
 * below, after the config lock functions.)
 */
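/*
 * A minimal, non-compiled sketch of the long-hold formula above, with a
 * hypothetical caller:
 */
#if 0
static int
example_long_running_task(const char *dsname, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, tag, &dp);	/* config lock, reader */
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, tag, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}
	/* ... perform checks ... */
	dsl_dataset_long_hold(ds, tag);	/* "zfs destroy" now fails (EBUSY) */
	dsl_pool_rele(dp, tag);		/* config lock dropped; ds stays held */

	/* ... perform long-running task; destroy fails while long-held ... */

	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);	/* only safe action once long-rele'd */
	return (0);
}
#endif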
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike an rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't.)
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}
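/*
 * A minimal, non-compiled sketch of the manual read-only pattern described
 * in the config lock comment above (hypothetical helper; the creation-txg
 * read stands in for any query):
 */
#if 0
static int
example_get_creation_txg(const char *dsname, void *tag, uint64_t *txgp)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, tag, &dp);
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, tag, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}
	ASSERT(dsl_pool_config_held(dp));	/* reader lock held here */
	*txgp = dsl_dataset_phys(ds)->ds_creation_txg;
	dsl_dataset_rele(ds, tag);
	dsl_pool_rele(dp, tag);
	return (0);
}
#endif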
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, INT, ZMOD_RD,
	"Max percent of RAM allowed to be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, INT, ZMOD_RD,
	"zfs_dirty_data_max upper bound as % of RAM");

ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, INT, ZMOD_RW,
	"Transaction delay threshold");

ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, ULONG, ZMOD_RW,
	"Determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD,
	"zfs_dirty_data_max upper bound in bytes");

ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, INT, ZMOD_RW,
	"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");

ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW,
	"How quickly delay approaches infinity");

ZFS_MODULE_PARAM(zfs, zfs_, sync_taskq_batch_pct, INT, ZMOD_RW,
	"Max percent of CPUs that are used to sync dirty data");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_nthr_pct, INT, ZMOD_RW,
	"Max percent of CPUs that are used per dp_sync_taskq");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_minalloc, INT, ZMOD_RW,
	"Number of taskq entries that are pre-populated");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_maxalloc, INT, ZMOD_RW,
	"Max number of taskq entries that are cached");
/* END CSTYLED */
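/*
 * Illustrative note on the parameters above: ZMOD_RW tunables can be changed
 * at runtime (on Linux, for example, by writing to
 * /sys/module/zfs/parameters/zfs_dirty_data_max), while ZMOD_RD tunables such
 * as zfs_dirty_data_max_percent are read-only once the module is loaded and
 * take effect only as load-time options.
 */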