/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to each disk before
 * moving on to the next top-level vdev.
 */
static uint64_t metaslab_aliquot = 1024 * 1024;

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
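/*
 * Illustrative example (not part of the tunable's definition): since
 * SPA_MAXBLOCKSIZE is the largest possible block size, the default of
 * SPA_MAXBLOCKSIZE + 1 means no allocation can reach
 * metaslab_force_ganging, so forced ganging is effectively disabled.
 * Setting it to e.g. 128K would make allocations of 128K and larger
 * candidates for forced ganging.
 */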
/*
 * Of blocks of size >= metaslab_force_ganging, actually gang them this often.
 */
uint_t metaslab_force_ganging_pct = 3;

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 8-16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
uint_t zfs_condense_pct = 200;

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
static const int zfs_metaslab_condense_block_threshold = 4;
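/*
 * Worked example (illustrative): with zfs_condense_pct = 200, a space
 * map whose on-disk representation is 1MB becomes a condensing
 * candidate only once the optimal condensed form would be 512K or
 * less, i.e. the on-disk form is at least twice as large as necessary.
 * Per the block threshold above, condensing is additionally skipped
 * unless the uncondensed space map spans more than 4 blocks of
 * MAX(1 << ashift, space_map_blksz) bytes each.
 */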
/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
static uint_t zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it above the threshold, while freeing segments from disk
 * B brings its fragmentation below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
static uint_t zfs_mg_fragmentation_threshold = 95;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
static uint_t zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set, we will load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = B_FALSE;

/*
 * When set, we will prevent metaslabs from being unloaded.
 */
static int metaslab_debug_unload = B_FALSE;
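/*
 * Example (illustrative): with zfs_mg_fragmentation_threshold = 95, a
 * metaslab group reporting 97% fragmentation is skipped for new
 * allocations unless every other group in the class has also crossed
 * the threshold; with zfs_metaslab_fragmentation_threshold = 70, an
 * active metaslab that reaches 71% fragmentation loses its active
 * status so that a better metaslab can be selected.
 */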
/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
uint_t metaslab_df_free_pct = 4;

/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *	metaslab_df_max_search / (2 * (1 << ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;

/*
 * Forces the metaslab_block_picker function to search for at least this many
 * segments forwards until giving up on finding a segment that the allocation
 * will fit into.
 */
static const uint32_t metaslab_min_search_count = 100;

/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
static int metaslab_df_use_largest_segment = B_FALSE;

/*
 * These tunables control how long a metaslab will remain loaded after the
 * last allocation from it. A metaslab can't be unloaded until at least
 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
 * unloaded sooner. These settings are intended to be generous -- to keep
 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
 */
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000;	/* ten minutes */
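/*
 * Illustrative timeline: a metaslab last selected in txg T at time t
 * cannot be unloaded before txg T + metaslab_unload_delay (32 TXGs by
 * default) *and* time t + metaslab_unload_delay_ms (ten minutes by
 * default) have both passed, unless zfs_metaslab_mem_limit forces an
 * earlier eviction.
 */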
/*
 * Max number of metaslabs per group to preload.
 */
uint_t metaslab_preload_limit = 10;

/*
 * Enable/disable preloading of metaslabs.
 */
static int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
static int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable LBA weighting (i.e. outer tracks are given preference).
 */
static int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
static int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
static int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
static int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
static const boolean_t metaslab_trace_enabled = B_FALSE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached, allowing for further investigation.
 */
static const uint64_t metaslab_trace_max_entries = 5000;

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
static const int max_disabled_ms = 3;
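/*
 * Example (illustrative): with zfs_metaslab_switch_threshold = 2, once
 * allocations have drained the two highest-order weight buckets the
 * active metaslab was selected for, we consider switching to another
 * metaslab rather than continuing to carve this one up.
 */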
/*
 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
 * To avoid 64-bit overflow, don't set above UINT32_MAX.
 */
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60;	/* 1 hour */

/*
 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
 * a metaslab would take it over this percentage, the oldest selected metaslab
 * is automatically unloaded.
 */
static uint_t zfs_metaslab_mem_limit = 25;

/*
 * Force the per-metaslab range trees to use 64-bit integers to store
 * segments. Used for debugging purposes.
 */
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;

/*
 * By default we only store segments over a certain size in the size-sorted
 * metaslab trees (ms_allocatable_by_size and
 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
 * improves load and unload times at the cost of causing us to use slightly
 * larger segments than we would otherwise in some cases.
 */
static const uint32_t metaslab_by_size_min_shift = 14;

/*
 * If not set, we will first try normal allocation. If that fails then
 * we will do a gang allocation. If that fails then we will do a "try hard"
 * gang allocation. If that fails then we will have a multi-layer gang
 * block.
 *
 * If set, we will first try normal allocation. If that fails then
 * we will do a "try hard" allocation. If that fails we will do a gang
 * allocation. If that fails we will do a "try hard" gang allocation. If
 * that fails then we will have a multi-layer gang block.
 */
static int zfs_metaslab_try_hard_before_gang = B_FALSE;
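/*
 * The resulting fallback ladders, sketched (restating the comment
 * above):
 *
 *	B_FALSE: normal -> gang -> "try hard" gang -> multi-layer gang
 *	B_TRUE:  normal -> "try hard" -> gang -> "try hard" gang ->
 *		 multi-layer gang
 */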
/*
 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
 * metaslabs. This improves performance, especially when there are many
 * metaslabs per vdev and the allocation can't actually be satisfied (so we
 * would otherwise iterate all the metaslabs). If there is a metaslab with a
 * worse weight but it can actually satisfy the allocation, we won't find it
 * until trying hard. This may happen if the worse metaslab is not loaded
 * (and the true weight is better than we have calculated), or due to weight
 * bucketization. E.g. we are looking for a 60K segment, and the best
 * metaslabs all have free segments in the 32-63K bucket, but the best
 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
 * bucket, and therefore a lower weight).
 */
static uint_t zfs_metaslab_find_max_tries = 100;

static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);

kmem_cache_t *metaslab_alloc_trace_cache;

typedef struct metaslab_stats {
	kstat_named_t metaslabstat_trace_over_limit;
	kstat_named_t metaslabstat_reload_tree;
	kstat_named_t metaslabstat_too_many_tries;
	kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;

static metaslab_stats_t metaslab_stats = {
	{ "trace_over_limit",	KSTAT_DATA_UINT64 },
	{ "reload_tree",	KSTAT_DATA_UINT64 },
	{ "too_many_tries",	KSTAT_DATA_UINT64 },
	{ "try_hard",		KSTAT_DATA_UINT64 },
};

#define	METASLABSTAT_BUMP(stat) \
	atomic_inc_64(&metaslab_stats.stat.value.ui64);


static kstat_t *metaslab_ksp;

void
metaslab_stat_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (metaslab_ksp != NULL) {
		metaslab_ksp->ks_data = &metaslab_stats;
		kstat_install(metaslab_ksp);
	}
}

void
metaslab_stat_fini(void)
{
	if (metaslab_ksp != NULL) {
		kstat_delete(metaslab_ksp);
		metaslab_ksp = NULL;
	}

	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}
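/*
 * Usage sketch (illustrative, not a specific call site): allocation
 * paths bump these counters, e.g.
 *
 *	METASLABSTAT_BUMP(metaslabstat_too_many_tries);
 *
 * and the values surface through the "zfs" module's "metaslab_stats"
 * kstat installed by metaslab_stat_init() above.
 */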
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		mca->mca_rotor = NULL;
		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
	}

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;

	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		ASSERT(mca->mca_rotor == NULL);
		zfs_refcount_destroy(&mca->mca_alloc_slots);
	}
	mutex_destroy(&mc->mc_lock);
	multilist_destroy(&mc->mc_metaslab_txg_list);
	kmem_free(mc, offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]));
}
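/*
 * Both the class above and the group further below end in a
 * per-allocator flexible array member, so they are sized with
 * offsetof() at both allocation and free time.  A minimal sketch of
 * the pattern (hypothetical type and variable names):
 *
 *	typedef struct foo {
 *		int	f_count;
 *		int	f_slot[];
 *	} foo_t;
 *
 *	foo_t *f = kmem_zalloc(offsetof(foo_t, f_slot[n]), KM_SLEEP);
 *	...
 *	kmem_free(f, offsetof(foo_t, f_slot[n]));
 *
 * Passing the same size to kmem_free() matters: the allocator is told
 * exactly how many bytes were originally requested.
 */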
int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);

	return (0);
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	mutex_enter(&mc->mc_lock);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = vdev_get_mg(tvd, mc);

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
	}

	mutex_exit(&mc->mc_lock);
	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
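/*
 * Worked example for the weighted average computed below
 * (illustrative): two groups with fragmentation 60% and 20%,
 * contributing 100G and 300G of space respectively, yield
 * (60 * 100G + 20 * 300G) / 400G = 30%.
 */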
/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN_TYPED(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift, uint64_t);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}
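/*
 * Worked example (illustrative): a top-level vdev with
 * vdev_ms_shift = 34 (16G metaslabs) whose max_asize exceeds its
 * asize by 40G reports P2ALIGN(40G, 16G) = 32G of expandable space;
 * the remaining 8G is too small to form another metaslab.
 */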
void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
	multilist_t *ml = &mc->mc_metaslab_txg_list;
	hrtime_t now = gethrtime();
	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
		multilist_sublist_t *mls = multilist_sublist_lock_idx(ml, i);
		metaslab_t *msp = multilist_sublist_head(mls);
		multilist_sublist_unlock(mls);
		while (msp != NULL) {
			mutex_enter(&msp->ms_lock);

			/*
			 * If the metaslab has been removed from the list
			 * (which could happen if we were at the memory limit
			 * and it was evicted during this loop), then we can't
			 * proceed and we should restart the sublist.
			 */
			if (!multilist_link_active(&msp->ms_class_txg_node)) {
				mutex_exit(&msp->ms_lock);
				i--;
				break;
			}
			mls = multilist_sublist_lock_idx(ml, i);
			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
			multilist_sublist_unlock(mls);
			if (txg >
			    msp->ms_selected_txg + metaslab_unload_delay &&
			    now > msp->ms_selected_time +
			    MSEC2NSEC(metaslab_unload_delay_ms) &&
			    (msp->ms_allocator == -1 ||
			    !metaslab_preload_enabled)) {
				metaslab_evict(msp, txg);
			} else {
				/*
				 * Once we've hit a metaslab selected too
				 * recently to evict, we're done evicting for
				 * now.
				 */
				mutex_exit(&msp->ms_lock);
				break;
			}
			mutex_exit(&msp->ms_lock);
			msp = next_msp;
		}
	}
}
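/*
 * Note on the restart in metaslab_class_evict_old() above
 * (descriptive): once a metaslab has been unlinked from its sublist,
 * its neighbor links can no longer be trusted, so the loop rewinds
 * (i--) and rescans that sublist from the head instead of chasing a
 * stale pointer.
 */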
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries.
	 * When selecting a metaslab to allocate from, an allocator first
	 * tries its primary, then secondary active metaslab. If it doesn't
	 * have active metaslabs, or can't allocate from them, it searches
	 * for an inactive metaslab to activate. If it can't find a suitable
	 * one, it will steal a primary or secondary metaslab from another
	 * allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (TREE_CMP(m1->ms_start, m2->ms_start));
}
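/*
 * Example ordering (illustrative): given an inactive metaslab I, an
 * active primary P, and an active secondary S, metaslab_compare()
 * above orders I before P before S; within the same activity class a
 * higher ms_weight sorts earlier, and ms_start breaks exact weight
 * ties so two distinct metaslabs never compare equal.
 */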
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the capacity is above
 * the zfs_mg_noalloc_threshold and the fragmentation value is not
 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}
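/*
 * Worked example for the free capacity computed above (illustrative):
 * a vdev with vs_space = 1000G and vs_alloc = 900G yields
 * mg_free_capacity of roughly 10 (percent); with
 * zfs_mg_noalloc_threshold set to 30, such a group would be avoided
 * as long as some other group in the class remains above 30% free.
 */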
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
	const metaslab_t *a = va;
	const metaslab_t *b = vb;

	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
	if (likely(cmp))
		return (cmp);

	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
	if (cmp)
		return (cmp);

	return (TREE_CMP(a->ms_id, b->ms_id));
}
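/*
 * Example (illustrative): metaslab_sort_by_flushed() orders metaslabs
 * by how stale their unflushed changes are, so one last flushed in
 * txg 100 sorts before one last flushed in txg 200 regardless of
 * vdev; the vdev id and metaslab id comparisons only break ties
 * within the same txg.
 */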
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(offsetof(metaslab_group_t,
	    mg_allocator[allocators]), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	for (int i = 0; i < allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
	}

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_disabled_lock);
	cv_destroy(&mg->mg_ms_disabled_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
	}
	kmem_free(mg, offsetof(metaslab_group_t,
	    mg_allocator[mg->mg_allocators]));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1,
	    vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mc->mc_allocator[i].mca_rotor = mg;
		mg = mg->mg_next;
	}
}
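/*
 * Worked example (illustrative): activating a group on a 10-wide
 * raidz2 top-level vdev sets mg_aliquot = metaslab_aliquot *
 * (10 - 2) = 8M with the 1M default, i.e. roughly one aliquot per
 * data disk is written before the rotor advances to the next
 * top-level vdev.
 */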
/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		for (int i = 0; i < spa->spa_alloc_count; i++)
			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(spa->spa_metaslab_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		metaslab_t *msp = mga->mga_primary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mga->mga_secondary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mgnext = NULL;
	} else {
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		if (mc->mc_allocator[i].mca_rotor == mg)
			mc->mc_allocator[i].mca_rotor = mgnext;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	/*
	 * Note that the number of nodes in mg_metaslab_tree may be one less
	 * than vdev_ms_count, due to the embedded log metaslab.
	 */
	mutex_enter(&mg->mg_lock);
	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
	mutex_exit(&mg->mg_lock);
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	mutex_enter(&mg->mg_lock);
	for (metaslab_t *msp = avl_first(t);
	    msp != NULL; msp = AVL_NEXT(t, msp)) {
		VERIFY3P(msp->ms_group, ==, mg);
		/* skip if not active */
		if (msp->ms_sm == NULL)
			continue;

		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
		}
	}

	for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	mutex_exit(&mg->mg_lock);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}
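/*
 * Worked example for the "+ ashift" offset used above and below
 * (illustrative): with ashift = 12, space map histogram bucket i,
 * which counts segments of roughly 2^(i + 12) bytes, feeds
 * mg_histogram[i + 12] and mc_histogram[i + 12], so histograms
 * aggregated from vdevs with different ashifts line up by absolute
 * segment size.
 */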
metaslab_class_t *mc = mg->mg_class; 1087eda14cbcSMatt Macy uint64_t ashift = mg->mg_vd->vdev_ashift; 1088eda14cbcSMatt Macy 1089eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1090eda14cbcSMatt Macy if (msp->ms_sm == NULL) 1091eda14cbcSMatt Macy return; 1092eda14cbcSMatt Macy 1093eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 1094184c1b94SMartin Matuska mutex_enter(&mc->mc_lock); 1095eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 1096eda14cbcSMatt Macy ASSERT3U(mg->mg_histogram[i + ashift], >=, 1097eda14cbcSMatt Macy msp->ms_sm->sm_phys->smp_histogram[i]); 1098eda14cbcSMatt Macy ASSERT3U(mc->mc_histogram[i + ashift], >=, 1099eda14cbcSMatt Macy msp->ms_sm->sm_phys->smp_histogram[i]); 1100184c1b94SMartin Matuska IMPLY(mg == mg->mg_vd->vdev_log_mg, 1101184c1b94SMartin Matuska mc == spa_embedded_log_class(mg->mg_vd->vdev_spa)); 1102eda14cbcSMatt Macy 1103eda14cbcSMatt Macy mg->mg_histogram[i + ashift] -= 1104eda14cbcSMatt Macy msp->ms_sm->sm_phys->smp_histogram[i]; 1105eda14cbcSMatt Macy mc->mc_histogram[i + ashift] -= 1106eda14cbcSMatt Macy msp->ms_sm->sm_phys->smp_histogram[i]; 1107eda14cbcSMatt Macy } 1108184c1b94SMartin Matuska mutex_exit(&mc->mc_lock); 1109eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 1110eda14cbcSMatt Macy } 1111eda14cbcSMatt Macy 1112eda14cbcSMatt Macy static void 1113eda14cbcSMatt Macy metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp) 1114eda14cbcSMatt Macy { 1115eda14cbcSMatt Macy ASSERT(msp->ms_group == NULL); 1116eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 1117eda14cbcSMatt Macy msp->ms_group = mg; 1118eda14cbcSMatt Macy msp->ms_weight = 0; 1119eda14cbcSMatt Macy avl_add(&mg->mg_metaslab_tree, msp); 1120eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 1121eda14cbcSMatt Macy 1122eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 1123eda14cbcSMatt Macy metaslab_group_histogram_add(mg, msp); 1124eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 1125eda14cbcSMatt Macy } 1126eda14cbcSMatt Macy 1127eda14cbcSMatt Macy static void 1128eda14cbcSMatt Macy metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp) 1129eda14cbcSMatt Macy { 1130eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 1131eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp); 1132eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 1133eda14cbcSMatt Macy 1134eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 1135eda14cbcSMatt Macy ASSERT(msp->ms_group == mg); 1136eda14cbcSMatt Macy avl_remove(&mg->mg_metaslab_tree, msp); 1137eda14cbcSMatt Macy 1138eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 1139eda14cbcSMatt Macy multilist_sublist_t *mls = 11403ff01b23SMartin Matuska multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 1141eda14cbcSMatt Macy if (multilist_link_active(&msp->ms_class_txg_node)) 1142eda14cbcSMatt Macy multilist_sublist_remove(mls, msp); 1143eda14cbcSMatt Macy multilist_sublist_unlock(mls); 1144eda14cbcSMatt Macy 1145eda14cbcSMatt Macy msp->ms_group = NULL; 1146eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 1147eda14cbcSMatt Macy } 1148eda14cbcSMatt Macy 1149eda14cbcSMatt Macy static void 1150eda14cbcSMatt Macy metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) 1151eda14cbcSMatt Macy { 1152eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1153eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&mg->mg_lock)); 1154eda14cbcSMatt Macy ASSERT(msp->ms_group == mg); 1155eda14cbcSMatt Macy 1156eda14cbcSMatt Macy avl_remove(&mg->mg_metaslab_tree, msp); 1157eda14cbcSMatt Macy msp->ms_weight = weight; 1158eda14cbcSMatt Macy 
avl_add(&mg->mg_metaslab_tree, msp); 1159eda14cbcSMatt Macy 1160eda14cbcSMatt Macy } 1161eda14cbcSMatt Macy 1162eda14cbcSMatt Macy static void 1163eda14cbcSMatt Macy metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) 1164eda14cbcSMatt Macy { 1165eda14cbcSMatt Macy /* 1166eda14cbcSMatt Macy * Although in principle the weight can be any value, in 1167eda14cbcSMatt Macy * practice we do not use values in the range [1, 511]. 1168eda14cbcSMatt Macy */ 1169eda14cbcSMatt Macy ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0); 1170eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1171eda14cbcSMatt Macy 1172eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 1173eda14cbcSMatt Macy metaslab_group_sort_impl(mg, msp, weight); 1174eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 1175eda14cbcSMatt Macy } 1176eda14cbcSMatt Macy 1177eda14cbcSMatt Macy /* 1178eda14cbcSMatt Macy * Calculate the fragmentation for a given metaslab group. We can use 1179eda14cbcSMatt Macy * a simple average here since all metaslabs within the group must have 1180eda14cbcSMatt Macy * the same size. The return value will be a value between 0 and 100 1181eda14cbcSMatt Macy * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this 1182eda14cbcSMatt Macy * group have a fragmentation metric. 1183eda14cbcSMatt Macy */ 1184eda14cbcSMatt Macy uint64_t 1185eda14cbcSMatt Macy metaslab_group_fragmentation(metaslab_group_t *mg) 1186eda14cbcSMatt Macy { 1187eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 1188eda14cbcSMatt Macy uint64_t fragmentation = 0; 1189eda14cbcSMatt Macy uint64_t valid_ms = 0; 1190eda14cbcSMatt Macy 1191eda14cbcSMatt Macy for (int m = 0; m < vd->vdev_ms_count; m++) { 1192eda14cbcSMatt Macy metaslab_t *msp = vd->vdev_ms[m]; 1193eda14cbcSMatt Macy 1194eda14cbcSMatt Macy if (msp->ms_fragmentation == ZFS_FRAG_INVALID) 1195eda14cbcSMatt Macy continue; 1196eda14cbcSMatt Macy if (msp->ms_group != mg) 1197eda14cbcSMatt Macy continue; 1198eda14cbcSMatt Macy 1199eda14cbcSMatt Macy valid_ms++; 1200eda14cbcSMatt Macy fragmentation += msp->ms_fragmentation; 1201eda14cbcSMatt Macy } 1202eda14cbcSMatt Macy 1203eda14cbcSMatt Macy if (valid_ms <= mg->mg_vd->vdev_ms_count / 2) 1204eda14cbcSMatt Macy return (ZFS_FRAG_INVALID); 1205eda14cbcSMatt Macy 1206eda14cbcSMatt Macy fragmentation /= valid_ms; 1207eda14cbcSMatt Macy ASSERT3U(fragmentation, <=, 100); 1208eda14cbcSMatt Macy return (fragmentation); 1209eda14cbcSMatt Macy } 1210eda14cbcSMatt Macy 1211eda14cbcSMatt Macy /* 1212eda14cbcSMatt Macy * Determine if a given metaslab group should skip allocations. A metaslab 1213eda14cbcSMatt Macy * group should avoid allocations if its free capacity is less than the 1214eda14cbcSMatt Macy * zfs_mg_noalloc_threshold or its fragmentation metric is greater than 1215eda14cbcSMatt Macy * zfs_mg_fragmentation_threshold and there is at least one metaslab group 1216eda14cbcSMatt Macy * that can still handle allocations. If the allocation throttle is enabled 1217eda14cbcSMatt Macy * then we skip allocations to devices that have reached their maximum 1218eda14cbcSMatt Macy * allocation queue depth unless the selected metaslab group is the only 1219eda14cbcSMatt Macy * eligible group remaining.
1220eda14cbcSMatt Macy */ 1221eda14cbcSMatt Macy static boolean_t 1222eda14cbcSMatt Macy metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor, 122315f0b8c3SMartin Matuska int flags, uint64_t psize, int allocator, int d) 1224eda14cbcSMatt Macy { 1225eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 1226eda14cbcSMatt Macy metaslab_class_t *mc = mg->mg_class; 1227eda14cbcSMatt Macy 1228eda14cbcSMatt Macy /* 1229eda14cbcSMatt Macy * We can only consider skipping this metaslab group if it's 1230eda14cbcSMatt Macy * in the normal metaslab class and there are other metaslab 1231eda14cbcSMatt Macy * groups to select from. Otherwise, we always consider it eligible 1232eda14cbcSMatt Macy * for allocations. 1233eda14cbcSMatt Macy */ 1234eda14cbcSMatt Macy if ((mc != spa_normal_class(spa) && 1235eda14cbcSMatt Macy mc != spa_special_class(spa) && 1236eda14cbcSMatt Macy mc != spa_dedup_class(spa)) || 1237eda14cbcSMatt Macy mc->mc_groups <= 1) 1238eda14cbcSMatt Macy return (B_TRUE); 1239eda14cbcSMatt Macy 1240eda14cbcSMatt Macy /* 1241eda14cbcSMatt Macy * If the metaslab group's mg_allocatable flag is set (see comments 1242eda14cbcSMatt Macy * in metaslab_group_alloc_update() for more information) and 1243eda14cbcSMatt Macy * the allocation throttle is disabled then allow allocations to this 1244eda14cbcSMatt Macy * device. However, if the allocation throttle is enabled then 12457877fdebSMatt Macy * check if we have reached our allocation limit (mga_alloc_queue_depth) 1246eda14cbcSMatt Macy * to determine if we should allow allocations to this metaslab group. 1247eda14cbcSMatt Macy * If all metaslab groups are no longer considered allocatable 1248eda14cbcSMatt Macy * (mc_alloc_groups == 0) or we're trying to allocate the smallest 1249eda14cbcSMatt Macy * gang block size then we allow allocations on this metaslab group 1250eda14cbcSMatt Macy * regardless of the mg_allocatable or throttle settings. 1251eda14cbcSMatt Macy */ 1252eda14cbcSMatt Macy if (mg->mg_allocatable) { 1253eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 1254eda14cbcSMatt Macy int64_t qdepth; 1255eda14cbcSMatt Macy uint64_t qmax = mga->mga_cur_max_alloc_queue_depth; 1256eda14cbcSMatt Macy 1257eda14cbcSMatt Macy if (!mc->mc_alloc_throttle_enabled) 1258eda14cbcSMatt Macy return (B_TRUE); 1259eda14cbcSMatt Macy 1260eda14cbcSMatt Macy /* 1261eda14cbcSMatt Macy * If this metaslab group does not have any free space, then 1262eda14cbcSMatt Macy * there is no point in looking further. 1263eda14cbcSMatt Macy */ 1264eda14cbcSMatt Macy if (mg->mg_no_free_space) 1265eda14cbcSMatt Macy return (B_FALSE); 1266eda14cbcSMatt Macy 1267eda14cbcSMatt Macy /* 126815f0b8c3SMartin Matuska * Some allocations (e.g., those coming from device removal 126915f0b8c3SMartin Matuska * where the allocations are not even counted in the 127015f0b8c3SMartin Matuska * metaslab allocation queues) are allowed to bypass 127115f0b8c3SMartin Matuska * the throttle. 127215f0b8c3SMartin Matuska */ 127315f0b8c3SMartin Matuska if (flags & METASLAB_DONT_THROTTLE) 127415f0b8c3SMartin Matuska return (B_TRUE); 127515f0b8c3SMartin Matuska 127615f0b8c3SMartin Matuska /* 1277eda14cbcSMatt Macy * Relax allocation throttling for ditto blocks. Due to 1278eda14cbcSMatt Macy * random imbalances in allocation, it tends to push copies 1279eda14cbcSMatt Macy * to one vdev that looks a bit better at the moment.
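 * For example (an editorial illustration, not part of the original
 * comment): with d == 2, i.e. the third DVA of a block, the qmax
 * computed below is scaled by (4 + 2) / 4, relaxing the queue-depth
 * limit by 50%.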
1280eda14cbcSMatt Macy */ 1281eda14cbcSMatt Macy qmax = qmax * (4 + d) / 4; 1282eda14cbcSMatt Macy 1283eda14cbcSMatt Macy qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth); 1284eda14cbcSMatt Macy 1285eda14cbcSMatt Macy /* 1286eda14cbcSMatt Macy * If this metaslab group is below its qmax or it's 1287315ee00fSMartin Matuska * the only allocatable metaslab group, then attempt 1288eda14cbcSMatt Macy * to allocate from it. 1289eda14cbcSMatt Macy */ 1290eda14cbcSMatt Macy if (qdepth < qmax || mc->mc_alloc_groups == 1) 1291eda14cbcSMatt Macy return (B_TRUE); 1292eda14cbcSMatt Macy ASSERT3U(mc->mc_alloc_groups, >, 1); 1293eda14cbcSMatt Macy 1294eda14cbcSMatt Macy /* 1295eda14cbcSMatt Macy * Since this metaslab group is at or over its qmax, we 1296eda14cbcSMatt Macy * need to determine if there are metaslab groups after this 1297eda14cbcSMatt Macy * one that might be able to handle this allocation. This is 1298eda14cbcSMatt Macy * racy since we can't hold the locks for all metaslab 1299eda14cbcSMatt Macy * groups at the same time when we make this check. 1300eda14cbcSMatt Macy */ 1301eda14cbcSMatt Macy for (metaslab_group_t *mgp = mg->mg_next; 1302eda14cbcSMatt Macy mgp != rotor; mgp = mgp->mg_next) { 1303eda14cbcSMatt Macy metaslab_group_allocator_t *mgap = 1304eda14cbcSMatt Macy &mgp->mg_allocator[allocator]; 1305eda14cbcSMatt Macy qmax = mgap->mga_cur_max_alloc_queue_depth; 1306eda14cbcSMatt Macy qmax = qmax * (4 + d) / 4; 1307eda14cbcSMatt Macy qdepth = 1308eda14cbcSMatt Macy zfs_refcount_count(&mgap->mga_alloc_queue_depth); 1309eda14cbcSMatt Macy 1310eda14cbcSMatt Macy /* 1311eda14cbcSMatt Macy * If there is another metaslab group that 1312eda14cbcSMatt Macy * might be able to handle the allocation, then 1313eda14cbcSMatt Macy * we return false so that we skip this group. 1314eda14cbcSMatt Macy */ 1315eda14cbcSMatt Macy if (qdepth < qmax && !mgp->mg_no_free_space) 1316eda14cbcSMatt Macy return (B_FALSE); 1317eda14cbcSMatt Macy } 1318eda14cbcSMatt Macy 1319eda14cbcSMatt Macy /* 1320eda14cbcSMatt Macy * We didn't find another group to handle the allocation 1321eda14cbcSMatt Macy * so we can't skip this metaslab group even though 1322eda14cbcSMatt Macy * we are at or over our qmax. 1323eda14cbcSMatt Macy */ 1324eda14cbcSMatt Macy return (B_TRUE); 1325eda14cbcSMatt Macy 1326eda14cbcSMatt Macy } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) { 1327eda14cbcSMatt Macy return (B_TRUE); 1328eda14cbcSMatt Macy } 1329eda14cbcSMatt Macy return (B_FALSE); 1330eda14cbcSMatt Macy } 1331eda14cbcSMatt Macy 1332eda14cbcSMatt Macy /* 1333eda14cbcSMatt Macy * ========================================================================== 1334eda14cbcSMatt Macy * Range tree callbacks 1335eda14cbcSMatt Macy * ========================================================================== 1336eda14cbcSMatt Macy */ 1337eda14cbcSMatt Macy 1338eda14cbcSMatt Macy /* 1339eda14cbcSMatt Macy * Comparison function for the private size-ordered tree using 32-bit 1340eda14cbcSMatt Macy * ranges. Tree is sorted by size, larger sizes at the end of the tree. 
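 * Editorial note: ties on size are broken by start offset. The
 * expression cmp + !cmp * TREE_CMP(rs_start) used below evaluates to
 * the size comparison when it is nonzero and to the offset comparison
 * otherwise, avoiding a second conditional.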
1341eda14cbcSMatt Macy */ 13424e8d558cSMartin Matuska __attribute__((always_inline)) inline 1343eda14cbcSMatt Macy static int 1344eda14cbcSMatt Macy metaslab_rangesize32_compare(const void *x1, const void *x2) 1345eda14cbcSMatt Macy { 1346eda14cbcSMatt Macy const range_seg32_t *r1 = x1; 1347eda14cbcSMatt Macy const range_seg32_t *r2 = x2; 1348eda14cbcSMatt Macy 1349eda14cbcSMatt Macy uint64_t rs_size1 = r1->rs_end - r1->rs_start; 1350eda14cbcSMatt Macy uint64_t rs_size2 = r2->rs_end - r2->rs_start; 1351eda14cbcSMatt Macy 1352eda14cbcSMatt Macy int cmp = TREE_CMP(rs_size1, rs_size2); 1353eda14cbcSMatt Macy 13544e8d558cSMartin Matuska return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start)); 1355eda14cbcSMatt Macy } 1356eda14cbcSMatt Macy 1357eda14cbcSMatt Macy /* 1358eda14cbcSMatt Macy * Comparison function for the private size-ordered tree using 64-bit 1359eda14cbcSMatt Macy * ranges. Tree is sorted by size, larger sizes at the end of the tree. 1360eda14cbcSMatt Macy */ 13614e8d558cSMartin Matuska __attribute__((always_inline)) inline 1362eda14cbcSMatt Macy static int 1363eda14cbcSMatt Macy metaslab_rangesize64_compare(const void *x1, const void *x2) 1364eda14cbcSMatt Macy { 1365eda14cbcSMatt Macy const range_seg64_t *r1 = x1; 1366eda14cbcSMatt Macy const range_seg64_t *r2 = x2; 1367eda14cbcSMatt Macy 1368eda14cbcSMatt Macy uint64_t rs_size1 = r1->rs_end - r1->rs_start; 1369eda14cbcSMatt Macy uint64_t rs_size2 = r2->rs_end - r2->rs_start; 1370eda14cbcSMatt Macy 1371eda14cbcSMatt Macy int cmp = TREE_CMP(rs_size1, rs_size2); 1372eda14cbcSMatt Macy 13734e8d558cSMartin Matuska return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start)); 1374eda14cbcSMatt Macy } 13754e8d558cSMartin Matuska 1376eda14cbcSMatt Macy typedef struct metaslab_rt_arg { 1377eda14cbcSMatt Macy zfs_btree_t *mra_bt; 1378eda14cbcSMatt Macy uint32_t mra_floor_shift; 1379eda14cbcSMatt Macy } metaslab_rt_arg_t; 1380eda14cbcSMatt Macy 1381eda14cbcSMatt Macy struct mssa_arg { 1382eda14cbcSMatt Macy range_tree_t *rt; 1383eda14cbcSMatt Macy metaslab_rt_arg_t *mra; 1384eda14cbcSMatt Macy }; 1385eda14cbcSMatt Macy 1386eda14cbcSMatt Macy static void 1387eda14cbcSMatt Macy metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size) 1388eda14cbcSMatt Macy { 1389eda14cbcSMatt Macy struct mssa_arg *mssap = arg; 1390eda14cbcSMatt Macy range_tree_t *rt = mssap->rt; 1391eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = mssap->mra; 1392eda14cbcSMatt Macy range_seg_max_t seg = {0}; 1393eda14cbcSMatt Macy rs_set_start(&seg, rt, start); 1394eda14cbcSMatt Macy rs_set_end(&seg, rt, start + size); 1395eda14cbcSMatt Macy metaslab_rt_add(rt, &seg, mrap); 1396eda14cbcSMatt Macy } 1397eda14cbcSMatt Macy 1398eda14cbcSMatt Macy static void 1399eda14cbcSMatt Macy metaslab_size_tree_full_load(range_tree_t *rt) 1400eda14cbcSMatt Macy { 1401eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = rt->rt_arg; 1402eda14cbcSMatt Macy METASLABSTAT_BUMP(metaslabstat_reload_tree); 1403eda14cbcSMatt Macy ASSERT0(zfs_btree_numnodes(mrap->mra_bt)); 1404eda14cbcSMatt Macy mrap->mra_floor_shift = 0; 1405eda14cbcSMatt Macy struct mssa_arg arg = {0}; 1406eda14cbcSMatt Macy arg.rt = rt; 1407eda14cbcSMatt Macy arg.mra = mrap; 1408eda14cbcSMatt Macy range_tree_walk(rt, metaslab_size_sorted_add, &arg); 1409eda14cbcSMatt Macy } 1410eda14cbcSMatt Macy 14114e8d558cSMartin Matuska 14124e8d558cSMartin Matuska ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf, 14134e8d558cSMartin Matuska range_seg32_t, metaslab_rangesize32_compare) 14144e8d558cSMartin Matuska 
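/*
 * A note on the macro invocations above and below (editorial, based on
 * the definition in sys/btree.h): ZFS_BTREE_FIND_IN_BUF_FUNC()
 * generates a type-specialized search over a b-tree leaf buffer, one
 * instance per range_seg width, so that the comparator can be inlined
 * rather than invoked through a function pointer.
 */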
14154e8d558cSMartin Matuska ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf, 14164e8d558cSMartin Matuska range_seg64_t, metaslab_rangesize64_compare) 14174e8d558cSMartin Matuska 1418eda14cbcSMatt Macy /* 1419eda14cbcSMatt Macy * Create any block allocator specific components. The current allocators 1420eda14cbcSMatt Macy * rely on using both a size-ordered range_tree_t and an array of uint64_t's. 1421eda14cbcSMatt Macy */ 1422eda14cbcSMatt Macy static void 1423eda14cbcSMatt Macy metaslab_rt_create(range_tree_t *rt, void *arg) 1424eda14cbcSMatt Macy { 1425eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1426eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1427eda14cbcSMatt Macy 1428eda14cbcSMatt Macy size_t size; 1429eda14cbcSMatt Macy int (*compare) (const void *, const void *); 14304e8d558cSMartin Matuska bt_find_in_buf_f bt_find; 1431eda14cbcSMatt Macy switch (rt->rt_type) { 1432eda14cbcSMatt Macy case RANGE_SEG32: 1433eda14cbcSMatt Macy size = sizeof (range_seg32_t); 1434eda14cbcSMatt Macy compare = metaslab_rangesize32_compare; 14354e8d558cSMartin Matuska bt_find = metaslab_rt_find_rangesize32_in_buf; 1436eda14cbcSMatt Macy break; 1437eda14cbcSMatt Macy case RANGE_SEG64: 1438eda14cbcSMatt Macy size = sizeof (range_seg64_t); 1439eda14cbcSMatt Macy compare = metaslab_rangesize64_compare; 14404e8d558cSMartin Matuska bt_find = metaslab_rt_find_rangesize64_in_buf; 1441eda14cbcSMatt Macy break; 1442eda14cbcSMatt Macy default: 1443eda14cbcSMatt Macy panic("Invalid range seg type %d", rt->rt_type); 1444eda14cbcSMatt Macy } 14454e8d558cSMartin Matuska zfs_btree_create(size_tree, compare, bt_find, size); 1446eda14cbcSMatt Macy mrap->mra_floor_shift = metaslab_by_size_min_shift; 1447eda14cbcSMatt Macy } 1448eda14cbcSMatt Macy 1449eda14cbcSMatt Macy static void 1450eda14cbcSMatt Macy metaslab_rt_destroy(range_tree_t *rt, void *arg) 1451eda14cbcSMatt Macy { 1452e92ffd9bSMartin Matuska (void) rt; 1453eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1454eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1455eda14cbcSMatt Macy 1456eda14cbcSMatt Macy zfs_btree_destroy(size_tree); 1457eda14cbcSMatt Macy kmem_free(mrap, sizeof (*mrap)); 1458eda14cbcSMatt Macy } 1459eda14cbcSMatt Macy 1460eda14cbcSMatt Macy static void 1461eda14cbcSMatt Macy metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) 1462eda14cbcSMatt Macy { 1463eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1464eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1465eda14cbcSMatt Macy 1466eda14cbcSMatt Macy if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < 1467be181ee2SMartin Matuska (1ULL << mrap->mra_floor_shift)) 1468eda14cbcSMatt Macy return; 1469eda14cbcSMatt Macy 1470eda14cbcSMatt Macy zfs_btree_add(size_tree, rs); 1471eda14cbcSMatt Macy } 1472eda14cbcSMatt Macy 1473eda14cbcSMatt Macy static void 1474eda14cbcSMatt Macy metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) 1475eda14cbcSMatt Macy { 1476eda14cbcSMatt Macy metaslab_rt_arg_t *mrap = arg; 1477eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1478eda14cbcSMatt Macy 1479be181ee2SMartin Matuska if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL << 1480eda14cbcSMatt Macy mrap->mra_floor_shift)) 1481eda14cbcSMatt Macy return; 1482eda14cbcSMatt Macy 1483eda14cbcSMatt Macy zfs_btree_remove(size_tree, rs); 1484eda14cbcSMatt Macy } 1485eda14cbcSMatt Macy 1486eda14cbcSMatt Macy static void 1487eda14cbcSMatt Macy metaslab_rt_vacate(range_tree_t *rt, void *arg) 1488eda14cbcSMatt Macy { 1489eda14cbcSMatt Macy 
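	/*
	 * Editorial note: vacating drops every segment at once, so rather
	 * than removing them from the size tree one at a time, the code
	 * below clears and destroys the tree and recreates it empty;
	 * subsequent rtop_add callbacks repopulate it.
	 */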
metaslab_rt_arg_t *mrap = arg; 1490eda14cbcSMatt Macy zfs_btree_t *size_tree = mrap->mra_bt; 1491eda14cbcSMatt Macy zfs_btree_clear(size_tree); 1492eda14cbcSMatt Macy zfs_btree_destroy(size_tree); 1493eda14cbcSMatt Macy 1494eda14cbcSMatt Macy metaslab_rt_create(rt, arg); 1495eda14cbcSMatt Macy } 1496eda14cbcSMatt Macy 1497e92ffd9bSMartin Matuska static const range_tree_ops_t metaslab_rt_ops = { 1498eda14cbcSMatt Macy .rtop_create = metaslab_rt_create, 1499eda14cbcSMatt Macy .rtop_destroy = metaslab_rt_destroy, 1500eda14cbcSMatt Macy .rtop_add = metaslab_rt_add, 1501eda14cbcSMatt Macy .rtop_remove = metaslab_rt_remove, 1502eda14cbcSMatt Macy .rtop_vacate = metaslab_rt_vacate 1503eda14cbcSMatt Macy }; 1504eda14cbcSMatt Macy 1505eda14cbcSMatt Macy /* 1506eda14cbcSMatt Macy * ========================================================================== 1507eda14cbcSMatt Macy * Common allocator routines 1508eda14cbcSMatt Macy * ========================================================================== 1509eda14cbcSMatt Macy */ 1510eda14cbcSMatt Macy 1511eda14cbcSMatt Macy /* 1512eda14cbcSMatt Macy * Return the maximum contiguous segment within the metaslab. 1513eda14cbcSMatt Macy */ 1514eda14cbcSMatt Macy uint64_t 1515eda14cbcSMatt Macy metaslab_largest_allocatable(metaslab_t *msp) 1516eda14cbcSMatt Macy { 1517eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable_by_size; 1518eda14cbcSMatt Macy range_seg_t *rs; 1519eda14cbcSMatt Macy 1520eda14cbcSMatt Macy if (t == NULL) 1521eda14cbcSMatt Macy return (0); 1522eda14cbcSMatt Macy if (zfs_btree_numnodes(t) == 0) 1523eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 1524eda14cbcSMatt Macy 1525eda14cbcSMatt Macy rs = zfs_btree_last(t, NULL); 1526eda14cbcSMatt Macy if (rs == NULL) 1527eda14cbcSMatt Macy return (0); 1528eda14cbcSMatt Macy 1529eda14cbcSMatt Macy return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs, 1530eda14cbcSMatt Macy msp->ms_allocatable)); 1531eda14cbcSMatt Macy } 1532eda14cbcSMatt Macy 1533eda14cbcSMatt Macy /* 1534eda14cbcSMatt Macy * Return the maximum contiguous segment within the unflushed frees of this 1535eda14cbcSMatt Macy * metaslab. 1536eda14cbcSMatt Macy */ 1537eda14cbcSMatt Macy static uint64_t 1538eda14cbcSMatt Macy metaslab_largest_unflushed_free(metaslab_t *msp) 1539eda14cbcSMatt Macy { 1540eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1541eda14cbcSMatt Macy 1542eda14cbcSMatt Macy if (msp->ms_unflushed_frees == NULL) 1543eda14cbcSMatt Macy return (0); 1544eda14cbcSMatt Macy 1545eda14cbcSMatt Macy if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0) 1546eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_unflushed_frees); 1547eda14cbcSMatt Macy range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size, 1548eda14cbcSMatt Macy NULL); 1549eda14cbcSMatt Macy if (rs == NULL) 1550eda14cbcSMatt Macy return (0); 1551eda14cbcSMatt Macy 1552eda14cbcSMatt Macy /* 1553eda14cbcSMatt Macy * When a range is freed from the metaslab, that range is added to 1554eda14cbcSMatt Macy * both the unflushed frees and the deferred frees. While the block 1555eda14cbcSMatt Macy * will eventually be usable, if the metaslab were loaded the range 1556eda14cbcSMatt Macy * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE 1557eda14cbcSMatt Macy * txgs had passed. 
As a result, when attempting to estimate an upper 1558eda14cbcSMatt Macy * bound for the largest currently-usable free segment in the 1559eda14cbcSMatt Macy * metaslab, we need to not consider any ranges currently in the defer 1560eda14cbcSMatt Macy * trees. This algorithm approximates the largest available chunk in 1561eda14cbcSMatt Macy * the largest range in the unflushed_frees tree by taking the first 1562eda14cbcSMatt Macy * chunk. While this may be a poor estimate, it should only remain so 1563eda14cbcSMatt Macy * briefly and should eventually self-correct as frees are no longer 1564eda14cbcSMatt Macy * deferred. Similar logic applies to the ms_freed tree. See 1565eda14cbcSMatt Macy * metaslab_load() for more details. 1566eda14cbcSMatt Macy * 1567eda14cbcSMatt Macy * There are two primary sources of inaccuracy in this estimate. Both 1568eda14cbcSMatt Macy * are tolerated for performance reasons. The first source is that we 1569eda14cbcSMatt Macy * only check the largest segment for overlaps. Smaller segments may 1570eda14cbcSMatt Macy * have more favorable overlaps with the other trees, resulting in 1571eda14cbcSMatt Macy * larger usable chunks. Second, we only look at the first chunk in 1572eda14cbcSMatt Macy * the largest segment; there may be other usable chunks in the 1573eda14cbcSMatt Macy * largest segment, but we ignore them. 1574eda14cbcSMatt Macy */ 1575eda14cbcSMatt Macy uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees); 1576eda14cbcSMatt Macy uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart; 1577eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 1578eda14cbcSMatt Macy uint64_t start = 0; 1579eda14cbcSMatt Macy uint64_t size = 0; 1580eda14cbcSMatt Macy boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart, 1581eda14cbcSMatt Macy rsize, &start, &size); 1582eda14cbcSMatt Macy if (found) { 1583eda14cbcSMatt Macy if (rstart == start) 1584eda14cbcSMatt Macy return (0); 1585eda14cbcSMatt Macy rsize = start - rstart; 1586eda14cbcSMatt Macy } 1587eda14cbcSMatt Macy } 1588eda14cbcSMatt Macy 1589eda14cbcSMatt Macy uint64_t start = 0; 1590eda14cbcSMatt Macy uint64_t size = 0; 1591eda14cbcSMatt Macy boolean_t found = range_tree_find_in(msp->ms_freed, rstart, 1592eda14cbcSMatt Macy rsize, &start, &size); 1593eda14cbcSMatt Macy if (found) 1594eda14cbcSMatt Macy rsize = start - rstart; 1595eda14cbcSMatt Macy 1596eda14cbcSMatt Macy return (rsize); 1597eda14cbcSMatt Macy } 1598eda14cbcSMatt Macy 1599eda14cbcSMatt Macy static range_seg_t * 1600eda14cbcSMatt Macy metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start, 1601eda14cbcSMatt Macy uint64_t size, zfs_btree_index_t *where) 1602eda14cbcSMatt Macy { 1603eda14cbcSMatt Macy range_seg_t *rs; 1604eda14cbcSMatt Macy range_seg_max_t rsearch; 1605eda14cbcSMatt Macy 1606eda14cbcSMatt Macy rs_set_start(&rsearch, rt, start); 1607eda14cbcSMatt Macy rs_set_end(&rsearch, rt, start + size); 1608eda14cbcSMatt Macy 1609eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, where); 1610eda14cbcSMatt Macy if (rs == NULL) { 1611eda14cbcSMatt Macy rs = zfs_btree_next(t, where, where); 1612eda14cbcSMatt Macy } 1613eda14cbcSMatt Macy 1614eda14cbcSMatt Macy return (rs); 1615eda14cbcSMatt Macy } 1616eda14cbcSMatt Macy 1617eda14cbcSMatt Macy /* 1618eda14cbcSMatt Macy * This is a helper function that can be used by the allocator to find a 1619eda14cbcSMatt Macy * suitable block to allocate. 
This will search the specified B-tree looking 1620eda14cbcSMatt Macy * for a block that matches the specified criteria. 1621eda14cbcSMatt Macy */ 1622eda14cbcSMatt Macy static uint64_t 1623eda14cbcSMatt Macy metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size, 1624eda14cbcSMatt Macy uint64_t max_search) 1625eda14cbcSMatt Macy { 1626eda14cbcSMatt Macy if (*cursor == 0) 1627eda14cbcSMatt Macy *cursor = rt->rt_start; 1628eda14cbcSMatt Macy zfs_btree_t *bt = &rt->rt_root; 1629eda14cbcSMatt Macy zfs_btree_index_t where; 1630eda14cbcSMatt Macy range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where); 1631eda14cbcSMatt Macy uint64_t first_found; 1632eda14cbcSMatt Macy int count_searched = 0; 1633eda14cbcSMatt Macy 1634eda14cbcSMatt Macy if (rs != NULL) 1635eda14cbcSMatt Macy first_found = rs_get_start(rs, rt); 1636eda14cbcSMatt Macy 1637eda14cbcSMatt Macy while (rs != NULL && (rs_get_start(rs, rt) - first_found <= 1638eda14cbcSMatt Macy max_search || count_searched < metaslab_min_search_count)) { 1639eda14cbcSMatt Macy uint64_t offset = rs_get_start(rs, rt); 1640eda14cbcSMatt Macy if (offset + size <= rs_get_end(rs, rt)) { 1641eda14cbcSMatt Macy *cursor = offset + size; 1642eda14cbcSMatt Macy return (offset); 1643eda14cbcSMatt Macy } 1644eda14cbcSMatt Macy rs = zfs_btree_next(bt, &where, &where); 1645eda14cbcSMatt Macy count_searched++; 1646eda14cbcSMatt Macy } 1647eda14cbcSMatt Macy 1648eda14cbcSMatt Macy *cursor = 0; 1649eda14cbcSMatt Macy return (-1ULL); 1650eda14cbcSMatt Macy } 1651eda14cbcSMatt Macy 16522ad756a6SMartin Matuska static uint64_t metaslab_df_alloc(metaslab_t *msp, uint64_t size); 16532ad756a6SMartin Matuska static uint64_t metaslab_cf_alloc(metaslab_t *msp, uint64_t size); 16542ad756a6SMartin Matuska static uint64_t metaslab_ndf_alloc(metaslab_t *msp, uint64_t size); 16552ad756a6SMartin Matuska metaslab_ops_t *metaslab_allocator(spa_t *spa); 16562ad756a6SMartin Matuska 16572ad756a6SMartin Matuska static metaslab_ops_t metaslab_allocators[] = { 16582ad756a6SMartin Matuska { "dynamic", metaslab_df_alloc }, 16592ad756a6SMartin Matuska { "cursor", metaslab_cf_alloc }, 16602ad756a6SMartin Matuska { "new-dynamic", metaslab_ndf_alloc }, 16612ad756a6SMartin Matuska }; 16622ad756a6SMartin Matuska 16632ad756a6SMartin Matuska static int 16642ad756a6SMartin Matuska spa_find_allocator_byname(const char *val) 16652ad756a6SMartin Matuska { 16662ad756a6SMartin Matuska int a = ARRAY_SIZE(metaslab_allocators) - 1; 16672ad756a6SMartin Matuska if (strcmp("new-dynamic", val) == 0) 16682ad756a6SMartin Matuska return (-1); /* remove when ndf is working */ 16692ad756a6SMartin Matuska for (; a >= 0; a--) { 16702ad756a6SMartin Matuska if (strcmp(val, metaslab_allocators[a].msop_name) == 0) 16712ad756a6SMartin Matuska return (a); 16722ad756a6SMartin Matuska } 16732ad756a6SMartin Matuska return (-1); 16742ad756a6SMartin Matuska } 16752ad756a6SMartin Matuska 16762ad756a6SMartin Matuska void 16772ad756a6SMartin Matuska spa_set_allocator(spa_t *spa, const char *allocator) 16782ad756a6SMartin Matuska { 16792ad756a6SMartin Matuska int a = spa_find_allocator_byname(allocator); 16802ad756a6SMartin Matuska if (a < 0) a = 0; 16812ad756a6SMartin Matuska spa->spa_active_allocator = a; 1682*7a7741afSMartin Matuska zfs_dbgmsg("spa allocator: %s", metaslab_allocators[a].msop_name); 16832ad756a6SMartin Matuska } 16842ad756a6SMartin Matuska 16852ad756a6SMartin Matuska int 16862ad756a6SMartin Matuska spa_get_allocator(spa_t *spa) 16872ad756a6SMartin Matuska { 16882ad756a6SMartin Matuska 
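	/*
	 * spa_active_allocator is an index into metaslab_allocators[];
	 * see metaslab_allocator() below.
	 */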
return (spa->spa_active_allocator); 16892ad756a6SMartin Matuska } 16902ad756a6SMartin Matuska 16912ad756a6SMartin Matuska #if defined(_KERNEL) 16922ad756a6SMartin Matuska int 16932ad756a6SMartin Matuska param_set_active_allocator_common(const char *val) 16942ad756a6SMartin Matuska { 16952ad756a6SMartin Matuska char *p; 16962ad756a6SMartin Matuska 16972ad756a6SMartin Matuska if (val == NULL) 16982ad756a6SMartin Matuska return (SET_ERROR(EINVAL)); 16992ad756a6SMartin Matuska 17002ad756a6SMartin Matuska if ((p = strchr(val, '\n')) != NULL) 17012ad756a6SMartin Matuska *p = '\0'; 17022ad756a6SMartin Matuska 17032ad756a6SMartin Matuska int a = spa_find_allocator_byname(val); 17042ad756a6SMartin Matuska if (a < 0) 17052ad756a6SMartin Matuska return (SET_ERROR(EINVAL)); 17062ad756a6SMartin Matuska 17072ad756a6SMartin Matuska zfs_active_allocator = metaslab_allocators[a].msop_name; 17082ad756a6SMartin Matuska return (0); 17092ad756a6SMartin Matuska } 17102ad756a6SMartin Matuska #endif 17112ad756a6SMartin Matuska 17122ad756a6SMartin Matuska metaslab_ops_t * 17132ad756a6SMartin Matuska metaslab_allocator(spa_t *spa) 17142ad756a6SMartin Matuska { 17152ad756a6SMartin Matuska int allocator = spa_get_allocator(spa); 17162ad756a6SMartin Matuska return (&metaslab_allocators[allocator]); 17172ad756a6SMartin Matuska } 17182ad756a6SMartin Matuska 1719eda14cbcSMatt Macy /* 1720eda14cbcSMatt Macy * ========================================================================== 1721eda14cbcSMatt Macy * Dynamic Fit (df) block allocator 1722eda14cbcSMatt Macy * 1723eda14cbcSMatt Macy * Search for a free chunk of at least this size, starting from the last 1724eda14cbcSMatt Macy * offset (for this alignment of block) looking for up to 1725eda14cbcSMatt Macy * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not 1726eda14cbcSMatt Macy * found within 16MB, then return a free chunk of exactly the requested size (or 1727eda14cbcSMatt Macy * larger). 1728eda14cbcSMatt Macy * 1729eda14cbcSMatt Macy * If it seems like searching from the last offset will be unproductive, skip 1730eda14cbcSMatt Macy * that and just return a free chunk of exactly the requested size (or larger). 1731eda14cbcSMatt Macy * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This 1732eda14cbcSMatt Macy * mechanism is probably not very useful and may be removed in the future. 1733eda14cbcSMatt Macy * 1734eda14cbcSMatt Macy * The behavior when not searching can be changed to return the largest free 1735eda14cbcSMatt Macy * chunk, instead of a free chunk of exactly the requested size, by setting 1736eda14cbcSMatt Macy * metaslab_df_use_largest_segment. 1737eda14cbcSMatt Macy * ========================================================================== 1738eda14cbcSMatt Macy */ 1739eda14cbcSMatt Macy static uint64_t 1740eda14cbcSMatt Macy metaslab_df_alloc(metaslab_t *msp, uint64_t size) 1741eda14cbcSMatt Macy { 1742eda14cbcSMatt Macy /* 1743eda14cbcSMatt Macy * Find the largest power of 2 block size that evenly divides the 1744eda14cbcSMatt Macy * requested size. This is used to try to allocate blocks with similar 1745eda14cbcSMatt Macy * alignment from the same area of the metaslab (i.e. same cursor 1746eda14cbcSMatt Macy * bucket), but it does not guarantee that allocations of other 1747eda14cbcSMatt Macy * sizes cannot end up in the same region.
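 * As a worked illustration (not part of the original comment): a 24K
 * request has size & -size == 8K == 1 << 13, so it uses the cursor at
 * ms_lbas[highbit64(8K) - 1] == ms_lbas[13], shared with every other
 * request whose size is an odd multiple of 8K.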
1748eda14cbcSMatt Macy */ 1749eda14cbcSMatt Macy uint64_t align = size & -size; 1750eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; 1751eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1752be181ee2SMartin Matuska uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size; 1753eda14cbcSMatt Macy uint64_t offset; 1754eda14cbcSMatt Macy 1755eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1756eda14cbcSMatt Macy 1757eda14cbcSMatt Macy /* 1758eda14cbcSMatt Macy * If we're running low on space, find a segment based on size, 1759eda14cbcSMatt Macy * rather than iterating based on offset. 1760eda14cbcSMatt Macy */ 1761eda14cbcSMatt Macy if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold || 1762eda14cbcSMatt Macy free_pct < metaslab_df_free_pct) { 1763eda14cbcSMatt Macy offset = -1; 1764eda14cbcSMatt Macy } else { 1765eda14cbcSMatt Macy offset = metaslab_block_picker(rt, 1766eda14cbcSMatt Macy cursor, size, metaslab_df_max_search); 1767eda14cbcSMatt Macy } 1768eda14cbcSMatt Macy 1769eda14cbcSMatt Macy if (offset == -1) { 1770eda14cbcSMatt Macy range_seg_t *rs; 1771eda14cbcSMatt Macy if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0) 1772eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 17737877fdebSMatt Macy 1774eda14cbcSMatt Macy if (metaslab_df_use_largest_segment) { 1775eda14cbcSMatt Macy /* use largest free segment */ 1776eda14cbcSMatt Macy rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL); 1777eda14cbcSMatt Macy } else { 1778eda14cbcSMatt Macy zfs_btree_index_t where; 1779eda14cbcSMatt Macy /* use segment of this size, or next largest */ 1780eda14cbcSMatt Macy rs = metaslab_block_find(&msp->ms_allocatable_by_size, 1781eda14cbcSMatt Macy rt, msp->ms_start, size, &where); 1782eda14cbcSMatt Macy } 1783eda14cbcSMatt Macy if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs, 1784eda14cbcSMatt Macy rt)) { 1785eda14cbcSMatt Macy offset = rs_get_start(rs, rt); 1786eda14cbcSMatt Macy *cursor = offset + size; 1787eda14cbcSMatt Macy } 1788eda14cbcSMatt Macy } 1789eda14cbcSMatt Macy 1790eda14cbcSMatt Macy return (offset); 1791eda14cbcSMatt Macy } 1792eda14cbcSMatt Macy 1793eda14cbcSMatt Macy /* 1794eda14cbcSMatt Macy * ========================================================================== 1795eda14cbcSMatt Macy * Cursor fit block allocator - 1796eda14cbcSMatt Macy * Select the largest region in the metaslab, set the cursor to the beginning 1797eda14cbcSMatt Macy * of the range and the cursor_end to the end of the range. As allocations 1798eda14cbcSMatt Macy * are made advance the cursor. Continue allocating from the cursor until 1799eda14cbcSMatt Macy * the range is exhausted and then find a new range. 
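 * For example (illustrative): once the cursor latches onto a 1M
 * region, eight consecutive 128K allocations are carved from it; the
 * ninth no longer fits between cursor and cursor_end, so the largest
 * remaining region is selected from the by-size tree.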
1800eda14cbcSMatt Macy * ========================================================================== 1801eda14cbcSMatt Macy */ 1802eda14cbcSMatt Macy static uint64_t 1803eda14cbcSMatt Macy metaslab_cf_alloc(metaslab_t *msp, uint64_t size) 1804eda14cbcSMatt Macy { 1805eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1806eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable_by_size; 1807eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[0]; 1808eda14cbcSMatt Macy uint64_t *cursor_end = &msp->ms_lbas[1]; 1809eda14cbcSMatt Macy uint64_t offset = 0; 1810eda14cbcSMatt Macy 1811eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1812eda14cbcSMatt Macy 1813eda14cbcSMatt Macy ASSERT3U(*cursor_end, >=, *cursor); 1814eda14cbcSMatt Macy 1815eda14cbcSMatt Macy if ((*cursor + size) > *cursor_end) { 1816eda14cbcSMatt Macy range_seg_t *rs; 1817eda14cbcSMatt Macy 1818eda14cbcSMatt Macy if (zfs_btree_numnodes(t) == 0) 1819eda14cbcSMatt Macy metaslab_size_tree_full_load(msp->ms_allocatable); 1820eda14cbcSMatt Macy rs = zfs_btree_last(t, NULL); 1821eda14cbcSMatt Macy if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < 1822eda14cbcSMatt Macy size) 1823eda14cbcSMatt Macy return (-1ULL); 1824eda14cbcSMatt Macy 1825eda14cbcSMatt Macy *cursor = rs_get_start(rs, rt); 1826eda14cbcSMatt Macy *cursor_end = rs_get_end(rs, rt); 1827eda14cbcSMatt Macy } 1828eda14cbcSMatt Macy 1829eda14cbcSMatt Macy offset = *cursor; 1830eda14cbcSMatt Macy *cursor += size; 1831eda14cbcSMatt Macy 1832eda14cbcSMatt Macy return (offset); 1833eda14cbcSMatt Macy } 1834eda14cbcSMatt Macy 1835eda14cbcSMatt Macy /* 1836eda14cbcSMatt Macy * ========================================================================== 1837eda14cbcSMatt Macy * New dynamic fit allocator - 1838eda14cbcSMatt Macy * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift 1839eda14cbcSMatt Macy * contiguous blocks. If no region is found then just use the largest segment 1840eda14cbcSMatt Macy * that remains. 1841eda14cbcSMatt Macy * ========================================================================== 1842eda14cbcSMatt Macy */ 1843eda14cbcSMatt Macy 1844eda14cbcSMatt Macy /* 1845eda14cbcSMatt Macy * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) 1846eda14cbcSMatt Macy * to request from the allocator. 
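 * As a concrete illustration (assuming the default clump shift of 4):
 * for a 4K request, hbit == 13 because highbit64() is 1-based, so the
 * fallback path below asks the by-size tree for a segment of up to
 * MIN(max_size, 1ULL << (13 + 4)) == 128K.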
1847eda14cbcSMatt Macy */ 1848eda14cbcSMatt Macy uint64_t metaslab_ndf_clump_shift = 4; 1849eda14cbcSMatt Macy 1850eda14cbcSMatt Macy static uint64_t 1851eda14cbcSMatt Macy metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) 1852eda14cbcSMatt Macy { 1853eda14cbcSMatt Macy zfs_btree_t *t = &msp->ms_allocatable->rt_root; 1854eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 1855eda14cbcSMatt Macy zfs_btree_index_t where; 1856eda14cbcSMatt Macy range_seg_t *rs; 1857eda14cbcSMatt Macy range_seg_max_t rsearch; 1858eda14cbcSMatt Macy uint64_t hbit = highbit64(size); 1859eda14cbcSMatt Macy uint64_t *cursor = &msp->ms_lbas[hbit - 1]; 1860eda14cbcSMatt Macy uint64_t max_size = metaslab_largest_allocatable(msp); 1861eda14cbcSMatt Macy 1862eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1863eda14cbcSMatt Macy 1864eda14cbcSMatt Macy if (max_size < size) 1865eda14cbcSMatt Macy return (-1ULL); 1866eda14cbcSMatt Macy 1867eda14cbcSMatt Macy rs_set_start(&rsearch, rt, *cursor); 1868eda14cbcSMatt Macy rs_set_end(&rsearch, rt, *cursor + size); 1869eda14cbcSMatt Macy 1870eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, &where); 1871eda14cbcSMatt Macy if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) { 1872eda14cbcSMatt Macy t = &msp->ms_allocatable_by_size; 1873eda14cbcSMatt Macy 1874eda14cbcSMatt Macy rs_set_start(&rsearch, rt, 0); 1875eda14cbcSMatt Macy rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit + 1876eda14cbcSMatt Macy metaslab_ndf_clump_shift))); 1877eda14cbcSMatt Macy 1878eda14cbcSMatt Macy rs = zfs_btree_find(t, &rsearch, &where); 1879eda14cbcSMatt Macy if (rs == NULL) 1880eda14cbcSMatt Macy rs = zfs_btree_next(t, &where, &where); 1881eda14cbcSMatt Macy ASSERT(rs != NULL); 1882eda14cbcSMatt Macy } 1883eda14cbcSMatt Macy 1884eda14cbcSMatt Macy if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) { 1885eda14cbcSMatt Macy *cursor = rs_get_start(rs, rt) + size; 1886eda14cbcSMatt Macy return (rs_get_start(rs, rt)); 1887eda14cbcSMatt Macy } 1888eda14cbcSMatt Macy return (-1ULL); 1889eda14cbcSMatt Macy } 1890eda14cbcSMatt Macy 1891eda14cbcSMatt Macy /* 1892eda14cbcSMatt Macy * ========================================================================== 1893eda14cbcSMatt Macy * Metaslabs 1894eda14cbcSMatt Macy * ========================================================================== 1895eda14cbcSMatt Macy */ 1896eda14cbcSMatt Macy 1897eda14cbcSMatt Macy /* 1898eda14cbcSMatt Macy * Wait for any in-progress metaslab loads to complete. 1899eda14cbcSMatt Macy */ 1900eda14cbcSMatt Macy static void 1901eda14cbcSMatt Macy metaslab_load_wait(metaslab_t *msp) 1902eda14cbcSMatt Macy { 1903eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1904eda14cbcSMatt Macy 1905eda14cbcSMatt Macy while (msp->ms_loading) { 1906eda14cbcSMatt Macy ASSERT(!msp->ms_loaded); 1907eda14cbcSMatt Macy cv_wait(&msp->ms_load_cv, &msp->ms_lock); 1908eda14cbcSMatt Macy } 1909eda14cbcSMatt Macy } 1910eda14cbcSMatt Macy 1911eda14cbcSMatt Macy /* 1912eda14cbcSMatt Macy * Wait for any in-progress flushing to complete. 
1913eda14cbcSMatt Macy */ 1914eda14cbcSMatt Macy static void 1915eda14cbcSMatt Macy metaslab_flush_wait(metaslab_t *msp) 1916eda14cbcSMatt Macy { 1917eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1918eda14cbcSMatt Macy 1919eda14cbcSMatt Macy while (msp->ms_flushing) 1920eda14cbcSMatt Macy cv_wait(&msp->ms_flush_cv, &msp->ms_lock); 1921eda14cbcSMatt Macy } 1922eda14cbcSMatt Macy 1923eda14cbcSMatt Macy static unsigned int 1924eda14cbcSMatt Macy metaslab_idx_func(multilist_t *ml, void *arg) 1925eda14cbcSMatt Macy { 1926eda14cbcSMatt Macy metaslab_t *msp = arg; 19272617128aSMartin Matuska 19282617128aSMartin Matuska /* 19292617128aSMartin Matuska * ms_id values are allocated sequentially, so a full 64-bit 19302617128aSMartin Matuska * division would be a waste of time; limit it to 32 bits. 19312617128aSMartin Matuska */ 19322617128aSMartin Matuska return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml)); 1933eda14cbcSMatt Macy } 1934eda14cbcSMatt Macy 1935eda14cbcSMatt Macy uint64_t 1936eda14cbcSMatt Macy metaslab_allocated_space(metaslab_t *msp) 1937eda14cbcSMatt Macy { 1938eda14cbcSMatt Macy return (msp->ms_allocated_space); 1939eda14cbcSMatt Macy } 1940eda14cbcSMatt Macy 1941eda14cbcSMatt Macy /* 1942eda14cbcSMatt Macy * Verify that the space accounting on disk matches the in-core range_trees. 1943eda14cbcSMatt Macy */ 1944eda14cbcSMatt Macy static void 1945eda14cbcSMatt Macy metaslab_verify_space(metaslab_t *msp, uint64_t txg) 1946eda14cbcSMatt Macy { 1947eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1948eda14cbcSMatt Macy uint64_t allocating = 0; 1949eda14cbcSMatt Macy uint64_t sm_free_space, msp_free_space; 1950eda14cbcSMatt Macy 1951eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 1952eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 1953eda14cbcSMatt Macy 1954eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 1955eda14cbcSMatt Macy return; 1956eda14cbcSMatt Macy 1957eda14cbcSMatt Macy /* 1958eda14cbcSMatt Macy * We can only verify the metaslab space when we're called 1959eda14cbcSMatt Macy * from syncing context with a loaded metaslab that has an 1960eda14cbcSMatt Macy * allocated space map. Calling this in non-syncing context 1961eda14cbcSMatt Macy * does not provide a consistent view of the metaslab since 1962eda14cbcSMatt Macy * we're performing allocations in the future. 1963eda14cbcSMatt Macy */ 1964eda14cbcSMatt Macy if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL || 1965eda14cbcSMatt Macy !msp->ms_loaded) 1966eda14cbcSMatt Macy return; 1967eda14cbcSMatt Macy 1968eda14cbcSMatt Macy /* 1969eda14cbcSMatt Macy * Even though the smp_alloc field can get negative, 1970eda14cbcSMatt Macy * when it comes to a metaslab's space map, that should 1971eda14cbcSMatt Macy * never be the case.
1972eda14cbcSMatt Macy */ 1973eda14cbcSMatt Macy ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0); 1974eda14cbcSMatt Macy 1975eda14cbcSMatt Macy ASSERT3U(space_map_allocated(msp->ms_sm), >=, 1976eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_frees)); 1977eda14cbcSMatt Macy 1978eda14cbcSMatt Macy ASSERT3U(metaslab_allocated_space(msp), ==, 1979eda14cbcSMatt Macy space_map_allocated(msp->ms_sm) + 1980eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_allocs) - 1981eda14cbcSMatt Macy range_tree_space(msp->ms_unflushed_frees)); 1982eda14cbcSMatt Macy 1983eda14cbcSMatt Macy sm_free_space = msp->ms_size - metaslab_allocated_space(msp); 1984eda14cbcSMatt Macy 1985eda14cbcSMatt Macy /* 1986eda14cbcSMatt Macy * Account for future allocations since we would have 1987eda14cbcSMatt Macy * already deducted that space from the ms_allocatable. 1988eda14cbcSMatt Macy */ 1989eda14cbcSMatt Macy for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { 1990eda14cbcSMatt Macy allocating += 1991eda14cbcSMatt Macy range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]); 1992eda14cbcSMatt Macy } 1993eda14cbcSMatt Macy ASSERT3U(allocating + msp->ms_allocated_this_txg, ==, 1994eda14cbcSMatt Macy msp->ms_allocating_total); 1995eda14cbcSMatt Macy 1996eda14cbcSMatt Macy ASSERT3U(msp->ms_deferspace, ==, 1997eda14cbcSMatt Macy range_tree_space(msp->ms_defer[0]) + 1998eda14cbcSMatt Macy range_tree_space(msp->ms_defer[1])); 1999eda14cbcSMatt Macy 2000eda14cbcSMatt Macy msp_free_space = range_tree_space(msp->ms_allocatable) + allocating + 2001eda14cbcSMatt Macy msp->ms_deferspace + range_tree_space(msp->ms_freed); 2002eda14cbcSMatt Macy 2003eda14cbcSMatt Macy VERIFY3U(sm_free_space, ==, msp_free_space); 2004eda14cbcSMatt Macy } 2005eda14cbcSMatt Macy 2006eda14cbcSMatt Macy static void 2007eda14cbcSMatt Macy metaslab_aux_histograms_clear(metaslab_t *msp) 2008eda14cbcSMatt Macy { 2009eda14cbcSMatt Macy /* 2010eda14cbcSMatt Macy * Auxiliary histograms are only cleared when resetting them, 2011eda14cbcSMatt Macy * which can only happen while the metaslab is loaded. 2012eda14cbcSMatt Macy */ 2013eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 2014eda14cbcSMatt Macy 2015da5137abSMartin Matuska memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); 2016eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) 2017da5137abSMartin Matuska memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t])); 2018eda14cbcSMatt Macy } 2019eda14cbcSMatt Macy 2020eda14cbcSMatt Macy static void 2021eda14cbcSMatt Macy metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift, 2022eda14cbcSMatt Macy range_tree_t *rt) 2023eda14cbcSMatt Macy { 2024eda14cbcSMatt Macy /* 2025eda14cbcSMatt Macy * This is modeled after space_map_histogram_add(), so refer to that 2026eda14cbcSMatt Macy * function for implementation details. We want this to work like 2027eda14cbcSMatt Macy * the space map histogram, and not the range tree histogram, as we 2028eda14cbcSMatt Macy * are essentially constructing a delta that will be later subtracted 2029eda14cbcSMatt Macy * from the space map histogram. 
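 * Worked example (editorial): with sm_shift == 9, segments counted in
 * range tree bucket i == 20 (sizes in [2^20, 2^21)) are credited to
 * space map bucket idx == 11 with no scaling, since by then
 * i - idx - shift == 0.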
2030eda14cbcSMatt Macy */ 2031eda14cbcSMatt Macy int idx = 0; 2032eda14cbcSMatt Macy for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { 2033eda14cbcSMatt Macy ASSERT3U(i, >=, idx + shift); 2034eda14cbcSMatt Macy histogram[idx] += rt->rt_histogram[i] << (i - idx - shift); 2035eda14cbcSMatt Macy 2036eda14cbcSMatt Macy if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) { 2037eda14cbcSMatt Macy ASSERT3U(idx + shift, ==, i); 2038eda14cbcSMatt Macy idx++; 2039eda14cbcSMatt Macy ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE); 2040eda14cbcSMatt Macy } 2041eda14cbcSMatt Macy } 2042eda14cbcSMatt Macy } 2043eda14cbcSMatt Macy 2044eda14cbcSMatt Macy /* 2045eda14cbcSMatt Macy * Called at every sync pass in which the metaslab gets synced. 2046eda14cbcSMatt Macy * 2047eda14cbcSMatt Macy * The reason is that we want our auxiliary histograms to be updated 2048eda14cbcSMatt Macy * wherever the metaslab's space map histogram is updated. This way 2049eda14cbcSMatt Macy * we stay consistent on which parts of the metaslab space map's 2050eda14cbcSMatt Macy * histogram are currently not available for allocations (e.g. because 2051eda14cbcSMatt Macy * they are in the defer, freed, and freeing trees). 2052eda14cbcSMatt Macy */ 2053eda14cbcSMatt Macy static void 2054eda14cbcSMatt Macy metaslab_aux_histograms_update(metaslab_t *msp) 2055eda14cbcSMatt Macy { 2056eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm; 2057eda14cbcSMatt Macy ASSERT(sm != NULL); 2058eda14cbcSMatt Macy 2059eda14cbcSMatt Macy /* 2060eda14cbcSMatt Macy * This is similar to the metaslab's space map histogram updates 2061eda14cbcSMatt Macy * that take place in metaslab_sync(). The only difference is that 2062eda14cbcSMatt Macy * we only care about segments that haven't made it into the 2063eda14cbcSMatt Macy * ms_allocatable tree yet. 2064eda14cbcSMatt Macy */ 2065eda14cbcSMatt Macy if (msp->ms_loaded) { 2066eda14cbcSMatt Macy metaslab_aux_histograms_clear(msp); 2067eda14cbcSMatt Macy 2068eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_synchist, 2069eda14cbcSMatt Macy sm->sm_shift, msp->ms_freed); 2070eda14cbcSMatt Macy 2071eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2072eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_deferhist[t], 2073eda14cbcSMatt Macy sm->sm_shift, msp->ms_defer[t]); 2074eda14cbcSMatt Macy } 2075eda14cbcSMatt Macy } 2076eda14cbcSMatt Macy 2077eda14cbcSMatt Macy metaslab_aux_histogram_add(msp->ms_synchist, 2078eda14cbcSMatt Macy sm->sm_shift, msp->ms_freeing); 2079eda14cbcSMatt Macy } 2080eda14cbcSMatt Macy 2081eda14cbcSMatt Macy /* 2082eda14cbcSMatt Macy * Called every time we are done syncing (writing to) the metaslab, 2083eda14cbcSMatt Macy * i.e. at the end of each sync pass. 2084eda14cbcSMatt Macy * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist] 2085eda14cbcSMatt Macy */ 2086eda14cbcSMatt Macy static void 2087eda14cbcSMatt Macy metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed) 2088eda14cbcSMatt Macy { 2089eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2090eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm; 2091eda14cbcSMatt Macy 2092eda14cbcSMatt Macy if (sm == NULL) { 2093eda14cbcSMatt Macy /* 2094eda14cbcSMatt Macy * We came here from metaslab_init() when creating/opening a 2095eda14cbcSMatt Macy * pool, looking at a metaslab that hasn't had any allocations 2096eda14cbcSMatt Macy * yet.
2097eda14cbcSMatt Macy */ 2098eda14cbcSMatt Macy return; 2099eda14cbcSMatt Macy } 2100eda14cbcSMatt Macy 2101eda14cbcSMatt Macy /* 2102eda14cbcSMatt Macy * This is similar to the actions that we take for the ms_freed 2103eda14cbcSMatt Macy * and ms_defer trees in metaslab_sync_done(). 2104eda14cbcSMatt Macy */ 2105eda14cbcSMatt Macy uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE; 2106eda14cbcSMatt Macy if (defer_allowed) { 2107da5137abSMartin Matuska memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist, 2108eda14cbcSMatt Macy sizeof (msp->ms_synchist)); 2109eda14cbcSMatt Macy } else { 2110da5137abSMartin Matuska memset(msp->ms_deferhist[hist_index], 0, 2111eda14cbcSMatt Macy sizeof (msp->ms_deferhist[hist_index])); 2112eda14cbcSMatt Macy } 2113da5137abSMartin Matuska memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); 2114eda14cbcSMatt Macy } 2115eda14cbcSMatt Macy 2116eda14cbcSMatt Macy /* 2117eda14cbcSMatt Macy * Ensure that the metaslab's weight and fragmentation are consistent 2118eda14cbcSMatt Macy * with the contents of the histogram (either the range tree's histogram 2119eda14cbcSMatt Macy * or the space map's, depending on whether the metaslab is loaded). 2120eda14cbcSMatt Macy */ 2121eda14cbcSMatt Macy static void 2122eda14cbcSMatt Macy metaslab_verify_weight_and_frag(metaslab_t *msp) 2123eda14cbcSMatt Macy { 2124eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2125eda14cbcSMatt Macy 2126eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 2127eda14cbcSMatt Macy return; 2128eda14cbcSMatt Macy 2129eda14cbcSMatt Macy /* 2130eda14cbcSMatt Macy * We can end up here from vdev_remove_complete(), in which case we 2131eda14cbcSMatt Macy * cannot do these assertions because we hold spa config locks and 2132eda14cbcSMatt Macy * thus we are not allowed to read from the DMU. 2133eda14cbcSMatt Macy * 2134eda14cbcSMatt Macy * We check if the metaslab group has been removed and if that's 2135eda14cbcSMatt Macy * the case we return immediately as that would mean that we are 2136eda14cbcSMatt Macy * here from the aforementioned code path. 2137eda14cbcSMatt Macy */ 2138eda14cbcSMatt Macy if (msp->ms_group == NULL) 2139eda14cbcSMatt Macy return; 2140eda14cbcSMatt Macy 2141eda14cbcSMatt Macy /* 2142eda14cbcSMatt Macy * Devices being removed always return a weight of 0 and leave 2143eda14cbcSMatt Macy * fragmentation and ms_max_size as is - there is nothing for 2144eda14cbcSMatt Macy * us to verify here. 2145eda14cbcSMatt Macy */ 2146eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd; 2147eda14cbcSMatt Macy if (vd->vdev_removing) 2148eda14cbcSMatt Macy return; 2149eda14cbcSMatt Macy 2150eda14cbcSMatt Macy /* 2151eda14cbcSMatt Macy * If the metaslab is dirty it probably means that we've done 2152eda14cbcSMatt Macy * some allocations or frees that have changed our histograms 2153eda14cbcSMatt Macy * and thus the weight. 2154eda14cbcSMatt Macy */ 2155eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) { 2156eda14cbcSMatt Macy if (txg_list_member(&vd->vdev_ms_list, msp, t)) 2157eda14cbcSMatt Macy return; 2158eda14cbcSMatt Macy } 2159eda14cbcSMatt Macy 2160eda14cbcSMatt Macy /* 2161eda14cbcSMatt Macy * This verification checks that our in-memory state is consistent 2162eda14cbcSMatt Macy * with what's on disk. If the pool is read-only then there aren't 2163eda14cbcSMatt Macy * any changes and we just have the initially-loaded state.
2164eda14cbcSMatt Macy */ 2165eda14cbcSMatt Macy if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa)) 2166eda14cbcSMatt Macy return; 2167eda14cbcSMatt Macy 2168eda14cbcSMatt Macy /* Perform some extra verification on the in-core tree when we can. */ 2169eda14cbcSMatt Macy if (msp->ms_loaded) { 2170eda14cbcSMatt Macy range_tree_stat_verify(msp->ms_allocatable); 2171eda14cbcSMatt Macy VERIFY(space_map_histogram_verify(msp->ms_sm, 2172eda14cbcSMatt Macy msp->ms_allocatable)); 2173eda14cbcSMatt Macy } 2174eda14cbcSMatt Macy 2175eda14cbcSMatt Macy uint64_t weight = msp->ms_weight; 2176eda14cbcSMatt Macy uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 2177eda14cbcSMatt Macy boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight); 2178eda14cbcSMatt Macy uint64_t frag = msp->ms_fragmentation; 2179eda14cbcSMatt Macy uint64_t max_segsize = msp->ms_max_size; 2180eda14cbcSMatt Macy 2181eda14cbcSMatt Macy msp->ms_weight = 0; 2182eda14cbcSMatt Macy msp->ms_fragmentation = 0; 2183eda14cbcSMatt Macy 2184eda14cbcSMatt Macy /* 2185eda14cbcSMatt Macy * This function is used for verification purposes and thus should 2186eda14cbcSMatt Macy * not introduce any side-effects/mutations on the system's state. 2187eda14cbcSMatt Macy * 2188eda14cbcSMatt Macy * Regardless of whether metaslab_weight() thinks this metaslab 2189eda14cbcSMatt Macy * should be active or not, we want to ensure that the actual weight 2190eda14cbcSMatt Macy * (and therefore the value of ms_weight) would be the same if it 2191eda14cbcSMatt Macy * was to be recalculated at this point. 2192eda14cbcSMatt Macy * 2193eda14cbcSMatt Macy * In addition we set the nodirty flag so metaslab_weight() does 2194eda14cbcSMatt Macy * not dirty the metaslab for future TXGs (e.g. when trying to 2195eda14cbcSMatt Macy * force condensing to upgrade the metaslab spacemaps). 2196eda14cbcSMatt Macy */ 2197eda14cbcSMatt Macy msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active; 2198eda14cbcSMatt Macy 2199eda14cbcSMatt Macy VERIFY3U(max_segsize, ==, msp->ms_max_size); 2200eda14cbcSMatt Macy 2201eda14cbcSMatt Macy /* 2202eda14cbcSMatt Macy * If the weight type changed then there is no point in doing 2203eda14cbcSMatt Macy * verification. Revert fields to their original values. 2204eda14cbcSMatt Macy */ 2205eda14cbcSMatt Macy if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) || 2206eda14cbcSMatt Macy (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) { 2207eda14cbcSMatt Macy msp->ms_fragmentation = frag; 2208eda14cbcSMatt Macy msp->ms_weight = weight; 2209eda14cbcSMatt Macy return; 2210eda14cbcSMatt Macy } 2211eda14cbcSMatt Macy 2212eda14cbcSMatt Macy VERIFY3U(msp->ms_fragmentation, ==, frag); 2213eda14cbcSMatt Macy VERIFY3U(msp->ms_weight, ==, weight); 2214eda14cbcSMatt Macy } 2215eda14cbcSMatt Macy 2216eda14cbcSMatt Macy /* 2217eda14cbcSMatt Macy * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from 2218eda14cbcSMatt Macy * this class that was used longest ago, and attempt to unload it. We don't 2219eda14cbcSMatt Macy * want to spend too much time in this loop, so as not to degrade 2220eda14cbcSMatt Macy * performance, and we expect that most of the time this operation will 2221eda14cbcSMatt Macy * succeed. Between that and the normal unloading processing during txg sync, 2222eda14cbcSMatt Macy * we expect this to keep the metaslab memory usage under control.
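 * As a rough illustration (assuming the default zfs_metaslab_mem_limit
 * of 25 percent): on a system with 16 GiB of memory, eviction attempts
 * begin once the zfs_btree_leaf_cache footprint exceeds 4 GiB.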
2223eda14cbcSMatt Macy */ 2224eda14cbcSMatt Macy static void 2225eda14cbcSMatt Macy metaslab_potentially_evict(metaslab_class_t *mc) 2226eda14cbcSMatt Macy { 2227eda14cbcSMatt Macy #ifdef _KERNEL 2228eda14cbcSMatt Macy uint64_t allmem = arc_all_memory(); 2229eda14cbcSMatt Macy uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2230eda14cbcSMatt Macy uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache); 2231be181ee2SMartin Matuska uint_t tries = 0; 2232eda14cbcSMatt Macy for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size && 22333ff01b23SMartin Matuska tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2; 2234eda14cbcSMatt Macy tries++) { 2235eda14cbcSMatt Macy unsigned int idx = multilist_get_random_index( 22363ff01b23SMartin Matuska &mc->mc_metaslab_txg_list); 2237eda14cbcSMatt Macy multilist_sublist_t *mls = 22381719886fSMartin Matuska multilist_sublist_lock_idx(&mc->mc_metaslab_txg_list, idx); 2239eda14cbcSMatt Macy metaslab_t *msp = multilist_sublist_head(mls); 2240eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2241eda14cbcSMatt Macy while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 < 2242eda14cbcSMatt Macy inuse * size) { 22431719886fSMartin Matuska VERIFY3P(mls, ==, multilist_sublist_lock_idx( 22443ff01b23SMartin Matuska &mc->mc_metaslab_txg_list, idx)); 2245eda14cbcSMatt Macy ASSERT3U(idx, ==, 22463ff01b23SMartin Matuska metaslab_idx_func(&mc->mc_metaslab_txg_list, msp)); 2247eda14cbcSMatt Macy 2248eda14cbcSMatt Macy if (!multilist_link_active(&msp->ms_class_txg_node)) { 2249eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2250eda14cbcSMatt Macy break; 2251eda14cbcSMatt Macy } 2252eda14cbcSMatt Macy metaslab_t *next_msp = multilist_sublist_next(mls, msp); 2253eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2254eda14cbcSMatt Macy /* 2255eda14cbcSMatt Macy * If the metaslab is currently loading there are two 2256eda14cbcSMatt Macy * cases. If it's the metaslab we're evicting, we 2257eda14cbcSMatt Macy * can't continue on or we'll panic when we attempt to 2258eda14cbcSMatt Macy * recursively lock the mutex. If it's another 2259eda14cbcSMatt Macy * metaslab that's loading, it can be safely skipped, 2260eda14cbcSMatt Macy * since we know it's very new and therefore not a 2261eda14cbcSMatt Macy * good eviction candidate. We check later once the 2262eda14cbcSMatt Macy * lock is held that the metaslab is fully loaded 2263eda14cbcSMatt Macy * before actually unloading it. 2264eda14cbcSMatt Macy */ 2265eda14cbcSMatt Macy if (msp->ms_loading) { 2266eda14cbcSMatt Macy msp = next_msp; 2267eda14cbcSMatt Macy inuse = 2268eda14cbcSMatt Macy spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2269eda14cbcSMatt Macy continue; 2270eda14cbcSMatt Macy } 2271eda14cbcSMatt Macy /* 2272eda14cbcSMatt Macy * We can't unload metaslabs with no spacemap because 2273eda14cbcSMatt Macy * they're not ready to be unloaded yet. We can't 2274eda14cbcSMatt Macy * unload metaslabs with outstanding allocations 2275eda14cbcSMatt Macy * because doing so could cause the metaslab's weight 2276eda14cbcSMatt Macy * to decrease while it's unloaded, which violates an 2277eda14cbcSMatt Macy * invariant that we use to prevent unnecessary 2278eda14cbcSMatt Macy * loading. We also don't unload metaslabs that are 2279eda14cbcSMatt Macy * currently active because they are high-weight 2280eda14cbcSMatt Macy * metaslabs that are likely to be used in the near 2281eda14cbcSMatt Macy * future. 
2282eda14cbcSMatt Macy */ 2283eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 2284eda14cbcSMatt Macy if (msp->ms_allocator == -1 && msp->ms_sm != NULL && 2285eda14cbcSMatt Macy msp->ms_allocating_total == 0) { 2286eda14cbcSMatt Macy metaslab_unload(msp); 2287eda14cbcSMatt Macy } 2288eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2289eda14cbcSMatt Macy msp = next_msp; 2290eda14cbcSMatt Macy inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2291eda14cbcSMatt Macy } 2292eda14cbcSMatt Macy } 2293e92ffd9bSMartin Matuska #else 2294e92ffd9bSMartin Matuska (void) mc, (void) zfs_metaslab_mem_limit; 2295eda14cbcSMatt Macy #endif 2296eda14cbcSMatt Macy } 2297eda14cbcSMatt Macy 2298eda14cbcSMatt Macy static int 2299eda14cbcSMatt Macy metaslab_load_impl(metaslab_t *msp) 2300eda14cbcSMatt Macy { 2301eda14cbcSMatt Macy int error = 0; 2302eda14cbcSMatt Macy 2303eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2304eda14cbcSMatt Macy ASSERT(msp->ms_loading); 2305eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 2306eda14cbcSMatt Macy 2307eda14cbcSMatt Macy /* 2308eda14cbcSMatt Macy * We temporarily drop the lock to unblock other operations while we 2309eda14cbcSMatt Macy * are reading the space map. Therefore, metaslab_sync() and 2310eda14cbcSMatt Macy * metaslab_sync_done() can run at the same time as we do. 2311eda14cbcSMatt Macy * 2312eda14cbcSMatt Macy * If we are using the log space maps, metaslab_sync() can't write to 2313eda14cbcSMatt Macy * the metaslab's space map while we are loading as we only write to 2314eda14cbcSMatt Macy * it when we are flushing the metaslab, and that can't happen while 2315eda14cbcSMatt Macy * we are loading it. 2316eda14cbcSMatt Macy * 2317eda14cbcSMatt Macy * If we are not using log space maps though, metaslab_sync() can 2318eda14cbcSMatt Macy * append to the space map while we are loading. Therefore we load 2319eda14cbcSMatt Macy * only entries that existed when we started the load. Additionally, 2320eda14cbcSMatt Macy * metaslab_sync_done() has to wait for the load to complete because 2321eda14cbcSMatt Macy * there are potential races like metaslab_load() loading parts of the 2322eda14cbcSMatt Macy * space map that are currently being appended by metaslab_sync(). If 2323eda14cbcSMatt Macy * we didn't, the ms_allocatable would have entries that 2324eda14cbcSMatt Macy * metaslab_sync_done() would try to re-add later. 2325eda14cbcSMatt Macy * 2326eda14cbcSMatt Macy * That's why before dropping the lock we remember the synced length 2327eda14cbcSMatt Macy * of the metaslab and read up to that point of the space map, 2328eda14cbcSMatt Macy * ignoring entries appended by metaslab_sync() that happen after we 2329eda14cbcSMatt Macy * drop the lock. 
2330eda14cbcSMatt Macy */ 2331eda14cbcSMatt Macy uint64_t length = msp->ms_synced_length; 2332eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2333eda14cbcSMatt Macy 2334eda14cbcSMatt Macy hrtime_t load_start = gethrtime(); 2335eda14cbcSMatt Macy metaslab_rt_arg_t *mrap; 2336eda14cbcSMatt Macy if (msp->ms_allocatable->rt_arg == NULL) { 2337eda14cbcSMatt Macy mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 2338eda14cbcSMatt Macy } else { 2339eda14cbcSMatt Macy mrap = msp->ms_allocatable->rt_arg; 2340eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = NULL; 2341eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = NULL; 2342eda14cbcSMatt Macy } 2343eda14cbcSMatt Macy mrap->mra_bt = &msp->ms_allocatable_by_size; 2344eda14cbcSMatt Macy mrap->mra_floor_shift = metaslab_by_size_min_shift; 2345eda14cbcSMatt Macy 2346eda14cbcSMatt Macy if (msp->ms_sm != NULL) { 2347eda14cbcSMatt Macy error = space_map_load_length(msp->ms_sm, msp->ms_allocatable, 2348eda14cbcSMatt Macy SM_FREE, length); 2349eda14cbcSMatt Macy 2350eda14cbcSMatt Macy /* Now, populate the size-sorted tree. */ 2351eda14cbcSMatt Macy metaslab_rt_create(msp->ms_allocatable, mrap); 2352eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2353eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = mrap; 2354eda14cbcSMatt Macy 2355eda14cbcSMatt Macy struct mssa_arg arg = {0}; 2356eda14cbcSMatt Macy arg.rt = msp->ms_allocatable; 2357eda14cbcSMatt Macy arg.mra = mrap; 2358eda14cbcSMatt Macy range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add, 2359eda14cbcSMatt Macy &arg); 2360eda14cbcSMatt Macy } else { 2361eda14cbcSMatt Macy /* 2362eda14cbcSMatt Macy * Add the size-sorted tree first, since we don't need to load 2363eda14cbcSMatt Macy * the metaslab from the spacemap. 2364eda14cbcSMatt Macy */ 2365eda14cbcSMatt Macy metaslab_rt_create(msp->ms_allocatable, mrap); 2366eda14cbcSMatt Macy msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2367eda14cbcSMatt Macy msp->ms_allocatable->rt_arg = mrap; 2368eda14cbcSMatt Macy /* 2369eda14cbcSMatt Macy * The space map has not been allocated yet, so treat 2370eda14cbcSMatt Macy * all the space in the metaslab as free and add it to the 2371eda14cbcSMatt Macy * ms_allocatable tree. 2372eda14cbcSMatt Macy */ 2373eda14cbcSMatt Macy range_tree_add(msp->ms_allocatable, 2374eda14cbcSMatt Macy msp->ms_start, msp->ms_size); 2375eda14cbcSMatt Macy 2376f9693befSMartin Matuska if (msp->ms_new) { 2377eda14cbcSMatt Macy /* 2378eda14cbcSMatt Macy * If the ms_sm doesn't exist, this means that this 2379eda14cbcSMatt Macy * metaslab hasn't gone through metaslab_sync() and 2380eda14cbcSMatt Macy * thus has never been dirtied. So we shouldn't 2381eda14cbcSMatt Macy * expect any unflushed allocs or frees from previous 2382eda14cbcSMatt Macy * TXGs. 2383eda14cbcSMatt Macy */ 2384eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 2385eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 2386eda14cbcSMatt Macy } 2387eda14cbcSMatt Macy } 2388eda14cbcSMatt Macy 2389eda14cbcSMatt Macy /* 2390eda14cbcSMatt Macy * We need to grab the ms_sync_lock to prevent metaslab_sync() from 2391eda14cbcSMatt Macy * changing the ms_sm (or log_sm) and the metaslab's range trees 2392eda14cbcSMatt Macy * while we are about to use them and populate the ms_allocatable. 2393eda14cbcSMatt Macy * The ms_lock is insufficient for this because metaslab_sync() doesn't 2394eda14cbcSMatt Macy * hold the ms_lock while writing the ms_checkpointing tree to disk. 
2395eda14cbcSMatt Macy */ 2396eda14cbcSMatt Macy mutex_enter(&msp->ms_sync_lock); 2397eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 2398eda14cbcSMatt Macy 2399eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 2400eda14cbcSMatt Macy ASSERT(!msp->ms_flushing); 2401eda14cbcSMatt Macy 2402eda14cbcSMatt Macy if (error != 0) { 2403eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 2404eda14cbcSMatt Macy return (error); 2405eda14cbcSMatt Macy } 2406eda14cbcSMatt Macy 2407eda14cbcSMatt Macy ASSERT3P(msp->ms_group, !=, NULL); 2408eda14cbcSMatt Macy msp->ms_loaded = B_TRUE; 2409eda14cbcSMatt Macy 2410eda14cbcSMatt Macy /* 2411eda14cbcSMatt Macy * Apply all the unflushed changes to ms_allocatable right 2412eda14cbcSMatt Macy * away so any manipulations we do below have a clear view 2413eda14cbcSMatt Macy * of what is allocated and what is free. 2414eda14cbcSMatt Macy */ 2415eda14cbcSMatt Macy range_tree_walk(msp->ms_unflushed_allocs, 2416eda14cbcSMatt Macy range_tree_remove, msp->ms_allocatable); 2417eda14cbcSMatt Macy range_tree_walk(msp->ms_unflushed_frees, 2418eda14cbcSMatt Macy range_tree_add, msp->ms_allocatable); 2419eda14cbcSMatt Macy 2420eda14cbcSMatt Macy ASSERT3P(msp->ms_group, !=, NULL); 2421eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2422eda14cbcSMatt Macy if (spa_syncing_log_sm(spa) != NULL) { 2423eda14cbcSMatt Macy ASSERT(spa_feature_is_enabled(spa, 2424eda14cbcSMatt Macy SPA_FEATURE_LOG_SPACEMAP)); 2425eda14cbcSMatt Macy 2426eda14cbcSMatt Macy /* 2427eda14cbcSMatt Macy * If we use a log space map we add all the segments 2428eda14cbcSMatt Macy * that are in ms_unflushed_frees so they are available 2429eda14cbcSMatt Macy * for allocation. 2430eda14cbcSMatt Macy * 2431eda14cbcSMatt Macy * ms_allocatable needs to contain all free segments 2432eda14cbcSMatt Macy * that are ready for allocations (thus not segments 2433eda14cbcSMatt Macy * from ms_freeing, ms_freed, and the ms_defer trees). 2434eda14cbcSMatt Macy * But if we grab the lock in this code path at a sync 2435eda14cbcSMatt Macy * pass later than 1, then it also contains the 2436eda14cbcSMatt Macy * segments of ms_freed (they were added to it earlier 2437eda14cbcSMatt Macy * in this path through ms_unflushed_frees). So we 2438eda14cbcSMatt Macy * need to remove all the segments that exist in 2439eda14cbcSMatt Macy * ms_freed from ms_allocatable as they will be added 2440eda14cbcSMatt Macy * later in metaslab_sync_done(). 2441eda14cbcSMatt Macy * 2442eda14cbcSMatt Macy * When there's no log space map, the ms_allocatable 2443eda14cbcSMatt Macy * correctly doesn't contain any segments that exist 2444eda14cbcSMatt Macy * in ms_freed [see ms_synced_length]. 2445eda14cbcSMatt Macy */ 2446eda14cbcSMatt Macy range_tree_walk(msp->ms_freed, 2447eda14cbcSMatt Macy range_tree_remove, msp->ms_allocatable); 2448eda14cbcSMatt Macy } 2449eda14cbcSMatt Macy 2450eda14cbcSMatt Macy /* 2451eda14cbcSMatt Macy * If we are not using the log space map, ms_allocatable 2452eda14cbcSMatt Macy * contains the segments that exist in the ms_defer trees 2453eda14cbcSMatt Macy * [see ms_synced_length]. Thus we need to remove them 2454eda14cbcSMatt Macy * from ms_allocatable as they will be added again in 2455eda14cbcSMatt Macy * metaslab_sync_done(). 2456eda14cbcSMatt Macy * 2457eda14cbcSMatt Macy * If we are using the log space map, ms_allocatable still 2458eda14cbcSMatt Macy * contains the segments that exist in the ms_defer trees. 2459eda14cbcSMatt Macy * Not because it read them through the ms_sm though.
But 2460eda14cbcSMatt Macy * because these segments are part of ms_unflushed_frees 2461eda14cbcSMatt Macy * whose segments we add to ms_allocatable earlier in this 2462eda14cbcSMatt Macy * code path. 2463eda14cbcSMatt Macy */ 2464eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2465eda14cbcSMatt Macy range_tree_walk(msp->ms_defer[t], 2466eda14cbcSMatt Macy range_tree_remove, msp->ms_allocatable); 2467eda14cbcSMatt Macy } 2468eda14cbcSMatt Macy 2469eda14cbcSMatt Macy /* 2470eda14cbcSMatt Macy * Call metaslab_recalculate_weight_and_sort() now that the 2471eda14cbcSMatt Macy * metaslab is loaded so we get the metaslab's real weight. 2472eda14cbcSMatt Macy * 2473eda14cbcSMatt Macy * Unless this metaslab was created with older software and 2474eda14cbcSMatt Macy * has not yet been converted to use segment-based weight, we 2475eda14cbcSMatt Macy * expect the new weight to be better or equal to the weight 2476eda14cbcSMatt Macy * that the metaslab had while it was not loaded. This is 2477eda14cbcSMatt Macy * because the old weight does not take into account the 2478eda14cbcSMatt Macy * consolidation of adjacent segments between TXGs. [see 2479eda14cbcSMatt Macy * comment for ms_synchist and ms_deferhist[] for more info] 2480eda14cbcSMatt Macy */ 2481eda14cbcSMatt Macy uint64_t weight = msp->ms_weight; 2482eda14cbcSMatt Macy uint64_t max_size = msp->ms_max_size; 2483eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp); 2484eda14cbcSMatt Macy if (!WEIGHT_IS_SPACEBASED(weight)) 2485eda14cbcSMatt Macy ASSERT3U(weight, <=, msp->ms_weight); 2486eda14cbcSMatt Macy msp->ms_max_size = metaslab_largest_allocatable(msp); 2487eda14cbcSMatt Macy ASSERT3U(max_size, <=, msp->ms_max_size); 2488eda14cbcSMatt Macy hrtime_t load_end = gethrtime(); 2489eda14cbcSMatt Macy msp->ms_load_time = load_end; 2490eda14cbcSMatt Macy zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, " 2491eda14cbcSMatt Macy "ms_id %llu, smp_length %llu, " 2492eda14cbcSMatt Macy "unflushed_allocs %llu, unflushed_frees %llu, " 2493eda14cbcSMatt Macy "freed %llu, defer %llu + %llu, unloaded time %llu ms, " 2494eda14cbcSMatt Macy "loading_time %lld ms, ms_max_size %llu, " 2495eda14cbcSMatt Macy "max size error %lld, " 2496eda14cbcSMatt Macy "old_weight %llx, new_weight %llx", 249733b8c039SMartin Matuska (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), 249833b8c039SMartin Matuska (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 249933b8c039SMartin Matuska (u_longlong_t)msp->ms_id, 250033b8c039SMartin Matuska (u_longlong_t)space_map_length(msp->ms_sm), 250133b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs), 250233b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_unflushed_frees), 250333b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_freed), 250433b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_defer[0]), 250533b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_defer[1]), 2506eda14cbcSMatt Macy (longlong_t)((load_start - msp->ms_unload_time) / 1000000), 2507eda14cbcSMatt Macy (longlong_t)((load_end - load_start) / 1000000), 250833b8c039SMartin Matuska (u_longlong_t)msp->ms_max_size, 250933b8c039SMartin Matuska (u_longlong_t)msp->ms_max_size - max_size, 251033b8c039SMartin Matuska (u_longlong_t)weight, (u_longlong_t)msp->ms_weight); 2511eda14cbcSMatt Macy 2512eda14cbcSMatt Macy metaslab_verify_space(msp, spa_syncing_txg(spa)); 2513eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 2514eda14cbcSMatt Macy return (0); 2515eda14cbcSMatt Macy } 
2516eda14cbcSMatt Macy 2517eda14cbcSMatt Macy int 2518eda14cbcSMatt Macy metaslab_load(metaslab_t *msp) 2519eda14cbcSMatt Macy { 2520eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2521eda14cbcSMatt Macy 2522eda14cbcSMatt Macy /* 2523eda14cbcSMatt Macy * There may be another thread loading the same metaslab; if that's 2524eda14cbcSMatt Macy * the case, just wait until the other thread is done and return. 2525eda14cbcSMatt Macy */ 2526eda14cbcSMatt Macy metaslab_load_wait(msp); 2527eda14cbcSMatt Macy if (msp->ms_loaded) 2528eda14cbcSMatt Macy return (0); 2529eda14cbcSMatt Macy VERIFY(!msp->ms_loading); 2530eda14cbcSMatt Macy ASSERT(!msp->ms_condensing); 2531eda14cbcSMatt Macy 2532eda14cbcSMatt Macy /* 2533eda14cbcSMatt Macy * We set the loading flag BEFORE potentially dropping the lock to 2534eda14cbcSMatt Macy * wait for an ongoing flush (see ms_flushing below). This way other 2535eda14cbcSMatt Macy * threads know that there is already a thread that is loading this 2536eda14cbcSMatt Macy * metaslab. 2537eda14cbcSMatt Macy */ 2538eda14cbcSMatt Macy msp->ms_loading = B_TRUE; 2539eda14cbcSMatt Macy 2540eda14cbcSMatt Macy /* 2541eda14cbcSMatt Macy * Wait for any in-progress flushing to finish as we drop the ms_lock 2542eda14cbcSMatt Macy * both here (during space_map_load()) and in metaslab_flush() (when 2543eda14cbcSMatt Macy * we flush our changes to the ms_sm). 2544eda14cbcSMatt Macy */ 2545eda14cbcSMatt Macy if (msp->ms_flushing) 2546eda14cbcSMatt Macy metaslab_flush_wait(msp); 2547eda14cbcSMatt Macy 2548eda14cbcSMatt Macy /* 2549eda14cbcSMatt Macy * In the event that we were waiting for the metaslab to be 2550eda14cbcSMatt Macy * flushed (where we temporarily dropped the ms_lock), ensure that 2551eda14cbcSMatt Macy * no one else loaded the metaslab somehow. 2552eda14cbcSMatt Macy */ 2553eda14cbcSMatt Macy ASSERT(!msp->ms_loaded); 2554eda14cbcSMatt Macy 2555eda14cbcSMatt Macy /* 2556eda14cbcSMatt Macy * If we're loading a metaslab in the normal class, consider evicting 2557eda14cbcSMatt Macy * another one to keep our memory usage under the limit defined by the 2558eda14cbcSMatt Macy * zfs_metaslab_mem_limit tunable. 2559eda14cbcSMatt Macy */ 2560eda14cbcSMatt Macy if (spa_normal_class(msp->ms_group->mg_class->mc_spa) == 2561eda14cbcSMatt Macy msp->ms_group->mg_class) { 2562eda14cbcSMatt Macy metaslab_potentially_evict(msp->ms_group->mg_class); 2563eda14cbcSMatt Macy } 2564eda14cbcSMatt Macy 2565eda14cbcSMatt Macy int error = metaslab_load_impl(msp); 2566eda14cbcSMatt Macy 2567eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2568eda14cbcSMatt Macy msp->ms_loading = B_FALSE; 2569eda14cbcSMatt Macy cv_broadcast(&msp->ms_load_cv); 2570eda14cbcSMatt Macy 2571eda14cbcSMatt Macy return (error); 2572eda14cbcSMatt Macy } 2573eda14cbcSMatt Macy 2574eda14cbcSMatt Macy void 2575eda14cbcSMatt Macy metaslab_unload(metaslab_t *msp) 2576eda14cbcSMatt Macy { 2577eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2578eda14cbcSMatt Macy 2579eda14cbcSMatt Macy /* 2580eda14cbcSMatt Macy * This can happen if a metaslab is selected for eviction (in 2581eda14cbcSMatt Macy * metaslab_potentially_evict) and then unloaded during spa_sync (via 2582eda14cbcSMatt Macy * metaslab_class_evict_old).
2583eda14cbcSMatt Macy */ 2584eda14cbcSMatt Macy if (!msp->ms_loaded) 2585eda14cbcSMatt Macy return; 2586eda14cbcSMatt Macy 2587eda14cbcSMatt Macy range_tree_vacate(msp->ms_allocatable, NULL, NULL); 2588eda14cbcSMatt Macy msp->ms_loaded = B_FALSE; 2589eda14cbcSMatt Macy msp->ms_unload_time = gethrtime(); 2590eda14cbcSMatt Macy 2591eda14cbcSMatt Macy msp->ms_activation_weight = 0; 2592eda14cbcSMatt Macy msp->ms_weight &= ~METASLAB_ACTIVE_MASK; 2593eda14cbcSMatt Macy 2594eda14cbcSMatt Macy if (msp->ms_group != NULL) { 2595eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 2596eda14cbcSMatt Macy multilist_sublist_t *mls = 25973ff01b23SMartin Matuska multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 2598eda14cbcSMatt Macy if (multilist_link_active(&msp->ms_class_txg_node)) 2599eda14cbcSMatt Macy multilist_sublist_remove(mls, msp); 2600eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2601eda14cbcSMatt Macy 2602eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2603eda14cbcSMatt Macy zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, " 2604eda14cbcSMatt Macy "ms_id %llu, weight %llx, " 2605eda14cbcSMatt Macy "selected txg %llu (%llu ms ago), alloc_txg %llu, " 2606eda14cbcSMatt Macy "loaded %llu ms ago, max_size %llu", 260733b8c039SMartin Matuska (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), 260833b8c039SMartin Matuska (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 260933b8c039SMartin Matuska (u_longlong_t)msp->ms_id, 261033b8c039SMartin Matuska (u_longlong_t)msp->ms_weight, 261133b8c039SMartin Matuska (u_longlong_t)msp->ms_selected_txg, 261233b8c039SMartin Matuska (u_longlong_t)(msp->ms_unload_time - 261333b8c039SMartin Matuska msp->ms_selected_time) / 1000 / 1000, 261433b8c039SMartin Matuska (u_longlong_t)msp->ms_alloc_txg, 261533b8c039SMartin Matuska (u_longlong_t)(msp->ms_unload_time - 261633b8c039SMartin Matuska msp->ms_load_time) / 1000 / 1000, 261733b8c039SMartin Matuska (u_longlong_t)msp->ms_max_size); 2618eda14cbcSMatt Macy } 2619eda14cbcSMatt Macy 2620eda14cbcSMatt Macy /* 2621eda14cbcSMatt Macy * We explicitly recalculate the metaslab's weight based on its space 2622eda14cbcSMatt Macy * map (as it is now not loaded). We want unloaded metaslabs to always 2623eda14cbcSMatt Macy * have their weights calculated from the space map histograms, while 2624eda14cbcSMatt Macy * loaded ones have it calculated from their in-core range tree 2625eda14cbcSMatt Macy * [see metaslab_load()]. This way, the weight reflects the information 2626eda14cbcSMatt Macy * available in-core, whether it is loaded or not. 2627eda14cbcSMatt Macy * 2628eda14cbcSMatt Macy * If ms_group == NULL, it means that we came here from metaslab_fini(), 2629eda14cbcSMatt Macy * at which point it doesn't make sense for us to do the recalculation 2630eda14cbcSMatt Macy * and the sorting. 2631eda14cbcSMatt Macy */ 2632eda14cbcSMatt Macy if (msp->ms_group != NULL) 2633eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp); 2634eda14cbcSMatt Macy } 2635eda14cbcSMatt Macy 2636eda14cbcSMatt Macy /* 2637eda14cbcSMatt Macy * We want to optimize the memory use of the per-metaslab range 2638eda14cbcSMatt Macy * trees. To do this, we store the segments in the range trees in 2639eda14cbcSMatt Macy * units of sectors, zero-indexing from the start of the metaslab. If 2640eda14cbcSMatt Macy * vdev_ms_shift - vdev_ashift is less than 32, we can store 2641eda14cbcSMatt Macy * the ranges using two uint32_ts, rather than two uint64_ts.
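 *
 * For example, with an ashift of 9 (512-byte sectors) and a
 * vdev_ms_shift of 34 (a 16GB metaslab), 34 - 9 = 25, so every offset
 * within the metaslab fits in 2^25 sector units and both bounds of a
 * segment fit comfortably in uint32_ts.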
2642eda14cbcSMatt Macy */ 2643eda14cbcSMatt Macy range_seg_type_t 2644eda14cbcSMatt Macy metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp, 2645eda14cbcSMatt Macy uint64_t *start, uint64_t *shift) 2646eda14cbcSMatt Macy { 2647eda14cbcSMatt Macy if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 && 2648eda14cbcSMatt Macy !zfs_metaslab_force_large_segs) { 2649eda14cbcSMatt Macy *shift = vdev->vdev_ashift; 2650eda14cbcSMatt Macy *start = msp->ms_start; 2651eda14cbcSMatt Macy return (RANGE_SEG32); 2652eda14cbcSMatt Macy } else { 2653eda14cbcSMatt Macy *shift = 0; 2654eda14cbcSMatt Macy *start = 0; 2655eda14cbcSMatt Macy return (RANGE_SEG64); 2656eda14cbcSMatt Macy } 2657eda14cbcSMatt Macy } 2658eda14cbcSMatt Macy 2659eda14cbcSMatt Macy void 2660eda14cbcSMatt Macy metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) 2661eda14cbcSMatt Macy { 2662eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 2663eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 2664eda14cbcSMatt Macy multilist_sublist_t *mls = 26653ff01b23SMartin Matuska multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 2666eda14cbcSMatt Macy if (multilist_link_active(&msp->ms_class_txg_node)) 2667eda14cbcSMatt Macy multilist_sublist_remove(mls, msp); 2668eda14cbcSMatt Macy msp->ms_selected_txg = txg; 2669eda14cbcSMatt Macy msp->ms_selected_time = gethrtime(); 2670eda14cbcSMatt Macy multilist_sublist_insert_tail(mls, msp); 2671eda14cbcSMatt Macy multilist_sublist_unlock(mls); 2672eda14cbcSMatt Macy } 2673eda14cbcSMatt Macy 2674eda14cbcSMatt Macy void 2675eda14cbcSMatt Macy metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta, 2676eda14cbcSMatt Macy int64_t defer_delta, int64_t space_delta) 2677eda14cbcSMatt Macy { 2678eda14cbcSMatt Macy vdev_space_update(vd, alloc_delta, defer_delta, space_delta); 2679eda14cbcSMatt Macy 2680eda14cbcSMatt Macy ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent); 2681eda14cbcSMatt Macy ASSERT(vd->vdev_ms_count != 0); 2682eda14cbcSMatt Macy 2683eda14cbcSMatt Macy metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta, 2684eda14cbcSMatt Macy vdev_deflated_space(vd, space_delta)); 2685eda14cbcSMatt Macy } 2686eda14cbcSMatt Macy 2687eda14cbcSMatt Macy int 2688eda14cbcSMatt Macy metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, 2689eda14cbcSMatt Macy uint64_t txg, metaslab_t **msp) 2690eda14cbcSMatt Macy { 2691eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 2692eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2693eda14cbcSMatt Macy objset_t *mos = spa->spa_meta_objset; 2694eda14cbcSMatt Macy metaslab_t *ms; 2695eda14cbcSMatt Macy int error; 2696eda14cbcSMatt Macy 2697eda14cbcSMatt Macy ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); 2698eda14cbcSMatt Macy mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL); 2699eda14cbcSMatt Macy mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL); 2700eda14cbcSMatt Macy cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); 2701eda14cbcSMatt Macy cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL); 2702eda14cbcSMatt Macy multilist_link_init(&ms->ms_class_txg_node); 2703eda14cbcSMatt Macy 2704eda14cbcSMatt Macy ms->ms_id = id; 2705eda14cbcSMatt Macy ms->ms_start = id << vd->vdev_ms_shift; 2706eda14cbcSMatt Macy ms->ms_size = 1ULL << vd->vdev_ms_shift; 2707eda14cbcSMatt Macy ms->ms_allocator = -1; 2708eda14cbcSMatt Macy ms->ms_new = B_TRUE; 2709eda14cbcSMatt Macy 27107877fdebSMatt Macy vdev_ops_t *ops = vd->vdev_ops; 27117877fdebSMatt Macy if (ops->vdev_op_metaslab_init != NULL) 
27127877fdebSMatt Macy ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size); 27137877fdebSMatt Macy 2714eda14cbcSMatt Macy /* 2715eda14cbcSMatt Macy * We only open space map objects that already exist. All others 271681b22a98SMartin Matuska * will be opened when we finally allocate an object for them. For 271781b22a98SMartin Matuska * readonly pools there is no need to open the space map object. 2718eda14cbcSMatt Macy * 2719eda14cbcSMatt Macy * Note: 2720eda14cbcSMatt Macy * When called from vdev_expand(), we can't call into the DMU as 2721eda14cbcSMatt Macy * we are holding the spa_config_lock as a writer and we would 2722eda14cbcSMatt Macy * deadlock [see relevant comment in vdev_metaslab_init()]. In 2723eda14cbcSMatt Macy * that case, the object parameter is zero though, so we won't 2724eda14cbcSMatt Macy * call into the DMU. 2725eda14cbcSMatt Macy */ 272681b22a98SMartin Matuska if (object != 0 && !(spa->spa_mode == SPA_MODE_READ && 272781b22a98SMartin Matuska !spa->spa_read_spacemaps)) { 2728eda14cbcSMatt Macy error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start, 2729eda14cbcSMatt Macy ms->ms_size, vd->vdev_ashift); 2730eda14cbcSMatt Macy 2731eda14cbcSMatt Macy if (error != 0) { 2732eda14cbcSMatt Macy kmem_free(ms, sizeof (metaslab_t)); 2733eda14cbcSMatt Macy return (error); 2734eda14cbcSMatt Macy } 2735eda14cbcSMatt Macy 2736eda14cbcSMatt Macy ASSERT(ms->ms_sm != NULL); 2737eda14cbcSMatt Macy ms->ms_allocated_space = space_map_allocated(ms->ms_sm); 2738eda14cbcSMatt Macy } 2739eda14cbcSMatt Macy 2740eda14cbcSMatt Macy uint64_t shift, start; 2741f9693befSMartin Matuska range_seg_type_t type = 2742f9693befSMartin Matuska metaslab_calculate_range_tree_type(vd, ms, &start, &shift); 2743eda14cbcSMatt Macy 2744eda14cbcSMatt Macy ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift); 2745f9693befSMartin Matuska for (int t = 0; t < TXG_SIZE; t++) { 2746f9693befSMartin Matuska ms->ms_allocating[t] = range_tree_create(NULL, type, 2747f9693befSMartin Matuska NULL, start, shift); 2748f9693befSMartin Matuska } 2749f9693befSMartin Matuska ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift); 2750f9693befSMartin Matuska ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift); 2751f9693befSMartin Matuska for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2752f9693befSMartin Matuska ms->ms_defer[t] = range_tree_create(NULL, type, NULL, 2753f9693befSMartin Matuska start, shift); 2754f9693befSMartin Matuska } 2755f9693befSMartin Matuska ms->ms_checkpointing = 2756f9693befSMartin Matuska range_tree_create(NULL, type, NULL, start, shift); 2757f9693befSMartin Matuska ms->ms_unflushed_allocs = 2758f9693befSMartin Matuska range_tree_create(NULL, type, NULL, start, shift); 2759f9693befSMartin Matuska 2760f9693befSMartin Matuska metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 2761f9693befSMartin Matuska mrap->mra_bt = &ms->ms_unflushed_frees_by_size; 2762f9693befSMartin Matuska mrap->mra_floor_shift = metaslab_by_size_min_shift; 2763f9693befSMartin Matuska ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops, 2764f9693befSMartin Matuska type, mrap, start, shift); 2765eda14cbcSMatt Macy 2766eda14cbcSMatt Macy ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift); 2767eda14cbcSMatt Macy 2768eda14cbcSMatt Macy metaslab_group_add(mg, ms); 2769eda14cbcSMatt Macy metaslab_set_fragmentation(ms, B_FALSE); 2770eda14cbcSMatt Macy 2771eda14cbcSMatt Macy /* 2772eda14cbcSMatt Macy * If we're opening an existing pool (txg == 0) or creating
2773eda14cbcSMatt Macy * a new one (txg == TXG_INITIAL), all space is available now. 2774eda14cbcSMatt Macy * If we're adding space to an existing pool, the new space 2775eda14cbcSMatt Macy * does not become available until after this txg has synced. 2776eda14cbcSMatt Macy * The metaslab's weight will also be initialized when we sync 2777eda14cbcSMatt Macy * out this txg. This ensures that we don't attempt to allocate 2778eda14cbcSMatt Macy * from it before we have initialized it completely. 2779eda14cbcSMatt Macy */ 2780eda14cbcSMatt Macy if (txg <= TXG_INITIAL) { 2781eda14cbcSMatt Macy metaslab_sync_done(ms, 0); 2782eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 2783eda14cbcSMatt Macy metaslab_allocated_space(ms), 0, 0); 2784eda14cbcSMatt Macy } 2785eda14cbcSMatt Macy 2786eda14cbcSMatt Macy if (txg != 0) { 2787eda14cbcSMatt Macy vdev_dirty(vd, 0, NULL, txg); 2788eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, ms, txg); 2789eda14cbcSMatt Macy } 2790eda14cbcSMatt Macy 2791eda14cbcSMatt Macy *msp = ms; 2792eda14cbcSMatt Macy 2793eda14cbcSMatt Macy return (0); 2794eda14cbcSMatt Macy } 2795eda14cbcSMatt Macy 2796eda14cbcSMatt Macy static void 2797eda14cbcSMatt Macy metaslab_fini_flush_data(metaslab_t *msp) 2798eda14cbcSMatt Macy { 2799eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2800eda14cbcSMatt Macy 2801eda14cbcSMatt Macy if (metaslab_unflushed_txg(msp) == 0) { 2802eda14cbcSMatt Macy ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), 2803eda14cbcSMatt Macy ==, NULL); 2804eda14cbcSMatt Macy return; 2805eda14cbcSMatt Macy } 2806eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 2807eda14cbcSMatt Macy 2808eda14cbcSMatt Macy mutex_enter(&spa->spa_flushed_ms_lock); 2809eda14cbcSMatt Macy avl_remove(&spa->spa_metaslabs_by_flushed, msp); 2810eda14cbcSMatt Macy mutex_exit(&spa->spa_flushed_ms_lock); 2811eda14cbcSMatt Macy 2812eda14cbcSMatt Macy spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp)); 2813716fd348SMartin Matuska spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp), 2814716fd348SMartin Matuska metaslab_unflushed_dirty(msp)); 2815eda14cbcSMatt Macy } 2816eda14cbcSMatt Macy 2817eda14cbcSMatt Macy uint64_t 2818eda14cbcSMatt Macy metaslab_unflushed_changes_memused(metaslab_t *ms) 2819eda14cbcSMatt Macy { 2820eda14cbcSMatt Macy return ((range_tree_numsegs(ms->ms_unflushed_allocs) + 2821eda14cbcSMatt Macy range_tree_numsegs(ms->ms_unflushed_frees)) * 2822eda14cbcSMatt Macy ms->ms_unflushed_allocs->rt_root.bt_elem_size); 2823eda14cbcSMatt Macy } 2824eda14cbcSMatt Macy 2825eda14cbcSMatt Macy void 2826eda14cbcSMatt Macy metaslab_fini(metaslab_t *msp) 2827eda14cbcSMatt Macy { 2828eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 2829eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 2830eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 2831eda14cbcSMatt Macy 2832eda14cbcSMatt Macy metaslab_fini_flush_data(msp); 2833eda14cbcSMatt Macy 2834eda14cbcSMatt Macy metaslab_group_remove(mg, msp); 2835eda14cbcSMatt Macy 2836eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 2837eda14cbcSMatt Macy VERIFY(msp->ms_group == NULL); 2838f9693befSMartin Matuska 2839184c1b94SMartin Matuska /* 2840f9693befSMartin Matuska * If this metaslab hasn't been through metaslab_sync_done() yet its 2841184c1b94SMartin Matuska * space hasn't been accounted for in its vdev and doesn't need to be 2842184c1b94SMartin Matuska * subtracted. 
2843184c1b94SMartin Matuska */ 2844f9693befSMartin Matuska if (!msp->ms_new) { 2845eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 2846eda14cbcSMatt Macy -metaslab_allocated_space(msp), 0, -msp->ms_size); 2847eda14cbcSMatt Macy 2848184c1b94SMartin Matuska } 2849eda14cbcSMatt Macy space_map_close(msp->ms_sm); 2850eda14cbcSMatt Macy msp->ms_sm = NULL; 2851eda14cbcSMatt Macy 2852eda14cbcSMatt Macy metaslab_unload(msp); 2853184c1b94SMartin Matuska 2854eda14cbcSMatt Macy range_tree_destroy(msp->ms_allocatable); 2855eda14cbcSMatt Macy range_tree_destroy(msp->ms_freeing); 2856eda14cbcSMatt Macy range_tree_destroy(msp->ms_freed); 2857eda14cbcSMatt Macy 2858eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 2859eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 2860eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 2861eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 2862eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 2863eda14cbcSMatt Macy range_tree_destroy(msp->ms_unflushed_allocs); 2864184c1b94SMartin Matuska range_tree_destroy(msp->ms_checkpointing); 2865eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 2866eda14cbcSMatt Macy range_tree_destroy(msp->ms_unflushed_frees); 2867eda14cbcSMatt Macy 2868eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) { 2869eda14cbcSMatt Macy range_tree_destroy(msp->ms_allocating[t]); 2870eda14cbcSMatt Macy } 2871eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2872eda14cbcSMatt Macy range_tree_destroy(msp->ms_defer[t]); 2873eda14cbcSMatt Macy } 2874eda14cbcSMatt Macy ASSERT0(msp->ms_deferspace); 2875eda14cbcSMatt Macy 2876eda14cbcSMatt Macy for (int t = 0; t < TXG_SIZE; t++) 2877eda14cbcSMatt Macy ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t)); 2878eda14cbcSMatt Macy 2879eda14cbcSMatt Macy range_tree_vacate(msp->ms_trim, NULL, NULL); 2880eda14cbcSMatt Macy range_tree_destroy(msp->ms_trim); 2881eda14cbcSMatt Macy 2882eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 2883eda14cbcSMatt Macy cv_destroy(&msp->ms_load_cv); 2884eda14cbcSMatt Macy cv_destroy(&msp->ms_flush_cv); 2885eda14cbcSMatt Macy mutex_destroy(&msp->ms_lock); 2886eda14cbcSMatt Macy mutex_destroy(&msp->ms_sync_lock); 2887eda14cbcSMatt Macy ASSERT3U(msp->ms_allocator, ==, -1); 2888eda14cbcSMatt Macy 2889eda14cbcSMatt Macy kmem_free(msp, sizeof (metaslab_t)); 2890eda14cbcSMatt Macy } 2891eda14cbcSMatt Macy 2892eda14cbcSMatt Macy #define FRAGMENTATION_TABLE_SIZE 17 2893eda14cbcSMatt Macy 2894eda14cbcSMatt Macy /* 2895eda14cbcSMatt Macy * This table defines a segment size based fragmentation metric that will 2896eda14cbcSMatt Macy * allow each metaslab to derive its own fragmentation value. This is done 2897eda14cbcSMatt Macy * by calculating the space in each bucket of the spacemap histogram and 2898eda14cbcSMatt Macy * multiplying that by the fragmentation metric in this table. Doing 2899eda14cbcSMatt Macy * this for all buckets and dividing it by the total amount of free 2900eda14cbcSMatt Macy * space in this metaslab (i.e. the total free space in all buckets) gives 2901eda14cbcSMatt Macy * us the fragmentation metric. This means that a high fragmentation metric 2902eda14cbcSMatt Macy * equates to most of the free space being comprised of small segments. 2903eda14cbcSMatt Macy * Conversely, if the metric is low, then most of the free space is in 2904eda14cbcSMatt Macy * large segments. 
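 * For example (illustrative numbers), if half of a metaslab's free
 * space sits in 8K segments (factor 90) and half in 1M segments
 * (factor 20), its fragmentation works out to (90 + 20) / 2 = 55%.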
A 10% change in fragmentation equates to approximately 2905eda14cbcSMatt Macy * double the number of segments. 2906eda14cbcSMatt Macy * 2907eda14cbcSMatt Macy * This table defines 0% fragmented space using 16MB segments. Testing has 2908eda14cbcSMatt Macy * shown that segments that are greater than or equal to 16MB do not suffer 2909eda14cbcSMatt Macy * from drastic performance problems. Using this value, we derive the rest 2910eda14cbcSMatt Macy * of the table. Since the fragmentation value is never stored on disk, it 2911eda14cbcSMatt Macy * is possible to change these calculations in the future. 2912eda14cbcSMatt Macy */ 2913e92ffd9bSMartin Matuska static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { 2914eda14cbcSMatt Macy 100, /* 512B */ 2915eda14cbcSMatt Macy 100, /* 1K */ 2916eda14cbcSMatt Macy 98, /* 2K */ 2917eda14cbcSMatt Macy 95, /* 4K */ 2918eda14cbcSMatt Macy 90, /* 8K */ 2919eda14cbcSMatt Macy 80, /* 16K */ 2920eda14cbcSMatt Macy 70, /* 32K */ 2921eda14cbcSMatt Macy 60, /* 64K */ 2922eda14cbcSMatt Macy 50, /* 128K */ 2923eda14cbcSMatt Macy 40, /* 256K */ 2924eda14cbcSMatt Macy 30, /* 512K */ 2925eda14cbcSMatt Macy 20, /* 1M */ 2926eda14cbcSMatt Macy 15, /* 2M */ 2927eda14cbcSMatt Macy 10, /* 4M */ 2928eda14cbcSMatt Macy 5, /* 8M */ 2929eda14cbcSMatt Macy 0 /* 16M */ 2930eda14cbcSMatt Macy }; 2931eda14cbcSMatt Macy 2932eda14cbcSMatt Macy /* 2933eda14cbcSMatt Macy * Calculate the metaslab's fragmentation metric and set ms_fragmentation. 2934eda14cbcSMatt Macy * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not 2935eda14cbcSMatt Macy * been upgraded and does not support this metric. Otherwise, the return 2936eda14cbcSMatt Macy * value should be in the range [0, 100]. 2937eda14cbcSMatt Macy */ 2938eda14cbcSMatt Macy static void 2939eda14cbcSMatt Macy metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty) 2940eda14cbcSMatt Macy { 2941eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2942eda14cbcSMatt Macy uint64_t fragmentation = 0; 2943eda14cbcSMatt Macy uint64_t total = 0; 2944eda14cbcSMatt Macy boolean_t feature_enabled = spa_feature_is_enabled(spa, 2945eda14cbcSMatt Macy SPA_FEATURE_SPACEMAP_HISTOGRAM); 2946eda14cbcSMatt Macy 2947eda14cbcSMatt Macy if (!feature_enabled) { 2948eda14cbcSMatt Macy msp->ms_fragmentation = ZFS_FRAG_INVALID; 2949eda14cbcSMatt Macy return; 2950eda14cbcSMatt Macy } 2951eda14cbcSMatt Macy 2952eda14cbcSMatt Macy /* 2953eda14cbcSMatt Macy * A null space map means that the entire metaslab is free 2954eda14cbcSMatt Macy * and thus is not fragmented. 2955eda14cbcSMatt Macy */ 2956eda14cbcSMatt Macy if (msp->ms_sm == NULL) { 2957eda14cbcSMatt Macy msp->ms_fragmentation = 0; 2958eda14cbcSMatt Macy return; 2959eda14cbcSMatt Macy } 2960eda14cbcSMatt Macy 2961eda14cbcSMatt Macy /* 2962eda14cbcSMatt Macy * If this metaslab's space map has not been upgraded, flag it 2963eda14cbcSMatt Macy * so that we upgrade next time we encounter it. 2964eda14cbcSMatt Macy */ 2965eda14cbcSMatt Macy if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { 2966eda14cbcSMatt Macy uint64_t txg = spa_syncing_txg(spa); 2967eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd; 2968eda14cbcSMatt Macy 2969eda14cbcSMatt Macy /* 2970eda14cbcSMatt Macy * If we've reached the final dirty txg, then we must 2971eda14cbcSMatt Macy * be shutting down the pool. We don't want to dirty 2972eda14cbcSMatt Macy * any data past this point so skip setting the condense 2973eda14cbcSMatt Macy * flag. 
We can retry this action the next time the pool 2974eda14cbcSMatt Macy * is imported. We also skip marking this metaslab for 2975eda14cbcSMatt Macy * condensing if the caller has explicitly set nodirty. 2976eda14cbcSMatt Macy */ 2977eda14cbcSMatt Macy if (!nodirty && 2978eda14cbcSMatt Macy spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { 2979eda14cbcSMatt Macy msp->ms_condense_wanted = B_TRUE; 2980eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 2981eda14cbcSMatt Macy zfs_dbgmsg("txg %llu, requesting force condense: " 298233b8c039SMartin Matuska "ms_id %llu, vdev_id %llu", (u_longlong_t)txg, 298333b8c039SMartin Matuska (u_longlong_t)msp->ms_id, 298433b8c039SMartin Matuska (u_longlong_t)vd->vdev_id); 2985eda14cbcSMatt Macy } 2986eda14cbcSMatt Macy msp->ms_fragmentation = ZFS_FRAG_INVALID; 2987eda14cbcSMatt Macy return; 2988eda14cbcSMatt Macy } 2989eda14cbcSMatt Macy 2990eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 2991eda14cbcSMatt Macy uint64_t space = 0; 2992eda14cbcSMatt Macy uint8_t shift = msp->ms_sm->sm_shift; 2993eda14cbcSMatt Macy 2994eda14cbcSMatt Macy int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, 2995eda14cbcSMatt Macy FRAGMENTATION_TABLE_SIZE - 1); 2996eda14cbcSMatt Macy 2997eda14cbcSMatt Macy if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) 2998eda14cbcSMatt Macy continue; 2999eda14cbcSMatt Macy 3000eda14cbcSMatt Macy space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); 3001eda14cbcSMatt Macy total += space; 3002eda14cbcSMatt Macy 3003eda14cbcSMatt Macy ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); 3004eda14cbcSMatt Macy fragmentation += space * zfs_frag_table[idx]; 3005eda14cbcSMatt Macy } 3006eda14cbcSMatt Macy 3007eda14cbcSMatt Macy if (total > 0) 3008eda14cbcSMatt Macy fragmentation /= total; 3009eda14cbcSMatt Macy ASSERT3U(fragmentation, <=, 100); 3010eda14cbcSMatt Macy 3011eda14cbcSMatt Macy msp->ms_fragmentation = fragmentation; 3012eda14cbcSMatt Macy } 3013eda14cbcSMatt Macy 3014eda14cbcSMatt Macy /* 3015eda14cbcSMatt Macy * Compute a weight -- a selection preference value -- for the given metaslab. 3016eda14cbcSMatt Macy * This is based on the amount of free space, the level of fragmentation, 3017eda14cbcSMatt Macy * the LBA range, and whether the metaslab is loaded. 3018eda14cbcSMatt Macy */ 3019eda14cbcSMatt Macy static uint64_t 3020eda14cbcSMatt Macy metaslab_space_weight(metaslab_t *msp) 3021eda14cbcSMatt Macy { 3022eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3023eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 3024eda14cbcSMatt Macy uint64_t weight, space; 3025eda14cbcSMatt Macy 3026eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3027eda14cbcSMatt Macy 3028eda14cbcSMatt Macy /* 3029eda14cbcSMatt Macy * The baseline weight is the metaslab's free space. 3030eda14cbcSMatt Macy */ 3031eda14cbcSMatt Macy space = msp->ms_size - metaslab_allocated_space(msp); 3032eda14cbcSMatt Macy 3033eda14cbcSMatt Macy if (metaslab_fragmentation_factor_enabled && 3034eda14cbcSMatt Macy msp->ms_fragmentation != ZFS_FRAG_INVALID) { 3035eda14cbcSMatt Macy /* 3036eda14cbcSMatt Macy * Use the fragmentation information to inversely scale 3037eda14cbcSMatt Macy * down the baseline weight. We need to ensure that we 3038eda14cbcSMatt Macy * don't exclude this metaslab completely when it's 100% 3039eda14cbcSMatt Macy * fragmented. To avoid this we reduce the fragmented value 3040eda14cbcSMatt Macy * by 1. 
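 *
 * For example, a metaslab that is 100% fragmented keeps
 * (100 - (100 - 1)) / 100 = 1% of its baseline weight instead of
 * dropping to zero, so it can still be selected as a last resort.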
3041eda14cbcSMatt Macy */ 3042eda14cbcSMatt Macy space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; 3043eda14cbcSMatt Macy 3044eda14cbcSMatt Macy /* 3045eda14cbcSMatt Macy * If space < SPA_MINBLOCKSIZE, then we will not allocate from 3046eda14cbcSMatt Macy * this metaslab again. The fragmentation metric may have 3047eda14cbcSMatt Macy * decreased the space to something smaller than 3048eda14cbcSMatt Macy * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE 3049eda14cbcSMatt Macy * so that we can consume any remaining space. 3050eda14cbcSMatt Macy */ 3051eda14cbcSMatt Macy if (space > 0 && space < SPA_MINBLOCKSIZE) 3052eda14cbcSMatt Macy space = SPA_MINBLOCKSIZE; 3053eda14cbcSMatt Macy } 3054eda14cbcSMatt Macy weight = space; 3055eda14cbcSMatt Macy 3056eda14cbcSMatt Macy /* 3057eda14cbcSMatt Macy * Modern disks have uniform bit density and constant angular velocity. 3058eda14cbcSMatt Macy * Therefore, the outer recording zones are faster (higher bandwidth) 3059eda14cbcSMatt Macy * than the inner zones by the ratio of outer to inner track diameter, 3060eda14cbcSMatt Macy * which is typically around 2:1. We account for this by assigning 3061eda14cbcSMatt Macy * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). 3062eda14cbcSMatt Macy * In effect, this means that we'll select the metaslab with the most 3063eda14cbcSMatt Macy * free bandwidth rather than simply the one with the most free space. 3064eda14cbcSMatt Macy */ 3065eda14cbcSMatt Macy if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) { 3066eda14cbcSMatt Macy weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; 3067eda14cbcSMatt Macy ASSERT(weight >= space && weight <= 2 * space); 3068eda14cbcSMatt Macy } 3069eda14cbcSMatt Macy 3070eda14cbcSMatt Macy /* 3071eda14cbcSMatt Macy * If this metaslab is one we're actively using, adjust its 3072eda14cbcSMatt Macy * weight to make it preferable to any inactive metaslab so 3073eda14cbcSMatt Macy * we'll polish it off. If the fragmentation on this metaslab 3074eda14cbcSMatt Macy * has exceeded our threshold, then don't mark it active. 3075eda14cbcSMatt Macy */ 3076eda14cbcSMatt Macy if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && 3077eda14cbcSMatt Macy msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { 3078eda14cbcSMatt Macy weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); 3079eda14cbcSMatt Macy } 3080eda14cbcSMatt Macy 3081eda14cbcSMatt Macy WEIGHT_SET_SPACEBASED(weight); 3082eda14cbcSMatt Macy return (weight); 3083eda14cbcSMatt Macy } 3084eda14cbcSMatt Macy 3085eda14cbcSMatt Macy /* 3086eda14cbcSMatt Macy * Return the weight of the specified metaslab, according to the segment-based 3087eda14cbcSMatt Macy * weighting algorithm. The metaslab must be loaded. This function can 3088eda14cbcSMatt Macy * be called within a sync pass since it relies only on the metaslab's 3089eda14cbcSMatt Macy * range tree, which is always accurate when the metaslab is loaded.
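 *
 * For example, with an ashift of 12 the largest space map bucket is
 * index SPACE_MAP_HISTOGRAM_SIZE + 12 - 1; two range tree segments in
 * the bucket just above that index are carried down by the doubling
 * step below and counted as four segments in the largest space map
 * bucket.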
3090eda14cbcSMatt Macy */ 3091eda14cbcSMatt Macy static uint64_t 3092eda14cbcSMatt Macy metaslab_weight_from_range_tree(metaslab_t *msp) 3093eda14cbcSMatt Macy { 3094eda14cbcSMatt Macy uint64_t weight = 0; 3095eda14cbcSMatt Macy uint32_t segments = 0; 3096eda14cbcSMatt Macy 3097eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3098eda14cbcSMatt Macy 3099eda14cbcSMatt Macy for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; 3100eda14cbcSMatt Macy i--) { 3101eda14cbcSMatt Macy uint8_t shift = msp->ms_group->mg_vd->vdev_ashift; 3102eda14cbcSMatt Macy int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3103eda14cbcSMatt Macy 3104eda14cbcSMatt Macy segments <<= 1; 3105eda14cbcSMatt Macy segments += msp->ms_allocatable->rt_histogram[i]; 3106eda14cbcSMatt Macy 3107eda14cbcSMatt Macy /* 3108eda14cbcSMatt Macy * The range tree provides more precision than the space map 3109eda14cbcSMatt Macy * and must be downgraded so that all values fit within the 3110eda14cbcSMatt Macy * space map's histogram. This allows us to compare loaded 3111eda14cbcSMatt Macy * vs. unloaded metaslabs to determine which metaslab is 3112eda14cbcSMatt Macy * considered "best". 3113eda14cbcSMatt Macy */ 3114eda14cbcSMatt Macy if (i > max_idx) 3115eda14cbcSMatt Macy continue; 3116eda14cbcSMatt Macy 3117eda14cbcSMatt Macy if (segments != 0) { 3118eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, segments); 3119eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, i); 3120eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, 0); 3121eda14cbcSMatt Macy break; 3122eda14cbcSMatt Macy } 3123eda14cbcSMatt Macy } 3124eda14cbcSMatt Macy return (weight); 3125eda14cbcSMatt Macy } 3126eda14cbcSMatt Macy 3127eda14cbcSMatt Macy /* 3128eda14cbcSMatt Macy * Calculate the weight based on the on-disk histogram. Should be applied 3129eda14cbcSMatt Macy * only to unloaded metaslabs (i.e., no incoming allocations) in order to 3130eda14cbcSMatt Macy * give results consistent with the on-disk state. 3131eda14cbcSMatt Macy */ 3132eda14cbcSMatt Macy static uint64_t 3133eda14cbcSMatt Macy metaslab_weight_from_spacemap(metaslab_t *msp) 3134eda14cbcSMatt Macy { 3135eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm; 3136eda14cbcSMatt Macy ASSERT(!msp->ms_loaded); 3137eda14cbcSMatt Macy ASSERT(sm != NULL); 3138eda14cbcSMatt Macy ASSERT3U(space_map_object(sm), !=, 0); 3139eda14cbcSMatt Macy ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3140eda14cbcSMatt Macy 3141eda14cbcSMatt Macy /* 3142eda14cbcSMatt Macy * Create a joint histogram from all the segments that have made 3143eda14cbcSMatt Macy * it to the metaslab's space map histogram, that are not yet 3144eda14cbcSMatt Macy * available for allocation because they are still in the freeing 3145eda14cbcSMatt Macy * pipeline (e.g. freeing, freed, and defer trees). Then subtract 3146eda14cbcSMatt Macy * these segments from the space map's histogram to get a more 3147eda14cbcSMatt Macy * accurate weight.
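 *
 * For example, if a bucket of the on-disk histogram holds 12 segments
 * and the joint synchist/deferhist histogram accounts for 5 of them,
 * only the remaining 7 contribute to the weight for that bucket.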
3148eda14cbcSMatt Macy */ 3149eda14cbcSMatt Macy uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0}; 3150eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) 3151eda14cbcSMatt Macy deferspace_histogram[i] += msp->ms_synchist[i]; 3152eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3153eda14cbcSMatt Macy for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 3154eda14cbcSMatt Macy deferspace_histogram[i] += msp->ms_deferhist[t][i]; 3155eda14cbcSMatt Macy } 3156eda14cbcSMatt Macy } 3157eda14cbcSMatt Macy 3158eda14cbcSMatt Macy uint64_t weight = 0; 3159eda14cbcSMatt Macy for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { 3160eda14cbcSMatt Macy ASSERT3U(sm->sm_phys->smp_histogram[i], >=, 3161eda14cbcSMatt Macy deferspace_histogram[i]); 3162eda14cbcSMatt Macy uint64_t count = 3163eda14cbcSMatt Macy sm->sm_phys->smp_histogram[i] - deferspace_histogram[i]; 3164eda14cbcSMatt Macy if (count != 0) { 3165eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, count); 3166eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, i + sm->sm_shift); 3167eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, 0); 3168eda14cbcSMatt Macy break; 3169eda14cbcSMatt Macy } 3170eda14cbcSMatt Macy } 3171eda14cbcSMatt Macy return (weight); 3172eda14cbcSMatt Macy } 3173eda14cbcSMatt Macy 3174eda14cbcSMatt Macy /* 3175eda14cbcSMatt Macy * Compute a segment-based weight for the specified metaslab. The weight 3176eda14cbcSMatt Macy * is determined by the highest bucket in the histogram. The information 3177eda14cbcSMatt Macy * for the highest bucket is encoded into the weight value. 3178eda14cbcSMatt Macy */ 3179eda14cbcSMatt Macy static uint64_t 3180eda14cbcSMatt Macy metaslab_segment_weight(metaslab_t *msp) 3181eda14cbcSMatt Macy { 3182eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3183eda14cbcSMatt Macy uint64_t weight = 0; 3184eda14cbcSMatt Macy uint8_t shift = mg->mg_vd->vdev_ashift; 3185eda14cbcSMatt Macy 3186eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3187eda14cbcSMatt Macy 3188eda14cbcSMatt Macy /* 3189eda14cbcSMatt Macy * The metaslab is completely free. 3190eda14cbcSMatt Macy */ 3191eda14cbcSMatt Macy if (metaslab_allocated_space(msp) == 0) { 3192eda14cbcSMatt Macy int idx = highbit64(msp->ms_size) - 1; 3193eda14cbcSMatt Macy int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3194eda14cbcSMatt Macy 3195eda14cbcSMatt Macy if (idx < max_idx) { 3196eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, 1ULL); 3197eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, idx); 3198eda14cbcSMatt Macy } else { 3199eda14cbcSMatt Macy WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); 3200eda14cbcSMatt Macy WEIGHT_SET_INDEX(weight, max_idx); 3201eda14cbcSMatt Macy } 3202eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, 0); 3203eda14cbcSMatt Macy ASSERT(!WEIGHT_IS_SPACEBASED(weight)); 3204eda14cbcSMatt Macy return (weight); 3205eda14cbcSMatt Macy } 3206eda14cbcSMatt Macy 3207eda14cbcSMatt Macy ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3208eda14cbcSMatt Macy 3209eda14cbcSMatt Macy /* 3210eda14cbcSMatt Macy * If the metaslab is fully allocated then just make the weight 0. 3211eda14cbcSMatt Macy */ 3212eda14cbcSMatt Macy if (metaslab_allocated_space(msp) == msp->ms_size) 3213eda14cbcSMatt Macy return (0); 3214eda14cbcSMatt Macy /* 3215eda14cbcSMatt Macy * If the metaslab is already loaded, then use the range tree to 3216eda14cbcSMatt Macy * determine the weight. Otherwise, we rely on the space map information 3217eda14cbcSMatt Macy * to generate the weight.
3218eda14cbcSMatt Macy */ 3219eda14cbcSMatt Macy if (msp->ms_loaded) { 3220eda14cbcSMatt Macy weight = metaslab_weight_from_range_tree(msp); 3221eda14cbcSMatt Macy } else { 3222eda14cbcSMatt Macy weight = metaslab_weight_from_spacemap(msp); 3223eda14cbcSMatt Macy } 3224eda14cbcSMatt Macy 3225eda14cbcSMatt Macy /* 3226eda14cbcSMatt Macy * If the metaslab was active the last time we calculated its weight 3227eda14cbcSMatt Macy * then keep it active. We want to consume the entire region that 3228eda14cbcSMatt Macy * is associated with this weight. 3229eda14cbcSMatt Macy */ 3230eda14cbcSMatt Macy if (msp->ms_activation_weight != 0 && weight != 0) 3231eda14cbcSMatt Macy WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); 3232eda14cbcSMatt Macy return (weight); 3233eda14cbcSMatt Macy } 3234eda14cbcSMatt Macy 3235eda14cbcSMatt Macy /* 3236eda14cbcSMatt Macy * Determine if we should attempt to allocate from this metaslab. If the 3237eda14cbcSMatt Macy * metaslab is loaded, then we can determine if the desired allocation 3238eda14cbcSMatt Macy * can be satisfied by looking at the size of the maximum free segment 3239eda14cbcSMatt Macy * on that metaslab. Otherwise, we make our decision based on the metaslab's 3240eda14cbcSMatt Macy * weight. For segment-based weighting we can determine the maximum 3241eda14cbcSMatt Macy * allocation based on the index encoded in its value. For space-based 3242eda14cbcSMatt Macy * weights we rely on the entire weight (excluding the weight-type bit). 3243eda14cbcSMatt Macy */ 3244eda14cbcSMatt Macy static boolean_t 3245eda14cbcSMatt Macy metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard) 3246eda14cbcSMatt Macy { 3247eda14cbcSMatt Macy /* 3248f7a5903dSMartin Matuska * This case will usually but not always get caught by the checks below; 3249f7a5903dSMartin Matuska * metaslabs can be loaded by various means, including the trim and 3250f7a5903dSMartin Matuska * initialize code. Once that happens, without this check they are 3251f7a5903dSMartin Matuska * allocatable even before they finish their first txg sync. 3252f7a5903dSMartin Matuska */ 3253f7a5903dSMartin Matuska if (unlikely(msp->ms_new)) 3254f7a5903dSMartin Matuska return (B_FALSE); 3255f7a5903dSMartin Matuska 3256f7a5903dSMartin Matuska /* 3257eda14cbcSMatt Macy * If the metaslab is loaded, ms_max_size is definitive and we can use 3258eda14cbcSMatt Macy * the fast check. If it's not, the ms_max_size is a lower bound (once 3259eda14cbcSMatt Macy * set), and we should use the fast check as long as we're not in 3260eda14cbcSMatt Macy * try_hard and it's been less than zfs_metaslab_max_size_cache_sec 3261eda14cbcSMatt Macy * seconds since the metaslab was unloaded. 3262eda14cbcSMatt Macy */ 3263eda14cbcSMatt Macy if (msp->ms_loaded || 3264eda14cbcSMatt Macy (msp->ms_max_size != 0 && !try_hard && gethrtime() < 3265eda14cbcSMatt Macy msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec))) 3266eda14cbcSMatt Macy return (msp->ms_max_size >= asize); 3267eda14cbcSMatt Macy 3268eda14cbcSMatt Macy boolean_t should_allocate; 3269eda14cbcSMatt Macy if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 3270eda14cbcSMatt Macy /* 3271eda14cbcSMatt Macy * The metaslab segment weight indicates segments in the 3272eda14cbcSMatt Macy * range [2^i, 2^(i+1)), where i is the index in the weight. 3273eda14cbcSMatt Macy * Since the asize might be in the middle of the range, we 3274eda14cbcSMatt Macy * should attempt the allocation if asize < 2^(i+1). 
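 *
 * For example, a weight with index 17 describes free segments in the
 * range [128K, 256K); an asize of 192K is worth attempting since
 * 192K < 2^18, while an asize of 512K is not.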
3275eda14cbcSMatt Macy */ 3276eda14cbcSMatt Macy should_allocate = (asize < 3277eda14cbcSMatt Macy 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); 3278eda14cbcSMatt Macy } else { 3279eda14cbcSMatt Macy should_allocate = (asize <= 3280eda14cbcSMatt Macy (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); 3281eda14cbcSMatt Macy } 3282eda14cbcSMatt Macy 3283eda14cbcSMatt Macy return (should_allocate); 3284eda14cbcSMatt Macy } 3285eda14cbcSMatt Macy 3286eda14cbcSMatt Macy static uint64_t 3287eda14cbcSMatt Macy metaslab_weight(metaslab_t *msp, boolean_t nodirty) 3288eda14cbcSMatt Macy { 3289eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd; 3290eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 3291eda14cbcSMatt Macy uint64_t weight; 3292eda14cbcSMatt Macy 3293eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3294eda14cbcSMatt Macy 3295eda14cbcSMatt Macy metaslab_set_fragmentation(msp, nodirty); 3296eda14cbcSMatt Macy 3297eda14cbcSMatt Macy /* 3298eda14cbcSMatt Macy * Update the maximum size. If the metaslab is loaded, this will 3299eda14cbcSMatt Macy * ensure that we get an accurate maximum size if newly freed space 3300eda14cbcSMatt Macy * has been added back into the free tree. If the metaslab is 3301eda14cbcSMatt Macy * unloaded, we check if there's a larger free segment in the 3302eda14cbcSMatt Macy * unflushed frees. This is a lower bound on the largest allocatable 3303eda14cbcSMatt Macy * segment size. Coalescing of adjacent entries may reveal larger 3304eda14cbcSMatt Macy * allocatable segments, but we aren't aware of those until loading 3305eda14cbcSMatt Macy * the space map into a range tree. 3306eda14cbcSMatt Macy */ 3307eda14cbcSMatt Macy if (msp->ms_loaded) { 3308eda14cbcSMatt Macy msp->ms_max_size = metaslab_largest_allocatable(msp); 3309eda14cbcSMatt Macy } else { 3310eda14cbcSMatt Macy msp->ms_max_size = MAX(msp->ms_max_size, 3311eda14cbcSMatt Macy metaslab_largest_unflushed_free(msp)); 3312eda14cbcSMatt Macy } 3313eda14cbcSMatt Macy 3314eda14cbcSMatt Macy /* 3315eda14cbcSMatt Macy * Segment-based weighting requires space map histogram support. 3316eda14cbcSMatt Macy */ 3317eda14cbcSMatt Macy if (zfs_metaslab_segment_weight_enabled && 3318eda14cbcSMatt Macy spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && 3319eda14cbcSMatt Macy (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == 3320eda14cbcSMatt Macy sizeof (space_map_phys_t))) { 3321eda14cbcSMatt Macy weight = metaslab_segment_weight(msp); 3322eda14cbcSMatt Macy } else { 3323eda14cbcSMatt Macy weight = metaslab_space_weight(msp); 3324eda14cbcSMatt Macy } 3325eda14cbcSMatt Macy return (weight); 3326eda14cbcSMatt Macy } 3327eda14cbcSMatt Macy 3328eda14cbcSMatt Macy void 3329eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(metaslab_t *msp) 3330eda14cbcSMatt Macy { 3331eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3332eda14cbcSMatt Macy 3333eda14cbcSMatt Macy /* note: we preserve the mask (e.g. indication of primary, etc..) 
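 * (e.g. a metaslab currently held as an allocator's primary keeps its
 * METASLAB_WEIGHT_PRIMARY bit across the re-weighting and re-sort)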
*/
3334eda14cbcSMatt Macy uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3335eda14cbcSMatt Macy metaslab_group_sort(msp->ms_group, msp,
3336eda14cbcSMatt Macy metaslab_weight(msp, B_FALSE) | was_active);
3337eda14cbcSMatt Macy }
3338eda14cbcSMatt Macy
3339eda14cbcSMatt Macy static int
3340eda14cbcSMatt Macy metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3341eda14cbcSMatt Macy int allocator, uint64_t activation_weight)
3342eda14cbcSMatt Macy {
3343eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3344eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
3345eda14cbcSMatt Macy
3346eda14cbcSMatt Macy /*
3347eda14cbcSMatt Macy * If we're activating for the claim code, we don't want to actually
3348eda14cbcSMatt Macy * set the metaslab up for a specific allocator.
3349eda14cbcSMatt Macy */
3350eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3351eda14cbcSMatt Macy ASSERT0(msp->ms_activation_weight);
3352eda14cbcSMatt Macy msp->ms_activation_weight = msp->ms_weight;
3353eda14cbcSMatt Macy metaslab_group_sort(mg, msp, msp->ms_weight |
3354eda14cbcSMatt Macy activation_weight);
3355eda14cbcSMatt Macy return (0);
3356eda14cbcSMatt Macy }
3357eda14cbcSMatt Macy
3358eda14cbcSMatt Macy metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3359eda14cbcSMatt Macy &mga->mga_primary : &mga->mga_secondary);
3360eda14cbcSMatt Macy
3361eda14cbcSMatt Macy mutex_enter(&mg->mg_lock);
3362eda14cbcSMatt Macy if (*mspp != NULL) {
3363eda14cbcSMatt Macy mutex_exit(&mg->mg_lock);
3364eda14cbcSMatt Macy return (EEXIST);
3365eda14cbcSMatt Macy }
3366eda14cbcSMatt Macy
3367eda14cbcSMatt Macy *mspp = msp;
3368eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, -1);
3369eda14cbcSMatt Macy msp->ms_allocator = allocator;
3370eda14cbcSMatt Macy msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3371eda14cbcSMatt Macy
3372eda14cbcSMatt Macy ASSERT0(msp->ms_activation_weight);
3373eda14cbcSMatt Macy msp->ms_activation_weight = msp->ms_weight;
3374eda14cbcSMatt Macy metaslab_group_sort_impl(mg, msp,
3375eda14cbcSMatt Macy msp->ms_weight | activation_weight);
3376eda14cbcSMatt Macy mutex_exit(&mg->mg_lock);
3377eda14cbcSMatt Macy
3378eda14cbcSMatt Macy return (0);
3379eda14cbcSMatt Macy }
3380eda14cbcSMatt Macy
3381eda14cbcSMatt Macy static int
3382eda14cbcSMatt Macy metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3383eda14cbcSMatt Macy {
3384eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
3385eda14cbcSMatt Macy
3386eda14cbcSMatt Macy /*
3387eda14cbcSMatt Macy * The current metaslab is already activated for us so there
3388eda14cbcSMatt Macy * is nothing to do. Being already activated, though, doesn't mean
3389eda14cbcSMatt Macy * that this metaslab is activated for our allocator or with our
3390eda14cbcSMatt Macy * requested activation weight. The metaslab could have started
3391eda14cbcSMatt Macy * as an active one for our allocator but changed allocators
3392eda14cbcSMatt Macy * while we were waiting to grab its ms_lock or we stole it
3393eda14cbcSMatt Macy * [see find_valid_metaslab()]. This means that this thread may
3394eda14cbcSMatt Macy * end up passivating a metaslab that belongs to another allocator
3395eda14cbcSMatt Macy * or one that was activated with a different activation mask.
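 *
 * A sketch of one such interleaving (hypothetical, for illustration):
 * thread A picks metaslab M as a secondary for allocator 0 and blocks
 * on ms_lock; meanwhile thread B steals M and activates it as a
 * primary for allocator 1. By the time A acquires the lock, the
 * checks below notice the mismatch and return EBUSY rather than
 * clobbering B's activation.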
3396eda14cbcSMatt Macy */ 3397eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 3398eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3399eda14cbcSMatt Macy return (0); 3400eda14cbcSMatt Macy } 3401eda14cbcSMatt Macy 3402eda14cbcSMatt Macy int error = metaslab_load(msp); 3403eda14cbcSMatt Macy if (error != 0) { 3404eda14cbcSMatt Macy metaslab_group_sort(msp->ms_group, msp, 0); 3405eda14cbcSMatt Macy return (error); 3406eda14cbcSMatt Macy } 3407eda14cbcSMatt Macy 3408eda14cbcSMatt Macy /* 3409eda14cbcSMatt Macy * When entering metaslab_load() we may have dropped the 3410eda14cbcSMatt Macy * ms_lock because we were loading this metaslab, or we 3411eda14cbcSMatt Macy * were waiting for another thread to load it for us. In 3412eda14cbcSMatt Macy * that scenario, we recheck the weight of the metaslab 3413eda14cbcSMatt Macy * to see if it was activated by another thread. 3414eda14cbcSMatt Macy * 3415eda14cbcSMatt Macy * If the metaslab was activated for another allocator or 3416eda14cbcSMatt Macy * it was activated with a different activation weight (e.g. 3417eda14cbcSMatt Macy * we wanted to make it a primary but it was activated as 3418eda14cbcSMatt Macy * secondary) we return error (EBUSY). 3419eda14cbcSMatt Macy * 3420eda14cbcSMatt Macy * If the metaslab was activated for the same allocator 3421eda14cbcSMatt Macy * and requested activation mask, skip activating it. 3422eda14cbcSMatt Macy */ 3423eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 3424eda14cbcSMatt Macy if (msp->ms_allocator != allocator) 3425eda14cbcSMatt Macy return (EBUSY); 3426eda14cbcSMatt Macy 3427eda14cbcSMatt Macy if ((msp->ms_weight & activation_weight) == 0) 3428eda14cbcSMatt Macy return (SET_ERROR(EBUSY)); 3429eda14cbcSMatt Macy 3430eda14cbcSMatt Macy EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY), 3431eda14cbcSMatt Macy msp->ms_primary); 3432eda14cbcSMatt Macy return (0); 3433eda14cbcSMatt Macy } 3434eda14cbcSMatt Macy 3435eda14cbcSMatt Macy /* 3436eda14cbcSMatt Macy * If the metaslab has literally 0 space, it will have weight 0. In 3437eda14cbcSMatt Macy * that case, don't bother activating it. This can happen if the 3438eda14cbcSMatt Macy * metaslab had space during find_valid_metaslab, but another thread 3439eda14cbcSMatt Macy * loaded it and used all that space while we were waiting to grab the 3440eda14cbcSMatt Macy * lock. 
3441eda14cbcSMatt Macy */ 3442eda14cbcSMatt Macy if (msp->ms_weight == 0) { 3443eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocatable)); 3444eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 3445eda14cbcSMatt Macy } 3446eda14cbcSMatt Macy 3447eda14cbcSMatt Macy if ((error = metaslab_activate_allocator(msp->ms_group, msp, 3448eda14cbcSMatt Macy allocator, activation_weight)) != 0) { 3449eda14cbcSMatt Macy return (error); 3450eda14cbcSMatt Macy } 3451eda14cbcSMatt Macy 3452eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3453eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 3454eda14cbcSMatt Macy 3455eda14cbcSMatt Macy return (0); 3456eda14cbcSMatt Macy } 3457eda14cbcSMatt Macy 3458eda14cbcSMatt Macy static void 3459eda14cbcSMatt Macy metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp, 3460eda14cbcSMatt Macy uint64_t weight) 3461eda14cbcSMatt Macy { 3462eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3463eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 3464eda14cbcSMatt Macy 3465eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 3466eda14cbcSMatt Macy metaslab_group_sort(mg, msp, weight); 3467eda14cbcSMatt Macy return; 3468eda14cbcSMatt Macy } 3469eda14cbcSMatt Macy 3470eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 3471eda14cbcSMatt Macy ASSERT3P(msp->ms_group, ==, mg); 3472eda14cbcSMatt Macy ASSERT3S(0, <=, msp->ms_allocator); 3473eda14cbcSMatt Macy ASSERT3U(msp->ms_allocator, <, mg->mg_allocators); 3474eda14cbcSMatt Macy 3475eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator]; 3476eda14cbcSMatt Macy if (msp->ms_primary) { 3477eda14cbcSMatt Macy ASSERT3P(mga->mga_primary, ==, msp); 3478eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 3479eda14cbcSMatt Macy mga->mga_primary = NULL; 3480eda14cbcSMatt Macy } else { 3481eda14cbcSMatt Macy ASSERT3P(mga->mga_secondary, ==, msp); 3482eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 3483eda14cbcSMatt Macy mga->mga_secondary = NULL; 3484eda14cbcSMatt Macy } 3485eda14cbcSMatt Macy msp->ms_allocator = -1; 3486eda14cbcSMatt Macy metaslab_group_sort_impl(mg, msp, weight); 3487eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 3488eda14cbcSMatt Macy } 3489eda14cbcSMatt Macy 3490eda14cbcSMatt Macy static void 3491eda14cbcSMatt Macy metaslab_passivate(metaslab_t *msp, uint64_t weight) 3492eda14cbcSMatt Macy { 3493eda14cbcSMatt Macy uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE; 3494eda14cbcSMatt Macy 3495eda14cbcSMatt Macy /* 3496eda14cbcSMatt Macy * If size < SPA_MINBLOCKSIZE, then we will not allocate from 3497eda14cbcSMatt Macy * this metaslab again. In that case, it had better be empty, 3498eda14cbcSMatt Macy * or we would be leaving space on the table. 
3499eda14cbcSMatt Macy */
3500eda14cbcSMatt Macy ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3501eda14cbcSMatt Macy size >= SPA_MINBLOCKSIZE ||
3502eda14cbcSMatt Macy range_tree_space(msp->ms_allocatable) == 0);
3503eda14cbcSMatt Macy ASSERT0(weight & METASLAB_ACTIVE_MASK);
3504eda14cbcSMatt Macy
3505eda14cbcSMatt Macy ASSERT(msp->ms_activation_weight != 0);
3506eda14cbcSMatt Macy msp->ms_activation_weight = 0;
3507eda14cbcSMatt Macy metaslab_passivate_allocator(msp->ms_group, msp, weight);
3508eda14cbcSMatt Macy ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3509eda14cbcSMatt Macy }
3510eda14cbcSMatt Macy
3511eda14cbcSMatt Macy /*
3512eda14cbcSMatt Macy * Segment-based metaslabs are activated once and remain active until
3513eda14cbcSMatt Macy * we either fail an allocation attempt (similar to space-based metaslabs)
3514eda14cbcSMatt Macy * or have exhausted the free space in zfs_metaslab_switch_threshold
3515eda14cbcSMatt Macy * buckets since the metaslab was activated. This function checks to see
3516eda14cbcSMatt Macy * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3517eda14cbcSMatt Macy * metaslab and passivates it proactively. This will allow us to select a
3518eda14cbcSMatt Macy * metaslab with a larger contiguous region, if any, remaining within this
3519eda14cbcSMatt Macy * metaslab group. If we're in sync pass > 1, then we continue using this
3520eda14cbcSMatt Macy * metaslab so that we don't dirty more blocks and cause more sync passes.
3521eda14cbcSMatt Macy */
3522eda14cbcSMatt Macy static void
3523eda14cbcSMatt Macy metaslab_segment_may_passivate(metaslab_t *msp)
3524eda14cbcSMatt Macy {
3525eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3526eda14cbcSMatt Macy
3527eda14cbcSMatt Macy if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3528eda14cbcSMatt Macy return;
3529eda14cbcSMatt Macy
3530eda14cbcSMatt Macy /*
3531eda14cbcSMatt Macy * Since we are in the middle of a sync pass, the most accurate
3532eda14cbcSMatt Macy * information that is accessible to us is the in-core range tree
3533eda14cbcSMatt Macy * histogram; calculate the new weight based on that information.
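 *
 * Worked example (illustrative numbers): with
 * zfs_metaslab_switch_threshold = 2 and a metaslab activated when its
 * highest bucket was index 20 (1M segments), we passivate once the
 * highest remaining bucket drops to index 18 (256K) or below, freeing
 * the activation slot for a metaslab that can still satisfy larger
 * allocations.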
3534eda14cbcSMatt Macy */ 3535eda14cbcSMatt Macy uint64_t weight = metaslab_weight_from_range_tree(msp); 3536eda14cbcSMatt Macy int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); 3537eda14cbcSMatt Macy int current_idx = WEIGHT_GET_INDEX(weight); 3538eda14cbcSMatt Macy 3539eda14cbcSMatt Macy if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) 3540eda14cbcSMatt Macy metaslab_passivate(msp, weight); 3541eda14cbcSMatt Macy } 3542eda14cbcSMatt Macy 3543eda14cbcSMatt Macy static void 3544eda14cbcSMatt Macy metaslab_preload(void *arg) 3545eda14cbcSMatt Macy { 3546eda14cbcSMatt Macy metaslab_t *msp = arg; 3547eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 3548eda14cbcSMatt Macy spa_t *spa = mc->mc_spa; 3549eda14cbcSMatt Macy fstrans_cookie_t cookie = spl_fstrans_mark(); 3550eda14cbcSMatt Macy 3551eda14cbcSMatt Macy ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); 3552eda14cbcSMatt Macy 3553eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 3554eda14cbcSMatt Macy (void) metaslab_load(msp); 3555eda14cbcSMatt Macy metaslab_set_selected_txg(msp, spa_syncing_txg(spa)); 3556eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 3557eda14cbcSMatt Macy spl_fstrans_unmark(cookie); 3558eda14cbcSMatt Macy } 3559eda14cbcSMatt Macy 3560eda14cbcSMatt Macy static void 3561eda14cbcSMatt Macy metaslab_group_preload(metaslab_group_t *mg) 3562eda14cbcSMatt Macy { 3563eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 3564eda14cbcSMatt Macy metaslab_t *msp; 3565eda14cbcSMatt Macy avl_tree_t *t = &mg->mg_metaslab_tree; 3566eda14cbcSMatt Macy int m = 0; 3567eda14cbcSMatt Macy 3568b2526e8bSMartin Matuska if (spa_shutting_down(spa) || !metaslab_preload_enabled) 3569eda14cbcSMatt Macy return; 3570eda14cbcSMatt Macy 3571eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 3572eda14cbcSMatt Macy 3573eda14cbcSMatt Macy /* 3574eda14cbcSMatt Macy * Load the next potential metaslabs 3575eda14cbcSMatt Macy */ 3576eda14cbcSMatt Macy for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) { 3577eda14cbcSMatt Macy ASSERT3P(msp->ms_group, ==, mg); 3578eda14cbcSMatt Macy 3579eda14cbcSMatt Macy /* 3580eda14cbcSMatt Macy * We preload only the maximum number of metaslabs specified 3581eda14cbcSMatt Macy * by metaslab_preload_limit. If a metaslab is being forced 3582eda14cbcSMatt Macy * to condense then we preload it too. This will ensure 3583eda14cbcSMatt Macy * that force condensing happens in the next txg. 3584eda14cbcSMatt Macy */ 3585eda14cbcSMatt Macy if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { 3586eda14cbcSMatt Macy continue; 3587eda14cbcSMatt Macy } 3588eda14cbcSMatt Macy 3589b2526e8bSMartin Matuska VERIFY(taskq_dispatch(spa->spa_metaslab_taskq, metaslab_preload, 3590b2526e8bSMartin Matuska msp, TQ_SLEEP | (m <= mg->mg_allocators ? TQ_FRONT : 0)) 3591b2526e8bSMartin Matuska != TASKQID_INVALID); 3592eda14cbcSMatt Macy } 3593eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 3594eda14cbcSMatt Macy } 3595eda14cbcSMatt Macy 3596eda14cbcSMatt Macy /* 3597eda14cbcSMatt Macy * Determine if the space map's on-disk footprint is past our tolerance for 3598eda14cbcSMatt Macy * inefficiency. We would like to use the following criteria to make our 3599eda14cbcSMatt Macy * decision: 3600eda14cbcSMatt Macy * 3601eda14cbcSMatt Macy * 1. Do not condense if the size of the space map object would dramatically 3602eda14cbcSMatt Macy * increase as a result of writing out the free space range tree. 3603eda14cbcSMatt Macy * 3604eda14cbcSMatt Macy * 2. 
Condense if the on-disk space map representation is at least
3605eda14cbcSMatt Macy * zfs_condense_pct/100 times the size of the optimal representation
3606eda14cbcSMatt Macy * (e.g. zfs_condense_pct = 110 and optimal = 1MB: condense at >= 1.1MB on disk).
3607eda14cbcSMatt Macy *
3608eda14cbcSMatt Macy * 3. Do not condense if the on-disk size of the space map does not actually
3609eda14cbcSMatt Macy * decrease.
3610eda14cbcSMatt Macy *
3611eda14cbcSMatt Macy * Unfortunately, we cannot compute the on-disk size of the space map in this
3612eda14cbcSMatt Macy * context because we cannot accurately compute the effects of compression, etc.
3613eda14cbcSMatt Macy * Instead, we apply the heuristic described in the block comment for
3614eda14cbcSMatt Macy * zfs_metaslab_condense_block_threshold - we only condense if the space used
3615eda14cbcSMatt Macy * is greater than a threshold number of blocks.
3616eda14cbcSMatt Macy */
3617eda14cbcSMatt Macy static boolean_t
3618eda14cbcSMatt Macy metaslab_should_condense(metaslab_t *msp)
3619eda14cbcSMatt Macy {
3620eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm;
3621eda14cbcSMatt Macy vdev_t *vd = msp->ms_group->mg_vd;
3622be181ee2SMartin Matuska uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
3623eda14cbcSMatt Macy
3624eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
3625eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
3626eda14cbcSMatt Macy ASSERT(sm != NULL);
3627eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3628eda14cbcSMatt Macy
3629eda14cbcSMatt Macy /*
3630eda14cbcSMatt Macy * We always condense metaslabs that are empty and metaslabs for
3631eda14cbcSMatt Macy * which a condense request has been made.
3632eda14cbcSMatt Macy */
3633eda14cbcSMatt Macy if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3634eda14cbcSMatt Macy msp->ms_condense_wanted)
3635eda14cbcSMatt Macy return (B_TRUE);
3636eda14cbcSMatt Macy
3637eda14cbcSMatt Macy uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3638eda14cbcSMatt Macy uint64_t object_size = space_map_length(sm);
3639eda14cbcSMatt Macy uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3640eda14cbcSMatt Macy msp->ms_allocatable, SM_NO_VDEVID);
3641eda14cbcSMatt Macy
3642eda14cbcSMatt Macy return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3643eda14cbcSMatt Macy object_size > zfs_metaslab_condense_block_threshold * record_size);
3644eda14cbcSMatt Macy }
3645eda14cbcSMatt Macy
3646eda14cbcSMatt Macy /*
3647eda14cbcSMatt Macy * Condense the on-disk space map representation to its minimized form.
3648eda14cbcSMatt Macy * The minimized form consists of a small number of allocations followed
3649eda14cbcSMatt Macy * by the entries of the free range tree (ms_allocatable). The condensed
3650eda14cbcSMatt Macy * spacemap contains all the entries of previous TXGs (including those in
3651eda14cbcSMatt Macy * the pool-wide log spacemaps; thus this is effectively a superset of
3652eda14cbcSMatt Macy * metaslab_flush()), but this TXG's entries still need to be written.
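 *
 * As a back-of-the-envelope example of the should-condense test above
 * (default tunables assumed): with zfs_condense_pct = 200, ashift = 12
 * and sm_blksz = 128K, record_size is 128K, so the space map is
 * condensed only once it is both at least twice its estimated optimal
 * size and larger than 4 records (512K) on disk.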
3653eda14cbcSMatt Macy */
3654eda14cbcSMatt Macy static void
3655eda14cbcSMatt Macy metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3656eda14cbcSMatt Macy {
3657eda14cbcSMatt Macy range_tree_t *condense_tree;
3658eda14cbcSMatt Macy space_map_t *sm = msp->ms_sm;
3659eda14cbcSMatt Macy uint64_t txg = dmu_tx_get_txg(tx);
3660eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3661eda14cbcSMatt Macy
3662eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
3663eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
3664eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL);
3665eda14cbcSMatt Macy
3666eda14cbcSMatt Macy /*
3667eda14cbcSMatt Macy * In order to condense the space map, we need to change it so it
3668eda14cbcSMatt Macy * only describes which segments are currently allocated and free.
3669eda14cbcSMatt Macy *
3670eda14cbcSMatt Macy * All the current free space resides in the ms_allocatable, all
3671eda14cbcSMatt Macy * the ms_defer trees, and all the ms_allocating trees. We ignore
3672eda14cbcSMatt Macy * ms_freed because it is empty, as we're in sync pass 1. We
3673eda14cbcSMatt Macy * ignore ms_freeing because these changes are not yet reflected
3674eda14cbcSMatt Macy * in the spacemap (they will be written later this txg).
3675eda14cbcSMatt Macy *
3676eda14cbcSMatt Macy * So to truncate the space map to represent all the entries of
3677eda14cbcSMatt Macy * previous TXGs we do the following:
3678eda14cbcSMatt Macy *
3679eda14cbcSMatt Macy * 1] We create a range tree (condense tree) that is 100% empty.
3680eda14cbcSMatt Macy * 2] We add to it all segments found in the ms_defer trees
3681eda14cbcSMatt Macy * as those segments are marked as free in the original space
3682eda14cbcSMatt Macy * map. We do the same with the ms_allocating trees for the same
3683eda14cbcSMatt Macy * reason. Adding these segments should be a relatively
3684eda14cbcSMatt Macy * inexpensive operation since we expect these trees to have a
3685eda14cbcSMatt Macy * small number of nodes.
3686eda14cbcSMatt Macy * 3] We vacate any unflushed allocs, since they are not frees we
3687eda14cbcSMatt Macy * need to add to the condense tree. Then we vacate any
3688eda14cbcSMatt Macy * unflushed frees as they should already be part of ms_allocatable.
3689eda14cbcSMatt Macy * 4] At this point, we would ideally like to add all segments
3690eda14cbcSMatt Macy * in the ms_allocatable tree to the condense tree. This way
3691eda14cbcSMatt Macy * we would write all the entries of the condense tree as the
3692eda14cbcSMatt Macy * condensed space map, which would only contain freed
3693eda14cbcSMatt Macy * segments with everything else assumed to be allocated.
3694eda14cbcSMatt Macy *
3695eda14cbcSMatt Macy * Doing so can be prohibitively expensive as ms_allocatable can
3696eda14cbcSMatt Macy * be large, and therefore computationally expensive to add to
3697eda14cbcSMatt Macy * the condense_tree. Instead we first sync out an entry marking
3698eda14cbcSMatt Macy * everything as allocated, then the condense_tree and then the
3699eda14cbcSMatt Macy * ms_allocatable, in the condensed space map. While this is not
3700eda14cbcSMatt Macy * optimal, it is typically close to optimal and more importantly
3701eda14cbcSMatt Macy * much cheaper to compute.
3702eda14cbcSMatt Macy *
3703eda14cbcSMatt Macy * 5] Finally, as both of the unflushed trees were written to our
3704eda14cbcSMatt Macy * new and condensed metaslab space map, we basically flushed
3705eda14cbcSMatt Macy * all the unflushed changes to disk, thus we call
3706eda14cbcSMatt Macy * metaslab_flush_update().
3707eda14cbcSMatt Macy */
3708eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(spa), ==, 1);
3709eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3710eda14cbcSMatt Macy
3711eda14cbcSMatt Macy zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
371233b8c039SMartin Matuska "spa %s, smp size %llu, segments %llu, forcing condense=%s",
371333b8c039SMartin Matuska (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
371433b8c039SMartin Matuska (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
371533b8c039SMartin Matuska spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
371633b8c039SMartin Matuska (u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
3717eda14cbcSMatt Macy msp->ms_condense_wanted ? "TRUE" : "FALSE");
3718eda14cbcSMatt Macy
3719eda14cbcSMatt Macy msp->ms_condense_wanted = B_FALSE;
3720eda14cbcSMatt Macy
3721eda14cbcSMatt Macy range_seg_type_t type;
3722eda14cbcSMatt Macy uint64_t shift, start;
3723eda14cbcSMatt Macy type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3724eda14cbcSMatt Macy &start, &shift);
3725eda14cbcSMatt Macy
3726eda14cbcSMatt Macy condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3727eda14cbcSMatt Macy
3728eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3729eda14cbcSMatt Macy range_tree_walk(msp->ms_defer[t],
3730eda14cbcSMatt Macy range_tree_add, condense_tree);
3731eda14cbcSMatt Macy }
3732eda14cbcSMatt Macy
3733eda14cbcSMatt Macy for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3734eda14cbcSMatt Macy range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3735eda14cbcSMatt Macy range_tree_add, condense_tree);
3736eda14cbcSMatt Macy }
3737eda14cbcSMatt Macy
3738eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3739eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp));
3740eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -=
3741eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp);
3742eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3743eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3744eda14cbcSMatt Macy
3745eda14cbcSMatt Macy /*
3746eda14cbcSMatt Macy * We're about to drop the metaslab's lock, thus allowing other
3747eda14cbcSMatt Macy * consumers to change its content. Set the metaslab's ms_condensing
3748eda14cbcSMatt Macy * flag to ensure that allocations on this metaslab do not occur
3749eda14cbcSMatt Macy * while we're in the middle of committing it to disk. This is only
3750eda14cbcSMatt Macy * critical for ms_allocatable as all other range trees use per TXG
3751eda14cbcSMatt Macy * views of their content.
3752eda14cbcSMatt Macy */
3753eda14cbcSMatt Macy msp->ms_condensing = B_TRUE;
3754eda14cbcSMatt Macy
3755eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
3756eda14cbcSMatt Macy uint64_t object = space_map_object(msp->ms_sm);
3757eda14cbcSMatt Macy space_map_truncate(sm,
3758eda14cbcSMatt Macy spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3759eda14cbcSMatt Macy zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3760eda14cbcSMatt Macy
3761eda14cbcSMatt Macy /*
3762eda14cbcSMatt Macy * space_map_truncate() may have reallocated the spacemap object.
3763eda14cbcSMatt Macy * If so, update the vdev_ms_array.
3764eda14cbcSMatt Macy */
3765eda14cbcSMatt Macy if (space_map_object(msp->ms_sm) != object) {
3766eda14cbcSMatt Macy object = space_map_object(msp->ms_sm);
3767eda14cbcSMatt Macy dmu_write(spa->spa_meta_objset,
3768eda14cbcSMatt Macy msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3769eda14cbcSMatt Macy msp->ms_id, sizeof (uint64_t), &object, tx);
3770eda14cbcSMatt Macy }
3771eda14cbcSMatt Macy
3772eda14cbcSMatt Macy /*
3773eda14cbcSMatt Macy * Note:
3774eda14cbcSMatt Macy * When the log space map feature is enabled, each space map will
3775eda14cbcSMatt Macy * always have ALLOCS followed by FREES for each sync pass. This is
3776eda14cbcSMatt Macy * typically true even when the log space map feature is disabled,
3777eda14cbcSMatt Macy * except in the case where a metaslab goes through metaslab_sync()
3778eda14cbcSMatt Macy * and gets condensed. In that case the metaslab's space map will have
3779eda14cbcSMatt Macy * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3780eda14cbcSMatt Macy * followed by FREES (due to space_map_write() in metaslab_sync()) for
3781eda14cbcSMatt Macy * sync pass 1.
3782eda14cbcSMatt Macy */
3783eda14cbcSMatt Macy range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
3784eda14cbcSMatt Macy shift);
3785eda14cbcSMatt Macy range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3786eda14cbcSMatt Macy space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3787eda14cbcSMatt Macy space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3788eda14cbcSMatt Macy space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
3789eda14cbcSMatt Macy
3790eda14cbcSMatt Macy range_tree_vacate(condense_tree, NULL, NULL);
3791eda14cbcSMatt Macy range_tree_destroy(condense_tree);
3792eda14cbcSMatt Macy range_tree_vacate(tmp_tree, NULL, NULL);
3793eda14cbcSMatt Macy range_tree_destroy(tmp_tree);
3794eda14cbcSMatt Macy mutex_enter(&msp->ms_lock);
3795eda14cbcSMatt Macy
3796eda14cbcSMatt Macy msp->ms_condensing = B_FALSE;
3797eda14cbcSMatt Macy metaslab_flush_update(msp, tx);
3798eda14cbcSMatt Macy }
3799eda14cbcSMatt Macy
3800716fd348SMartin Matuska static void
3801716fd348SMartin Matuska metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
3802716fd348SMartin Matuska {
3803716fd348SMartin Matuska spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3804716fd348SMartin Matuska ASSERT(spa_syncing_log_sm(spa) != NULL);
3805716fd348SMartin Matuska ASSERT(msp->ms_sm != NULL);
3806716fd348SMartin Matuska ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3807716fd348SMartin Matuska ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3808716fd348SMartin Matuska
3809716fd348SMartin Matuska mutex_enter(&spa->spa_flushed_ms_lock);
3810716fd348SMartin Matuska metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3811716fd348SMartin Matuska metaslab_set_unflushed_dirty(msp, B_TRUE);
3812716fd348SMartin Matuska avl_add(&spa->spa_metaslabs_by_flushed, msp);
3813716fd348SMartin Matuska mutex_exit(&spa->spa_flushed_ms_lock);
3814716fd348SMartin Matuska
3815716fd348SMartin Matuska spa_log_sm_increment_current_mscount(spa);
3816716fd348SMartin Matuska spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
3817716fd348SMartin Matuska }
3818716fd348SMartin Matuska
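/*
 * An informal note on the bookkeeping above and below (a sketch, not
 * an authoritative description): spa_metaslabs_by_flushed stays sorted
 * by metaslab_unflushed_txg(), the txg of each metaslab's most recent
 * flush. For example, with three metaslabs last flushed in txgs 100,
 * 250 and 250, the minimum (100) pins the oldest log space map that
 * must be kept around; once that metaslab is flushed or condensed
 * again, its txg is bumped to the syncing txg and
 * spa_cleanup_old_sm_logs() below can reclaim the older logs.
 */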
3819716fd348SMartin Matuska void 3820716fd348SMartin Matuska metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty) 3821716fd348SMartin Matuska { 3822716fd348SMartin Matuska spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3823716fd348SMartin Matuska ASSERT(spa_syncing_log_sm(spa) != NULL); 3824716fd348SMartin Matuska ASSERT(msp->ms_sm != NULL); 3825716fd348SMartin Matuska ASSERT(metaslab_unflushed_txg(msp) != 0); 3826716fd348SMartin Matuska ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp); 3827716fd348SMartin Matuska ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 3828716fd348SMartin Matuska ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 3829716fd348SMartin Matuska 3830716fd348SMartin Matuska VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa)); 3831716fd348SMartin Matuska 3832716fd348SMartin Matuska /* update metaslab's position in our flushing tree */ 3833716fd348SMartin Matuska uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp); 3834716fd348SMartin Matuska boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp); 3835716fd348SMartin Matuska mutex_enter(&spa->spa_flushed_ms_lock); 3836716fd348SMartin Matuska avl_remove(&spa->spa_metaslabs_by_flushed, msp); 3837716fd348SMartin Matuska metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); 3838716fd348SMartin Matuska metaslab_set_unflushed_dirty(msp, dirty); 3839716fd348SMartin Matuska avl_add(&spa->spa_metaslabs_by_flushed, msp); 3840716fd348SMartin Matuska mutex_exit(&spa->spa_flushed_ms_lock); 3841716fd348SMartin Matuska 3842716fd348SMartin Matuska /* update metaslab counts of spa_log_sm_t nodes */ 3843716fd348SMartin Matuska spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg); 3844716fd348SMartin Matuska spa_log_sm_increment_current_mscount(spa); 3845716fd348SMartin Matuska 3846716fd348SMartin Matuska /* update log space map summary */ 3847716fd348SMartin Matuska spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg, 3848716fd348SMartin Matuska ms_prev_flushed_dirty); 3849716fd348SMartin Matuska spa_log_summary_add_flushed_metaslab(spa, dirty); 3850716fd348SMartin Matuska 3851716fd348SMartin Matuska /* cleanup obsolete logs if any */ 3852716fd348SMartin Matuska spa_cleanup_old_sm_logs(spa, tx); 3853716fd348SMartin Matuska } 3854716fd348SMartin Matuska 3855eda14cbcSMatt Macy /* 3856eda14cbcSMatt Macy * Called when the metaslab has been flushed (its own spacemap now reflects 3857eda14cbcSMatt Macy * all the contents of the pool-wide spacemap log). Updates the metaslab's 3858eda14cbcSMatt Macy * metadata and any pool-wide related log space map data (e.g. summary, 3859eda14cbcSMatt Macy * obsolete logs, etc..) to reflect that. 3860eda14cbcSMatt Macy */ 3861eda14cbcSMatt Macy static void 3862eda14cbcSMatt Macy metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx) 3863eda14cbcSMatt Macy { 3864eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 3865eda14cbcSMatt Macy spa_t *spa = mg->mg_vd->vdev_spa; 3866eda14cbcSMatt Macy 3867eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 3868eda14cbcSMatt Macy 3869eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(spa), ==, 1); 3870eda14cbcSMatt Macy 3871eda14cbcSMatt Macy /* 3872eda14cbcSMatt Macy * Just because a metaslab got flushed, that doesn't mean that 3873eda14cbcSMatt Macy * it will pass through metaslab_sync_done(). Thus, make sure to 3874eda14cbcSMatt Macy * update ms_synced_length here in case it doesn't. 
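 * (Among other things, ms_synced_length is what a later
 * metaslab_load() trusts when deciding how much of the on-disk space
 * map to read, so it must cover the entries we just flushed.)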
3875eda14cbcSMatt Macy */
3876eda14cbcSMatt Macy msp->ms_synced_length = space_map_length(msp->ms_sm);
3877eda14cbcSMatt Macy
3878eda14cbcSMatt Macy /*
3879eda14cbcSMatt Macy * We may end up here from metaslab_condense() without the
3880eda14cbcSMatt Macy * feature being active. In that case this is a no-op.
3881eda14cbcSMatt Macy */
3882716fd348SMartin Matuska if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
3883716fd348SMartin Matuska metaslab_unflushed_txg(msp) == 0)
3884eda14cbcSMatt Macy return;
3885eda14cbcSMatt Macy
3886716fd348SMartin Matuska metaslab_unflushed_bump(msp, tx, B_FALSE);
3887eda14cbcSMatt Macy }
3888eda14cbcSMatt Macy
3889eda14cbcSMatt Macy boolean_t
3890eda14cbcSMatt Macy metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3891eda14cbcSMatt Macy {
3892eda14cbcSMatt Macy spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3893eda14cbcSMatt Macy
3894eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock));
3895eda14cbcSMatt Macy ASSERT3U(spa_sync_pass(spa), ==, 1);
3896eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3897eda14cbcSMatt Macy
3898eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL);
3899eda14cbcSMatt Macy ASSERT(metaslab_unflushed_txg(msp) != 0);
3900eda14cbcSMatt Macy ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3901eda14cbcSMatt Macy
3902eda14cbcSMatt Macy /*
3903eda14cbcSMatt Macy * There is nothing wrong with flushing the same metaslab twice, as
3904eda14cbcSMatt Macy * this codepath should work in that case. However, the current
3905eda14cbcSMatt Macy * flushing scheme makes sure to avoid this situation as we would be
3906eda14cbcSMatt Macy * making all these calls without having anything meaningful to write
3907eda14cbcSMatt Macy * to disk. We assert this behavior here.
3908eda14cbcSMatt Macy */
3909eda14cbcSMatt Macy ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3910eda14cbcSMatt Macy
3911eda14cbcSMatt Macy /*
3912eda14cbcSMatt Macy * We cannot flush while loading, because then we would
3913eda14cbcSMatt Macy * not load the ms_unflushed_{allocs,frees}.
3914eda14cbcSMatt Macy */
3915eda14cbcSMatt Macy if (msp->ms_loading)
3916eda14cbcSMatt Macy return (B_FALSE);
3917eda14cbcSMatt Macy
3918eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3919eda14cbcSMatt Macy metaslab_verify_weight_and_frag(msp);
3920eda14cbcSMatt Macy
3921eda14cbcSMatt Macy /*
3922eda14cbcSMatt Macy * Metaslab condensing is effectively flushing. Therefore if the
3923eda14cbcSMatt Macy * metaslab can be condensed we can just condense it instead of
3924eda14cbcSMatt Macy * flushing it.
3925eda14cbcSMatt Macy *
3926eda14cbcSMatt Macy * Note that metaslab_condense() does call metaslab_flush_update()
3927eda14cbcSMatt Macy * so we can just return immediately after condensing. We also
3928eda14cbcSMatt Macy * don't need to care about setting ms_flushing or broadcasting
3929eda14cbcSMatt Macy * ms_flush_cv, even if we temporarily drop the ms_lock in
3930eda14cbcSMatt Macy * metaslab_condense(), as the metaslab is already loaded.
3931eda14cbcSMatt Macy */
3932eda14cbcSMatt Macy if (msp->ms_loaded && metaslab_should_condense(msp)) {
3933eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group;
3934eda14cbcSMatt Macy
3935eda14cbcSMatt Macy /*
3936eda14cbcSMatt Macy * For all the histogram operations below, refer to the
3937eda14cbcSMatt Macy * comments of metaslab_sync() where we follow a
3938eda14cbcSMatt Macy * similar procedure.
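 *
 * In short (paraphrasing that procedure): verify the group and class
 * histograms, remove this metaslab's contribution from them, condense,
 * rebuild the space map histogram from the condensed state, then add
 * the contribution back and verify again.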
3939eda14cbcSMatt Macy */ 3940eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 3941eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 3942eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp); 3943eda14cbcSMatt Macy 3944eda14cbcSMatt Macy metaslab_condense(msp, tx); 3945eda14cbcSMatt Macy 3946eda14cbcSMatt Macy space_map_histogram_clear(msp->ms_sm); 3947eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 3948eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_freed)); 3949eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3950eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, 3951eda14cbcSMatt Macy msp->ms_defer[t], tx); 3952eda14cbcSMatt Macy } 3953eda14cbcSMatt Macy metaslab_aux_histograms_update(msp); 3954eda14cbcSMatt Macy 3955eda14cbcSMatt Macy metaslab_group_histogram_add(mg, msp); 3956eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 3957eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 3958eda14cbcSMatt Macy 3959eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3960eda14cbcSMatt Macy 3961eda14cbcSMatt Macy /* 3962eda14cbcSMatt Macy * Since we recreated the histogram (and potentially 3963eda14cbcSMatt Macy * the ms_sm too while condensing) ensure that the 3964eda14cbcSMatt Macy * weight is updated too because we are not guaranteed 3965eda14cbcSMatt Macy * that this metaslab is dirty and will go through 3966eda14cbcSMatt Macy * metaslab_sync_done(). 3967eda14cbcSMatt Macy */ 3968eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp); 3969eda14cbcSMatt Macy return (B_TRUE); 3970eda14cbcSMatt Macy } 3971eda14cbcSMatt Macy 3972eda14cbcSMatt Macy msp->ms_flushing = B_TRUE; 3973eda14cbcSMatt Macy uint64_t sm_len_before = space_map_length(msp->ms_sm); 3974eda14cbcSMatt Macy 3975eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 3976eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC, 3977eda14cbcSMatt Macy SM_NO_VDEVID, tx); 3978eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE, 3979eda14cbcSMatt Macy SM_NO_VDEVID, tx); 3980eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 3981eda14cbcSMatt Macy 3982eda14cbcSMatt Macy uint64_t sm_len_after = space_map_length(msp->ms_sm); 3983eda14cbcSMatt Macy if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) { 3984eda14cbcSMatt Macy zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, " 3985eda14cbcSMatt Macy "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, " 398633b8c039SMartin Matuska "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx), 398733b8c039SMartin Matuska spa_name(spa), 398833b8c039SMartin Matuska (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 398933b8c039SMartin Matuska (u_longlong_t)msp->ms_id, 399033b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs), 399133b8c039SMartin Matuska (u_longlong_t)range_tree_space(msp->ms_unflushed_frees), 399233b8c039SMartin Matuska (u_longlong_t)(sm_len_after - sm_len_before)); 3993eda14cbcSMatt Macy } 3994eda14cbcSMatt Macy 3995eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 3996eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 3997eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 3998eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 3999eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 4000eda14cbcSMatt Macy range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 4001eda14cbcSMatt Macy 4002eda14cbcSMatt Macy metaslab_verify_space(msp, 
dmu_tx_get_txg(tx)); 4003eda14cbcSMatt Macy metaslab_verify_weight_and_frag(msp); 4004eda14cbcSMatt Macy 4005eda14cbcSMatt Macy metaslab_flush_update(msp, tx); 4006eda14cbcSMatt Macy 4007eda14cbcSMatt Macy metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 4008eda14cbcSMatt Macy metaslab_verify_weight_and_frag(msp); 4009eda14cbcSMatt Macy 4010eda14cbcSMatt Macy msp->ms_flushing = B_FALSE; 4011eda14cbcSMatt Macy cv_broadcast(&msp->ms_flush_cv); 4012eda14cbcSMatt Macy return (B_TRUE); 4013eda14cbcSMatt Macy } 4014eda14cbcSMatt Macy 4015eda14cbcSMatt Macy /* 4016eda14cbcSMatt Macy * Write a metaslab to disk in the context of the specified transaction group. 4017eda14cbcSMatt Macy */ 4018eda14cbcSMatt Macy void 4019eda14cbcSMatt Macy metaslab_sync(metaslab_t *msp, uint64_t txg) 4020eda14cbcSMatt Macy { 4021eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 4022eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 4023eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 4024eda14cbcSMatt Macy objset_t *mos = spa_meta_objset(spa); 4025eda14cbcSMatt Macy range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK]; 4026eda14cbcSMatt Macy dmu_tx_t *tx; 4027eda14cbcSMatt Macy 4028eda14cbcSMatt Macy ASSERT(!vd->vdev_ishole); 4029eda14cbcSMatt Macy 4030eda14cbcSMatt Macy /* 4031eda14cbcSMatt Macy * This metaslab has just been added so there's no work to do now. 4032eda14cbcSMatt Macy */ 4033f9693befSMartin Matuska if (msp->ms_new) { 4034f9693befSMartin Matuska ASSERT0(range_tree_space(alloctree)); 4035f9693befSMartin Matuska ASSERT0(range_tree_space(msp->ms_freeing)); 4036f9693befSMartin Matuska ASSERT0(range_tree_space(msp->ms_freed)); 4037f9693befSMartin Matuska ASSERT0(range_tree_space(msp->ms_checkpointing)); 4038f9693befSMartin Matuska ASSERT0(range_tree_space(msp->ms_trim)); 4039eda14cbcSMatt Macy return; 4040eda14cbcSMatt Macy } 4041eda14cbcSMatt Macy 4042eda14cbcSMatt Macy /* 4043eda14cbcSMatt Macy * Normally, we don't want to process a metaslab if there are no 4044eda14cbcSMatt Macy * allocations or frees to perform. However, if the metaslab is being 4045eda14cbcSMatt Macy * forced to condense, it's loaded and we're not beyond the final 4046eda14cbcSMatt Macy * dirty txg, we need to let it through. Not condensing beyond the 4047eda14cbcSMatt Macy * final dirty txg prevents an issue where metaslabs that need to be 4048eda14cbcSMatt Macy * condensed but were loaded for other reasons could cause a panic 4049eda14cbcSMatt Macy * here. By only checking the txg in that branch of the conditional, 4050eda14cbcSMatt Macy * we preserve the utility of the VERIFY statements in all other 4051eda14cbcSMatt Macy * cases. 4052eda14cbcSMatt Macy */ 4053eda14cbcSMatt Macy if (range_tree_is_empty(alloctree) && 4054eda14cbcSMatt Macy range_tree_is_empty(msp->ms_freeing) && 4055eda14cbcSMatt Macy range_tree_is_empty(msp->ms_checkpointing) && 4056eda14cbcSMatt Macy !(msp->ms_loaded && msp->ms_condense_wanted && 4057eda14cbcSMatt Macy txg <= spa_final_dirty_txg(spa))) 4058eda14cbcSMatt Macy return; 4059eda14cbcSMatt Macy 4060eda14cbcSMatt Macy 4061eda14cbcSMatt Macy VERIFY3U(txg, <=, spa_final_dirty_txg(spa)); 4062eda14cbcSMatt Macy 4063eda14cbcSMatt Macy /* 4064eda14cbcSMatt Macy * The only state that can actually be changing concurrently 4065eda14cbcSMatt Macy * with metaslab_sync() is the metaslab's ms_allocatable. No 4066eda14cbcSMatt Macy * other thread can be modifying this txg's alloc, freeing, 4067eda14cbcSMatt Macy * freed, or space_map_phys_t. 
We drop ms_lock whenever we 4068eda14cbcSMatt Macy * could call into the DMU, because the DMU can call down to 4069eda14cbcSMatt Macy * us (e.g. via zio_free()) at any time. 4070eda14cbcSMatt Macy * 4071eda14cbcSMatt Macy * The spa_vdev_remove_thread() can be reading metaslab state 4072eda14cbcSMatt Macy * concurrently, and it is locked out by the ms_sync_lock. 4073eda14cbcSMatt Macy * Note that the ms_lock is insufficient for this, because it 4074eda14cbcSMatt Macy * is dropped by space_map_write(). 4075eda14cbcSMatt Macy */ 4076eda14cbcSMatt Macy tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 4077eda14cbcSMatt Macy 4078eda14cbcSMatt Macy /* 4079eda14cbcSMatt Macy * Generate a log space map if one doesn't exist already. 4080eda14cbcSMatt Macy */ 4081eda14cbcSMatt Macy spa_generate_syncing_log_sm(spa, tx); 4082eda14cbcSMatt Macy 4083eda14cbcSMatt Macy if (msp->ms_sm == NULL) { 4084eda14cbcSMatt Macy uint64_t new_object = space_map_alloc(mos, 4085eda14cbcSMatt Macy spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ? 4086eda14cbcSMatt Macy zfs_metaslab_sm_blksz_with_log : 4087eda14cbcSMatt Macy zfs_metaslab_sm_blksz_no_log, tx); 4088eda14cbcSMatt Macy VERIFY3U(new_object, !=, 0); 4089eda14cbcSMatt Macy 4090eda14cbcSMatt Macy dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * 4091eda14cbcSMatt Macy msp->ms_id, sizeof (uint64_t), &new_object, tx); 4092eda14cbcSMatt Macy 4093eda14cbcSMatt Macy VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, 4094eda14cbcSMatt Macy msp->ms_start, msp->ms_size, vd->vdev_ashift)); 4095eda14cbcSMatt Macy ASSERT(msp->ms_sm != NULL); 4096eda14cbcSMatt Macy 4097eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 4098eda14cbcSMatt Macy ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 4099eda14cbcSMatt Macy ASSERT0(metaslab_allocated_space(msp)); 4100eda14cbcSMatt Macy } 4101eda14cbcSMatt Macy 4102eda14cbcSMatt Macy if (!range_tree_is_empty(msp->ms_checkpointing) && 4103eda14cbcSMatt Macy vd->vdev_checkpoint_sm == NULL) { 4104eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 4105eda14cbcSMatt Macy 4106eda14cbcSMatt Macy uint64_t new_object = space_map_alloc(mos, 4107eda14cbcSMatt Macy zfs_vdev_standard_sm_blksz, tx); 4108eda14cbcSMatt Macy VERIFY3U(new_object, !=, 0); 4109eda14cbcSMatt Macy 4110eda14cbcSMatt Macy VERIFY0(space_map_open(&vd->vdev_checkpoint_sm, 4111eda14cbcSMatt Macy mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift)); 4112eda14cbcSMatt Macy ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 4113eda14cbcSMatt Macy 4114eda14cbcSMatt Macy /* 4115eda14cbcSMatt Macy * We save the space map object as an entry in vdev_top_zap 4116eda14cbcSMatt Macy * so it can be retrieved when the pool is reopened after an 4117eda14cbcSMatt Macy * export or through zdb. 4118eda14cbcSMatt Macy */ 4119eda14cbcSMatt Macy VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, 4120eda14cbcSMatt Macy vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, 4121eda14cbcSMatt Macy sizeof (new_object), 1, &new_object, tx)); 4122eda14cbcSMatt Macy } 4123eda14cbcSMatt Macy 4124eda14cbcSMatt Macy mutex_enter(&msp->ms_sync_lock); 4125eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4126eda14cbcSMatt Macy 4127eda14cbcSMatt Macy /* 4128eda14cbcSMatt Macy * Note: metaslab_condense() clears the space map's histogram. 4129eda14cbcSMatt Macy * Therefore we must verify and remove this histogram before 4130eda14cbcSMatt Macy * condensing. 
4131eda14cbcSMatt Macy */ 4132eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 4133eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 4134eda14cbcSMatt Macy metaslab_group_histogram_remove(mg, msp); 4135eda14cbcSMatt Macy 4136eda14cbcSMatt Macy if (spa->spa_sync_pass == 1 && msp->ms_loaded && 4137eda14cbcSMatt Macy metaslab_should_condense(msp)) 4138eda14cbcSMatt Macy metaslab_condense(msp, tx); 4139eda14cbcSMatt Macy 4140eda14cbcSMatt Macy /* 4141eda14cbcSMatt Macy * We'll be going to disk to sync our space accounting, thus we 4142eda14cbcSMatt Macy * drop the ms_lock during that time so allocations coming from 4143eda14cbcSMatt Macy * open-context (ZIL) for future TXGs do not block. 4144eda14cbcSMatt Macy */ 4145eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4146eda14cbcSMatt Macy space_map_t *log_sm = spa_syncing_log_sm(spa); 4147eda14cbcSMatt Macy if (log_sm != NULL) { 4148eda14cbcSMatt Macy ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); 4149716fd348SMartin Matuska if (metaslab_unflushed_txg(msp) == 0) 4150716fd348SMartin Matuska metaslab_unflushed_add(msp, tx); 4151716fd348SMartin Matuska else if (!metaslab_unflushed_dirty(msp)) 4152716fd348SMartin Matuska metaslab_unflushed_bump(msp, tx, B_TRUE); 4153eda14cbcSMatt Macy 4154eda14cbcSMatt Macy space_map_write(log_sm, alloctree, SM_ALLOC, 4155eda14cbcSMatt Macy vd->vdev_id, tx); 4156eda14cbcSMatt Macy space_map_write(log_sm, msp->ms_freeing, SM_FREE, 4157eda14cbcSMatt Macy vd->vdev_id, tx); 4158eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4159eda14cbcSMatt Macy 4160eda14cbcSMatt Macy ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 4161eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp)); 4162eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused -= 4163eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 4164eda14cbcSMatt Macy range_tree_remove_xor_add(alloctree, 4165eda14cbcSMatt Macy msp->ms_unflushed_frees, msp->ms_unflushed_allocs); 4166eda14cbcSMatt Macy range_tree_remove_xor_add(msp->ms_freeing, 4167eda14cbcSMatt Macy msp->ms_unflushed_allocs, msp->ms_unflushed_frees); 4168eda14cbcSMatt Macy spa->spa_unflushed_stats.sus_memused += 4169eda14cbcSMatt Macy metaslab_unflushed_changes_memused(msp); 4170eda14cbcSMatt Macy } else { 4171eda14cbcSMatt Macy ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); 4172eda14cbcSMatt Macy 4173eda14cbcSMatt Macy space_map_write(msp->ms_sm, alloctree, SM_ALLOC, 4174eda14cbcSMatt Macy SM_NO_VDEVID, tx); 4175eda14cbcSMatt Macy space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, 4176eda14cbcSMatt Macy SM_NO_VDEVID, tx); 4177eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4178eda14cbcSMatt Macy } 4179eda14cbcSMatt Macy 4180eda14cbcSMatt Macy msp->ms_allocated_space += range_tree_space(alloctree); 4181eda14cbcSMatt Macy ASSERT3U(msp->ms_allocated_space, >=, 4182eda14cbcSMatt Macy range_tree_space(msp->ms_freeing)); 4183eda14cbcSMatt Macy msp->ms_allocated_space -= range_tree_space(msp->ms_freeing); 4184eda14cbcSMatt Macy 4185eda14cbcSMatt Macy if (!range_tree_is_empty(msp->ms_checkpointing)) { 4186eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 4187eda14cbcSMatt Macy ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 4188eda14cbcSMatt Macy 4189eda14cbcSMatt Macy /* 4190eda14cbcSMatt Macy * Since we are doing writes to disk and the ms_checkpointing 4191eda14cbcSMatt Macy * tree won't be changing during that time, we drop the 4192eda14cbcSMatt Macy * ms_lock while writing to the checkpoint space map, for the 4193eda14cbcSMatt Macy * same 
reason mentioned above. 4194eda14cbcSMatt Macy */ 4195eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4196eda14cbcSMatt Macy space_map_write(vd->vdev_checkpoint_sm, 4197eda14cbcSMatt Macy msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx); 4198eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4199eda14cbcSMatt Macy 4200eda14cbcSMatt Macy spa->spa_checkpoint_info.sci_dspace += 4201eda14cbcSMatt Macy range_tree_space(msp->ms_checkpointing); 4202eda14cbcSMatt Macy vd->vdev_stat.vs_checkpoint_space += 4203eda14cbcSMatt Macy range_tree_space(msp->ms_checkpointing); 4204eda14cbcSMatt Macy ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==, 4205eda14cbcSMatt Macy -space_map_allocated(vd->vdev_checkpoint_sm)); 4206eda14cbcSMatt Macy 4207eda14cbcSMatt Macy range_tree_vacate(msp->ms_checkpointing, NULL, NULL); 4208eda14cbcSMatt Macy } 4209eda14cbcSMatt Macy 4210eda14cbcSMatt Macy if (msp->ms_loaded) { 4211eda14cbcSMatt Macy /* 4212eda14cbcSMatt Macy * When the space map is loaded, we have an accurate 4213eda14cbcSMatt Macy * histogram in the range tree. This gives us an opportunity 4214eda14cbcSMatt Macy * to bring the space map's histogram up-to-date so we clear 4215eda14cbcSMatt Macy * it first before updating it. 4216eda14cbcSMatt Macy */ 4217eda14cbcSMatt Macy space_map_histogram_clear(msp->ms_sm); 4218eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 4219eda14cbcSMatt Macy 4220eda14cbcSMatt Macy /* 4221eda14cbcSMatt Macy * Since we've cleared the histogram we need to add back 4222eda14cbcSMatt Macy * any free space that has already been processed, plus 4223eda14cbcSMatt Macy * any deferred space. This allows the on-disk histogram 4224eda14cbcSMatt Macy * to accurately reflect all free space even if some space 4225eda14cbcSMatt Macy * is not yet available for allocation (i.e. deferred). 4226eda14cbcSMatt Macy */ 4227eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx); 4228eda14cbcSMatt Macy 4229eda14cbcSMatt Macy /* 4230eda14cbcSMatt Macy * Add back any deferred free space that has not been 4231eda14cbcSMatt Macy * added back into the in-core free tree yet. This will 4232eda14cbcSMatt Macy * ensure that we don't end up with a space map histogram 4233eda14cbcSMatt Macy * that is completely empty unless the metaslab is fully 4234eda14cbcSMatt Macy * allocated. 4235eda14cbcSMatt Macy */ 4236eda14cbcSMatt Macy for (int t = 0; t < TXG_DEFER_SIZE; t++) { 4237eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, 4238eda14cbcSMatt Macy msp->ms_defer[t], tx); 4239eda14cbcSMatt Macy } 4240eda14cbcSMatt Macy } 4241eda14cbcSMatt Macy 4242eda14cbcSMatt Macy /* 4243eda14cbcSMatt Macy * Always add the free space from this sync pass to the space 4244eda14cbcSMatt Macy * map histogram. We want to make sure that the on-disk histogram 4245eda14cbcSMatt Macy * accounts for all free space. If the space map is not loaded, 4246eda14cbcSMatt Macy * then we will lose some accuracy but will correct it the next 4247eda14cbcSMatt Macy * time we load the space map. 
4248eda14cbcSMatt Macy */ 4249eda14cbcSMatt Macy space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx); 4250eda14cbcSMatt Macy metaslab_aux_histograms_update(msp); 4251eda14cbcSMatt Macy 4252eda14cbcSMatt Macy metaslab_group_histogram_add(mg, msp); 4253eda14cbcSMatt Macy metaslab_group_histogram_verify(mg); 4254eda14cbcSMatt Macy metaslab_class_histogram_verify(mg->mg_class); 4255eda14cbcSMatt Macy 4256eda14cbcSMatt Macy /* 4257eda14cbcSMatt Macy * For sync pass 1, we avoid traversing this txg's free range tree 4258eda14cbcSMatt Macy * and instead will just swap the pointers for freeing and freed. 4259eda14cbcSMatt Macy * We can safely do this since the freed_tree is guaranteed to be 4260eda14cbcSMatt Macy * empty on the initial pass. 4261eda14cbcSMatt Macy * 4262eda14cbcSMatt Macy * Keep in mind that even if we are currently using a log spacemap 4263eda14cbcSMatt Macy * we want current frees to end up in the ms_allocatable (but not 4264eda14cbcSMatt Macy * get appended to the ms_sm) so their ranges can be reused as usual. 4265eda14cbcSMatt Macy */ 4266eda14cbcSMatt Macy if (spa_sync_pass(spa) == 1) { 4267eda14cbcSMatt Macy range_tree_swap(&msp->ms_freeing, &msp->ms_freed); 4268eda14cbcSMatt Macy ASSERT0(msp->ms_allocated_this_txg); 4269eda14cbcSMatt Macy } else { 4270eda14cbcSMatt Macy range_tree_vacate(msp->ms_freeing, 4271eda14cbcSMatt Macy range_tree_add, msp->ms_freed); 4272eda14cbcSMatt Macy } 4273eda14cbcSMatt Macy msp->ms_allocated_this_txg += range_tree_space(alloctree); 4274eda14cbcSMatt Macy range_tree_vacate(alloctree, NULL, NULL); 4275eda14cbcSMatt Macy 4276eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); 4277eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg) 4278eda14cbcSMatt Macy & TXG_MASK])); 4279eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freeing)); 4280eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_checkpointing)); 4281eda14cbcSMatt Macy 4282eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 4283eda14cbcSMatt Macy 4284eda14cbcSMatt Macy /* 4285eda14cbcSMatt Macy * Verify that the space map object ID has been recorded in the 4286eda14cbcSMatt Macy * vdev_ms_array. 
4287eda14cbcSMatt Macy */ 4288eda14cbcSMatt Macy uint64_t object; 4289eda14cbcSMatt Macy VERIFY0(dmu_read(mos, vd->vdev_ms_array, 4290eda14cbcSMatt Macy msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0)); 4291eda14cbcSMatt Macy VERIFY3U(object, ==, space_map_object(msp->ms_sm)); 4292eda14cbcSMatt Macy 4293eda14cbcSMatt Macy mutex_exit(&msp->ms_sync_lock); 4294eda14cbcSMatt Macy dmu_tx_commit(tx); 4295eda14cbcSMatt Macy } 4296eda14cbcSMatt Macy 4297eda14cbcSMatt Macy static void 4298eda14cbcSMatt Macy metaslab_evict(metaslab_t *msp, uint64_t txg) 4299eda14cbcSMatt Macy { 4300eda14cbcSMatt Macy if (!msp->ms_loaded || msp->ms_disabled != 0) 4301eda14cbcSMatt Macy return; 4302eda14cbcSMatt Macy 4303eda14cbcSMatt Macy for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { 4304eda14cbcSMatt Macy VERIFY0(range_tree_space( 4305eda14cbcSMatt Macy msp->ms_allocating[(txg + t) & TXG_MASK])); 4306eda14cbcSMatt Macy } 4307eda14cbcSMatt Macy if (msp->ms_allocator != -1) 4308eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK); 4309eda14cbcSMatt Macy 4310eda14cbcSMatt Macy if (!metaslab_debug_unload) 4311eda14cbcSMatt Macy metaslab_unload(msp); 4312eda14cbcSMatt Macy } 4313eda14cbcSMatt Macy 4314eda14cbcSMatt Macy /* 4315eda14cbcSMatt Macy * Called after a transaction group has completely synced to mark 4316eda14cbcSMatt Macy * all of the metaslab's free space as usable. 4317eda14cbcSMatt Macy */ 4318eda14cbcSMatt Macy void 4319eda14cbcSMatt Macy metaslab_sync_done(metaslab_t *msp, uint64_t txg) 4320eda14cbcSMatt Macy { 4321eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 4322eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 4323eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 4324eda14cbcSMatt Macy range_tree_t **defer_tree; 4325eda14cbcSMatt Macy int64_t alloc_delta, defer_delta; 4326eda14cbcSMatt Macy boolean_t defer_allowed = B_TRUE; 4327eda14cbcSMatt Macy 4328eda14cbcSMatt Macy ASSERT(!vd->vdev_ishole); 4329eda14cbcSMatt Macy 4330eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 4331eda14cbcSMatt Macy 4332f9693befSMartin Matuska if (msp->ms_new) { 4333f9693befSMartin Matuska /* this is a new metaslab, add its capacity to the vdev */ 4334eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size); 4335f9693befSMartin Matuska 4336f9693befSMartin Matuska /* there should be no allocations nor frees at this point */ 4337f9693befSMartin Matuska VERIFY0(msp->ms_allocated_this_txg); 4338f9693befSMartin Matuska VERIFY0(range_tree_space(msp->ms_freed)); 4339eda14cbcSMatt Macy } 4340f9693befSMartin Matuska 4341eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freeing)); 4342eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_checkpointing)); 4343eda14cbcSMatt Macy 4344eda14cbcSMatt Macy defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE]; 4345eda14cbcSMatt Macy 4346eda14cbcSMatt Macy uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - 4347eda14cbcSMatt Macy metaslab_class_get_alloc(spa_normal_class(spa)); 4348e716630dSMartin Matuska if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing || 4349e716630dSMartin Matuska vd->vdev_rz_expanding) { 4350eda14cbcSMatt Macy defer_allowed = B_FALSE; 4351eda14cbcSMatt Macy } 4352eda14cbcSMatt Macy 4353eda14cbcSMatt Macy defer_delta = 0; 4354eda14cbcSMatt Macy alloc_delta = msp->ms_allocated_this_txg - 4355eda14cbcSMatt Macy range_tree_space(msp->ms_freed); 4356eda14cbcSMatt Macy 4357eda14cbcSMatt Macy if (defer_allowed) { 4358eda14cbcSMatt Macy defer_delta = range_tree_space(msp->ms_freed) - 
4359eda14cbcSMatt Macy range_tree_space(*defer_tree);
4360eda14cbcSMatt Macy } else {
4361eda14cbcSMatt Macy defer_delta -= range_tree_space(*defer_tree);
4362eda14cbcSMatt Macy }
4363eda14cbcSMatt Macy metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4364eda14cbcSMatt Macy defer_delta, 0);
4365eda14cbcSMatt Macy
4366eda14cbcSMatt Macy if (spa_syncing_log_sm(spa) == NULL) {
4367eda14cbcSMatt Macy /*
4368eda14cbcSMatt Macy * If there's a metaslab_load() in progress and we don't have
4369eda14cbcSMatt Macy * a log space map, it means that we probably wrote to the
4370eda14cbcSMatt Macy * metaslab's space map. If this is the case, we need to
4371eda14cbcSMatt Macy * make sure that we wait for the load to complete so that we
4372eda14cbcSMatt Macy * have a consistent view at the in-core side of the metaslab.
4373eda14cbcSMatt Macy */
4374eda14cbcSMatt Macy metaslab_load_wait(msp);
4375eda14cbcSMatt Macy } else {
4376eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4377eda14cbcSMatt Macy }
4378eda14cbcSMatt Macy
4379eda14cbcSMatt Macy /*
4380eda14cbcSMatt Macy * When auto-trimming is enabled, free ranges which are added to
4381eda14cbcSMatt Macy * ms_allocatable are also added to ms_trim. The ms_trim tree is
4382eda14cbcSMatt Macy * periodically consumed by the vdev_autotrim_thread() which issues
4383eda14cbcSMatt Macy * trims for all ranges and then vacates the tree. The ms_trim tree
4384eda14cbcSMatt Macy * can be discarded at any time with the sole consequence of recent
4385eda14cbcSMatt Macy * frees not being trimmed.
4386eda14cbcSMatt Macy */
4387eda14cbcSMatt Macy if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4388eda14cbcSMatt Macy range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4389eda14cbcSMatt Macy if (!defer_allowed) {
4390eda14cbcSMatt Macy range_tree_walk(msp->ms_freed, range_tree_add,
4391eda14cbcSMatt Macy msp->ms_trim);
4392eda14cbcSMatt Macy }
4393eda14cbcSMatt Macy } else {
4394eda14cbcSMatt Macy range_tree_vacate(msp->ms_trim, NULL, NULL);
4395eda14cbcSMatt Macy }
4396eda14cbcSMatt Macy
4397eda14cbcSMatt Macy /*
4398eda14cbcSMatt Macy * Move the frees from the defer_tree back to the free
4399eda14cbcSMatt Macy * range tree (if it's loaded). Swap the freed_tree and
4400eda14cbcSMatt Macy * the defer_tree -- this is safe to do because we've
4401eda14cbcSMatt Macy * just emptied out the defer_tree.
4402eda14cbcSMatt Macy */
4403eda14cbcSMatt Macy range_tree_vacate(*defer_tree,
4404eda14cbcSMatt Macy msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4405eda14cbcSMatt Macy if (defer_allowed) {
4406eda14cbcSMatt Macy range_tree_swap(&msp->ms_freed, defer_tree);
4407eda14cbcSMatt Macy } else {
4408eda14cbcSMatt Macy range_tree_vacate(msp->ms_freed,
4409eda14cbcSMatt Macy msp->ms_loaded ? range_tree_add : NULL,
4410eda14cbcSMatt Macy msp->ms_allocatable);
4411eda14cbcSMatt Macy }
4412eda14cbcSMatt Macy
4413eda14cbcSMatt Macy msp->ms_synced_length = space_map_length(msp->ms_sm);
4414eda14cbcSMatt Macy
4415eda14cbcSMatt Macy msp->ms_deferspace += defer_delta;
4416eda14cbcSMatt Macy ASSERT3S(msp->ms_deferspace, >=, 0);
4417eda14cbcSMatt Macy ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4418eda14cbcSMatt Macy if (msp->ms_deferspace != 0) {
4419eda14cbcSMatt Macy /*
4420eda14cbcSMatt Macy * Keep syncing this metaslab until all deferred frees
4421eda14cbcSMatt Macy * are back in circulation.
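 * Each pass through metaslab_sync_done() returns one ms_defer bucket
 * (indexed by txg % TXG_DEFER_SIZE) to ms_allocatable, so the
 * remaining ms_deferspace drains over the next TXG_DEFER_SIZE txgs.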
4422eda14cbcSMatt Macy */
4423eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4424eda14cbcSMatt Macy }
4425eda14cbcSMatt Macy metaslab_aux_histograms_update_done(msp, defer_allowed);
4426eda14cbcSMatt Macy
4427eda14cbcSMatt Macy if (msp->ms_new) {
4428eda14cbcSMatt Macy msp->ms_new = B_FALSE;
4429eda14cbcSMatt Macy mutex_enter(&mg->mg_lock);
4430eda14cbcSMatt Macy mg->mg_ms_ready++;
4431eda14cbcSMatt Macy mutex_exit(&mg->mg_lock);
4432eda14cbcSMatt Macy }
4433eda14cbcSMatt Macy
4434eda14cbcSMatt Macy /*
4435eda14cbcSMatt Macy * Re-sort metaslab within its group now that we've adjusted
4436eda14cbcSMatt Macy * its allocatable space.
4437eda14cbcSMatt Macy */
4438eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(msp);
4439eda14cbcSMatt Macy
4440eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4441eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freeing));
4442eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_freed));
4443eda14cbcSMatt Macy ASSERT0(range_tree_space(msp->ms_checkpointing));
4444eda14cbcSMatt Macy msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4445eda14cbcSMatt Macy msp->ms_allocated_this_txg = 0;
4446eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
4447eda14cbcSMatt Macy }
4448eda14cbcSMatt Macy
4449eda14cbcSMatt Macy void
4450eda14cbcSMatt Macy metaslab_sync_reassess(metaslab_group_t *mg)
4451eda14cbcSMatt Macy {
4452eda14cbcSMatt Macy spa_t *spa = mg->mg_class->mc_spa;
4453eda14cbcSMatt Macy
4454eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4455eda14cbcSMatt Macy metaslab_group_alloc_update(mg);
4456eda14cbcSMatt Macy mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4457eda14cbcSMatt Macy
4458eda14cbcSMatt Macy /*
4459eda14cbcSMatt Macy * Preload the next potential metaslabs but only on active
4460eda14cbcSMatt Macy * metaslab groups. We can get into a state where the metaslab
4461eda14cbcSMatt Macy * is no longer active since we dirty metaslabs as we remove
4462eda14cbcSMatt Macy * a device, thus potentially making the metaslab group eligible
4463eda14cbcSMatt Macy * for preloading.
4464eda14cbcSMatt Macy */
4465eda14cbcSMatt Macy if (mg->mg_activation_count > 0) {
4466eda14cbcSMatt Macy metaslab_group_preload(mg);
4467eda14cbcSMatt Macy }
4468eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG);
4469eda14cbcSMatt Macy }
4470eda14cbcSMatt Macy
4471eda14cbcSMatt Macy /*
4472eda14cbcSMatt Macy * When writing a ditto block (i.e. more than one DVA for a given BP) on
4473eda14cbcSMatt Macy * the same vdev as an existing DVA of this BP, try to allocate it
4474eda14cbcSMatt Macy * on a different metaslab than existing DVAs (i.e. a unique metaslab).
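 * metaslab_is_unique() below derives the metaslab ID of an existing
 * DVA from its offset (offset >> vdev_ms_shift) and compares it
 * against this metaslab's ms_id.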
4475eda14cbcSMatt Macy */
4476eda14cbcSMatt Macy static boolean_t
4477eda14cbcSMatt Macy metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4478eda14cbcSMatt Macy {
4479eda14cbcSMatt Macy uint64_t dva_ms_id;
4480eda14cbcSMatt Macy
4481eda14cbcSMatt Macy if (DVA_GET_ASIZE(dva) == 0)
4482eda14cbcSMatt Macy return (B_TRUE);
4483eda14cbcSMatt Macy
4484eda14cbcSMatt Macy if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4485eda14cbcSMatt Macy return (B_TRUE);
4486eda14cbcSMatt Macy
4487eda14cbcSMatt Macy dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4488eda14cbcSMatt Macy
4489eda14cbcSMatt Macy return (msp->ms_id != dva_ms_id);
4490eda14cbcSMatt Macy }
4491eda14cbcSMatt Macy
4492eda14cbcSMatt Macy /*
4493eda14cbcSMatt Macy * ==========================================================================
4494eda14cbcSMatt Macy * Metaslab allocation tracing facility
4495eda14cbcSMatt Macy * ==========================================================================
4496eda14cbcSMatt Macy */
4497eda14cbcSMatt Macy
4498eda14cbcSMatt Macy /*
4499eda14cbcSMatt Macy * Add an allocation trace element to the allocation tracing list.
4500eda14cbcSMatt Macy */
4501eda14cbcSMatt Macy static void
4502eda14cbcSMatt Macy metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4503eda14cbcSMatt Macy metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4504eda14cbcSMatt Macy int allocator)
4505eda14cbcSMatt Macy {
4506eda14cbcSMatt Macy metaslab_alloc_trace_t *mat;
4507eda14cbcSMatt Macy
4508eda14cbcSMatt Macy if (!metaslab_trace_enabled)
4509eda14cbcSMatt Macy return;
4510eda14cbcSMatt Macy
4511eda14cbcSMatt Macy /*
4512eda14cbcSMatt Macy * When the tracing list reaches its maximum we remove
4513eda14cbcSMatt Macy * the second element in the list before adding a new one.
4514eda14cbcSMatt Macy * By removing the second element we preserve the original
4515eda14cbcSMatt Macy * entry as a clue to what allocation steps have already been
4516eda14cbcSMatt Macy * performed.
4517eda14cbcSMatt Macy */
4518eda14cbcSMatt Macy if (zal->zal_size == metaslab_trace_max_entries) {
4519eda14cbcSMatt Macy metaslab_alloc_trace_t *mat_next;
4520eda14cbcSMatt Macy #ifdef ZFS_DEBUG
4521eda14cbcSMatt Macy panic("too many entries in allocation list");
4522eda14cbcSMatt Macy #endif
4523eda14cbcSMatt Macy METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4524eda14cbcSMatt Macy zal->zal_size--;
4525eda14cbcSMatt Macy mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4526eda14cbcSMatt Macy list_remove(&zal->zal_list, mat_next);
4527eda14cbcSMatt Macy kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4528eda14cbcSMatt Macy }
4529eda14cbcSMatt Macy
4530eda14cbcSMatt Macy mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4531eda14cbcSMatt Macy list_link_init(&mat->mat_list_node);
4532eda14cbcSMatt Macy mat->mat_mg = mg;
4533eda14cbcSMatt Macy mat->mat_msp = msp;
4534eda14cbcSMatt Macy mat->mat_size = psize;
4535eda14cbcSMatt Macy mat->mat_dva_id = dva_id;
4536eda14cbcSMatt Macy mat->mat_offset = offset;
4537eda14cbcSMatt Macy mat->mat_weight = 0;
4538eda14cbcSMatt Macy mat->mat_allocator = allocator;
4539eda14cbcSMatt Macy
4540eda14cbcSMatt Macy if (msp != NULL)
4541eda14cbcSMatt Macy mat->mat_weight = msp->ms_weight;
4542eda14cbcSMatt Macy
4543eda14cbcSMatt Macy /*
4544eda14cbcSMatt Macy * The list is part of the zio so locking is not required. Only
4545eda14cbcSMatt Macy * a single thread will perform allocations for a given zio.
4546eda14cbcSMatt Macy */ 4547eda14cbcSMatt Macy list_insert_tail(&zal->zal_list, mat); 4548eda14cbcSMatt Macy zal->zal_size++; 4549eda14cbcSMatt Macy 4550eda14cbcSMatt Macy ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries); 4551eda14cbcSMatt Macy } 4552eda14cbcSMatt Macy 4553eda14cbcSMatt Macy void 4554eda14cbcSMatt Macy metaslab_trace_init(zio_alloc_list_t *zal) 4555eda14cbcSMatt Macy { 4556eda14cbcSMatt Macy list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t), 4557eda14cbcSMatt Macy offsetof(metaslab_alloc_trace_t, mat_list_node)); 4558eda14cbcSMatt Macy zal->zal_size = 0; 4559eda14cbcSMatt Macy } 4560eda14cbcSMatt Macy 4561eda14cbcSMatt Macy void 4562eda14cbcSMatt Macy metaslab_trace_fini(zio_alloc_list_t *zal) 4563eda14cbcSMatt Macy { 4564eda14cbcSMatt Macy metaslab_alloc_trace_t *mat; 4565eda14cbcSMatt Macy 4566eda14cbcSMatt Macy while ((mat = list_remove_head(&zal->zal_list)) != NULL) 4567eda14cbcSMatt Macy kmem_cache_free(metaslab_alloc_trace_cache, mat); 4568eda14cbcSMatt Macy list_destroy(&zal->zal_list); 4569eda14cbcSMatt Macy zal->zal_size = 0; 4570eda14cbcSMatt Macy } 4571eda14cbcSMatt Macy 4572eda14cbcSMatt Macy /* 4573eda14cbcSMatt Macy * ========================================================================== 4574eda14cbcSMatt Macy * Metaslab block operations 4575eda14cbcSMatt Macy * ========================================================================== 4576eda14cbcSMatt Macy */ 4577eda14cbcSMatt Macy 4578eda14cbcSMatt Macy static void 4579a0b956f5SMartin Matuska metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag, 4580a0b956f5SMartin Matuska int flags, int allocator) 4581eda14cbcSMatt Macy { 4582eda14cbcSMatt Macy if (!(flags & METASLAB_ASYNC_ALLOC) || 4583eda14cbcSMatt Macy (flags & METASLAB_DONT_THROTTLE)) 4584eda14cbcSMatt Macy return; 4585eda14cbcSMatt Macy 4586eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4587eda14cbcSMatt Macy if (!mg->mg_class->mc_alloc_throttle_enabled) 4588eda14cbcSMatt Macy return; 4589eda14cbcSMatt Macy 4590eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4591eda14cbcSMatt Macy (void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag); 4592eda14cbcSMatt Macy } 4593eda14cbcSMatt Macy 4594eda14cbcSMatt Macy static void 4595eda14cbcSMatt Macy metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator) 4596eda14cbcSMatt Macy { 4597eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 45987877fdebSMatt Macy metaslab_class_allocator_t *mca = 45997877fdebSMatt Macy &mg->mg_class->mc_allocator[allocator]; 4600eda14cbcSMatt Macy uint64_t max = mg->mg_max_alloc_queue_depth; 4601eda14cbcSMatt Macy uint64_t cur = mga->mga_cur_max_alloc_queue_depth; 4602eda14cbcSMatt Macy while (cur < max) { 4603eda14cbcSMatt Macy if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth, 4604eda14cbcSMatt Macy cur, cur + 1) == cur) { 46057877fdebSMatt Macy atomic_inc_64(&mca->mca_alloc_max_slots); 4606eda14cbcSMatt Macy return; 4607eda14cbcSMatt Macy } 4608eda14cbcSMatt Macy cur = mga->mga_cur_max_alloc_queue_depth; 4609eda14cbcSMatt Macy } 4610eda14cbcSMatt Macy } 4611eda14cbcSMatt Macy 4612eda14cbcSMatt Macy void 4613a0b956f5SMartin Matuska metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag, 4614a0b956f5SMartin Matuska int flags, int allocator, boolean_t io_complete) 4615eda14cbcSMatt Macy { 4616eda14cbcSMatt Macy if (!(flags & METASLAB_ASYNC_ALLOC) || 4617eda14cbcSMatt Macy (flags & METASLAB_DONT_THROTTLE)) 
4618eda14cbcSMatt Macy return; 4619eda14cbcSMatt Macy 4620eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4621eda14cbcSMatt Macy if (!mg->mg_class->mc_alloc_throttle_enabled) 4622eda14cbcSMatt Macy return; 4623eda14cbcSMatt Macy 4624eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4625eda14cbcSMatt Macy (void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag); 4626eda14cbcSMatt Macy if (io_complete) 4627eda14cbcSMatt Macy metaslab_group_increment_qdepth(mg, allocator); 4628eda14cbcSMatt Macy } 4629eda14cbcSMatt Macy 4630eda14cbcSMatt Macy void 4631a0b956f5SMartin Matuska metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag, 4632eda14cbcSMatt Macy int allocator) 4633eda14cbcSMatt Macy { 4634eda14cbcSMatt Macy #ifdef ZFS_DEBUG 4635eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 4636eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 4637eda14cbcSMatt Macy 4638eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 4639eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(&dva[d]); 4640eda14cbcSMatt Macy metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4641eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4642eda14cbcSMatt Macy VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag)); 4643eda14cbcSMatt Macy } 4644eda14cbcSMatt Macy #endif 4645eda14cbcSMatt Macy } 4646eda14cbcSMatt Macy 4647eda14cbcSMatt Macy static uint64_t 4648eda14cbcSMatt Macy metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) 4649eda14cbcSMatt Macy { 4650eda14cbcSMatt Macy uint64_t start; 4651eda14cbcSMatt Macy range_tree_t *rt = msp->ms_allocatable; 4652eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 4653eda14cbcSMatt Macy 4654eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 4655eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 4656eda14cbcSMatt Macy VERIFY0(msp->ms_disabled); 4657e716630dSMartin Matuska VERIFY0(msp->ms_new); 4658eda14cbcSMatt Macy 4659eda14cbcSMatt Macy start = mc->mc_ops->msop_alloc(msp, size); 4660eda14cbcSMatt Macy if (start != -1ULL) { 4661eda14cbcSMatt Macy metaslab_group_t *mg = msp->ms_group; 4662eda14cbcSMatt Macy vdev_t *vd = mg->mg_vd; 4663eda14cbcSMatt Macy 4664eda14cbcSMatt Macy VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); 4665eda14cbcSMatt Macy VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 4666eda14cbcSMatt Macy VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); 4667eda14cbcSMatt Macy range_tree_remove(rt, start, size); 4668eda14cbcSMatt Macy range_tree_clear(msp->ms_trim, start, size); 4669eda14cbcSMatt Macy 4670eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 4671eda14cbcSMatt Macy vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); 4672eda14cbcSMatt Macy 4673eda14cbcSMatt Macy range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size); 4674eda14cbcSMatt Macy msp->ms_allocating_total += size; 4675eda14cbcSMatt Macy 4676eda14cbcSMatt Macy /* Track the last successful allocation */ 4677eda14cbcSMatt Macy msp->ms_alloc_txg = txg; 4678eda14cbcSMatt Macy metaslab_verify_space(msp, txg); 4679eda14cbcSMatt Macy } 4680eda14cbcSMatt Macy 4681eda14cbcSMatt Macy /* 4682eda14cbcSMatt Macy * Now that we've attempted the allocation we need to update the 4683eda14cbcSMatt Macy * metaslab's maximum block size since it may have changed. 
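 * ms_max_size is consulted by metaslab_should_allocate(), so keeping
 * it current lets later attempts cheaply skip a metaslab that can no
 * longer satisfy a request of a given size.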
4684eda14cbcSMatt Macy */ 4685eda14cbcSMatt Macy msp->ms_max_size = metaslab_largest_allocatable(msp); 4686eda14cbcSMatt Macy return (start); 4687eda14cbcSMatt Macy } 4688eda14cbcSMatt Macy 4689eda14cbcSMatt Macy /* 4690eda14cbcSMatt Macy * Find the metaslab with the highest weight that is less than what we've 4691eda14cbcSMatt Macy * already tried. In the common case, this means that we will examine each 4692eda14cbcSMatt Macy * metaslab at most once. Note that concurrent callers could reorder metaslabs 4693eda14cbcSMatt Macy * by activation/passivation once we have dropped the mg_lock. If a metaslab is 4694eda14cbcSMatt Macy * activated by another thread, and we fail to allocate from the metaslab we 4695eda14cbcSMatt Macy * have selected, we may not try the newly-activated metaslab, and instead 4696eda14cbcSMatt Macy * activate another metaslab. This is not optimal, but generally does not cause 4697eda14cbcSMatt Macy * any problems (a possible exception being if every metaslab is completely full 4698eda14cbcSMatt Macy * except for the newly-activated metaslab which we fail to examine). 4699eda14cbcSMatt Macy */ 4700eda14cbcSMatt Macy static metaslab_t * 4701eda14cbcSMatt Macy find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, 4702eda14cbcSMatt Macy dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator, 4703eda14cbcSMatt Macy boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search, 4704eda14cbcSMatt Macy boolean_t *was_active) 4705eda14cbcSMatt Macy { 4706eda14cbcSMatt Macy avl_index_t idx; 4707eda14cbcSMatt Macy avl_tree_t *t = &mg->mg_metaslab_tree; 4708eda14cbcSMatt Macy metaslab_t *msp = avl_find(t, search, &idx); 4709eda14cbcSMatt Macy if (msp == NULL) 4710eda14cbcSMatt Macy msp = avl_nearest(t, idx, AVL_AFTER); 4711eda14cbcSMatt Macy 4712be181ee2SMartin Matuska uint_t tries = 0; 4713eda14cbcSMatt Macy for (; msp != NULL; msp = AVL_NEXT(t, msp)) { 4714eda14cbcSMatt Macy int i; 47157877fdebSMatt Macy 47167877fdebSMatt Macy if (!try_hard && tries > zfs_metaslab_find_max_tries) { 47177877fdebSMatt Macy METASLABSTAT_BUMP(metaslabstat_too_many_tries); 47187877fdebSMatt Macy return (NULL); 47197877fdebSMatt Macy } 47207877fdebSMatt Macy tries++; 47217877fdebSMatt Macy 4722eda14cbcSMatt Macy if (!metaslab_should_allocate(msp, asize, try_hard)) { 4723eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d, 4724eda14cbcSMatt Macy TRACE_TOO_SMALL, allocator); 4725eda14cbcSMatt Macy continue; 4726eda14cbcSMatt Macy } 4727eda14cbcSMatt Macy 4728eda14cbcSMatt Macy /* 4729e716630dSMartin Matuska * If the selected metaslab is condensing or disabled, or 4730e716630dSMartin Matuska * hasn't gone through a metaslab_sync_done(), then skip it. 4731eda14cbcSMatt Macy */ 4732e716630dSMartin Matuska if (msp->ms_condensing || msp->ms_disabled > 0 || msp->ms_new) 4733eda14cbcSMatt Macy continue; 4734eda14cbcSMatt Macy 4735eda14cbcSMatt Macy *was_active = msp->ms_allocator != -1; 4736eda14cbcSMatt Macy /* 4737eda14cbcSMatt Macy * If we're activating as primary, this is our first allocation 4738eda14cbcSMatt Macy * from this disk, so we don't need to check how close we are. 4739eda14cbcSMatt Macy * If the metaslab under consideration was already active, 4740eda14cbcSMatt Macy * we're getting desperate enough to steal another allocator's 4741eda14cbcSMatt Macy * metaslab, so we still don't care about distances. 
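 * Otherwise we fall through to the want_unique checks below,
 * comparing this metaslab against each DVA already placed in the BP.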
4742eda14cbcSMatt Macy */ 4743eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active) 4744eda14cbcSMatt Macy break; 4745eda14cbcSMatt Macy 4746eda14cbcSMatt Macy for (i = 0; i < d; i++) { 4747eda14cbcSMatt Macy if (want_unique && 4748eda14cbcSMatt Macy !metaslab_is_unique(msp, &dva[i])) 4749eda14cbcSMatt Macy break; /* try another metaslab */ 4750eda14cbcSMatt Macy } 4751eda14cbcSMatt Macy if (i == d) 4752eda14cbcSMatt Macy break; 4753eda14cbcSMatt Macy } 4754eda14cbcSMatt Macy 4755eda14cbcSMatt Macy if (msp != NULL) { 4756eda14cbcSMatt Macy search->ms_weight = msp->ms_weight; 4757eda14cbcSMatt Macy search->ms_start = msp->ms_start + 1; 4758eda14cbcSMatt Macy search->ms_allocator = msp->ms_allocator; 4759eda14cbcSMatt Macy search->ms_primary = msp->ms_primary; 4760eda14cbcSMatt Macy } 4761eda14cbcSMatt Macy return (msp); 4762eda14cbcSMatt Macy } 4763eda14cbcSMatt Macy 4764eda14cbcSMatt Macy static void 4765eda14cbcSMatt Macy metaslab_active_mask_verify(metaslab_t *msp) 4766eda14cbcSMatt Macy { 4767eda14cbcSMatt Macy ASSERT(MUTEX_HELD(&msp->ms_lock)); 4768eda14cbcSMatt Macy 4769eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 4770eda14cbcSMatt Macy return; 4771eda14cbcSMatt Macy 4772eda14cbcSMatt Macy if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) 4773eda14cbcSMatt Macy return; 4774eda14cbcSMatt Macy 4775eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) { 4776eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 4777eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); 4778eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, !=, -1); 4779eda14cbcSMatt Macy VERIFY(msp->ms_primary); 4780eda14cbcSMatt Macy return; 4781eda14cbcSMatt Macy } 4782eda14cbcSMatt Macy 4783eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) { 4784eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 4785eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); 4786eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, !=, -1); 4787eda14cbcSMatt Macy VERIFY(!msp->ms_primary); 4788eda14cbcSMatt Macy return; 4789eda14cbcSMatt Macy } 4790eda14cbcSMatt Macy 4791eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 4792eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 4793eda14cbcSMatt Macy VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 4794eda14cbcSMatt Macy VERIFY3S(msp->ms_allocator, ==, -1); 4795eda14cbcSMatt Macy return; 4796eda14cbcSMatt Macy } 4797eda14cbcSMatt Macy } 4798eda14cbcSMatt Macy 4799eda14cbcSMatt Macy static uint64_t 4800eda14cbcSMatt Macy metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, 4801eda14cbcSMatt Macy uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 4802eda14cbcSMatt Macy int allocator, boolean_t try_hard) 4803eda14cbcSMatt Macy { 4804eda14cbcSMatt Macy metaslab_t *msp = NULL; 4805eda14cbcSMatt Macy uint64_t offset = -1ULL; 4806eda14cbcSMatt Macy 4807eda14cbcSMatt Macy uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY; 4808eda14cbcSMatt Macy for (int i = 0; i < d; i++) { 4809eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY && 4810eda14cbcSMatt Macy DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 4811eda14cbcSMatt Macy activation_weight = METASLAB_WEIGHT_SECONDARY; 4812eda14cbcSMatt Macy } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 4813eda14cbcSMatt Macy DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 4814eda14cbcSMatt Macy activation_weight = 
METASLAB_WEIGHT_CLAIM; 4815eda14cbcSMatt Macy break; 4816eda14cbcSMatt Macy } 4817eda14cbcSMatt Macy } 4818eda14cbcSMatt Macy 4819eda14cbcSMatt Macy /* 4820eda14cbcSMatt Macy * If we don't have enough metaslabs active to fill the entire array, we 4821eda14cbcSMatt Macy * just use the 0th slot. 4822eda14cbcSMatt Macy */ 4823eda14cbcSMatt Macy if (mg->mg_ms_ready < mg->mg_allocators * 3) 4824eda14cbcSMatt Macy allocator = 0; 4825eda14cbcSMatt Macy metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4826eda14cbcSMatt Macy 4827eda14cbcSMatt Macy ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2); 4828eda14cbcSMatt Macy 4829eda14cbcSMatt Macy metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); 4830eda14cbcSMatt Macy search->ms_weight = UINT64_MAX; 4831eda14cbcSMatt Macy search->ms_start = 0; 4832eda14cbcSMatt Macy /* 4833eda14cbcSMatt Macy * At the end of the metaslab tree are the already-active metaslabs, 4834eda14cbcSMatt Macy * first the primaries, then the secondaries. When we resume searching 4835eda14cbcSMatt Macy * through the tree, we need to consider ms_allocator and ms_primary so 4836eda14cbcSMatt Macy * we start in the location right after where we left off, and don't 4837eda14cbcSMatt Macy * accidentally loop forever considering the same metaslabs. 4838eda14cbcSMatt Macy */ 4839eda14cbcSMatt Macy search->ms_allocator = -1; 4840eda14cbcSMatt Macy search->ms_primary = B_TRUE; 4841eda14cbcSMatt Macy for (;;) { 4842eda14cbcSMatt Macy boolean_t was_active = B_FALSE; 4843eda14cbcSMatt Macy 4844eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 4845eda14cbcSMatt Macy 4846eda14cbcSMatt Macy if (activation_weight == METASLAB_WEIGHT_PRIMARY && 4847eda14cbcSMatt Macy mga->mga_primary != NULL) { 4848eda14cbcSMatt Macy msp = mga->mga_primary; 4849eda14cbcSMatt Macy 4850eda14cbcSMatt Macy /* 4851eda14cbcSMatt Macy * Even though we don't hold the ms_lock for the 4852eda14cbcSMatt Macy * primary metaslab, those fields should not 4853eda14cbcSMatt Macy * change while we hold the mg_lock. Thus it is 4854eda14cbcSMatt Macy * safe to make assertions on them. 4855eda14cbcSMatt Macy */ 4856eda14cbcSMatt Macy ASSERT(msp->ms_primary); 4857eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, allocator); 4858eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 4859eda14cbcSMatt Macy 4860eda14cbcSMatt Macy was_active = B_TRUE; 4861eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 4862eda14cbcSMatt Macy } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 4863eda14cbcSMatt Macy mga->mga_secondary != NULL) { 4864eda14cbcSMatt Macy msp = mga->mga_secondary; 4865eda14cbcSMatt Macy 4866eda14cbcSMatt Macy /* 4867eda14cbcSMatt Macy * See comment above about the similar assertions 4868eda14cbcSMatt Macy * for the primary metaslab. 
4869eda14cbcSMatt Macy */
4870eda14cbcSMatt Macy ASSERT(!msp->ms_primary);
4871eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, allocator);
4872eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
4873eda14cbcSMatt Macy
4874eda14cbcSMatt Macy was_active = B_TRUE;
4875eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4876eda14cbcSMatt Macy } else {
4877eda14cbcSMatt Macy msp = find_valid_metaslab(mg, activation_weight, dva, d,
4878eda14cbcSMatt Macy want_unique, asize, allocator, try_hard, zal,
4879eda14cbcSMatt Macy search, &was_active);
4880eda14cbcSMatt Macy }
4881eda14cbcSMatt Macy
4882eda14cbcSMatt Macy mutex_exit(&mg->mg_lock);
4883eda14cbcSMatt Macy if (msp == NULL) {
4884eda14cbcSMatt Macy kmem_free(search, sizeof (*search));
4885eda14cbcSMatt Macy return (-1ULL);
4886eda14cbcSMatt Macy }
4887eda14cbcSMatt Macy mutex_enter(&msp->ms_lock);
4888eda14cbcSMatt Macy
4889eda14cbcSMatt Macy metaslab_active_mask_verify(msp);
4890eda14cbcSMatt Macy
4891eda14cbcSMatt Macy /*
4892eda14cbcSMatt Macy * This code is disabled because of issues with
4893eda14cbcSMatt Macy * tracepoints in non-gpl kernel modules.
4894eda14cbcSMatt Macy */
4895eda14cbcSMatt Macy #if 0
4896eda14cbcSMatt Macy DTRACE_PROBE3(ms__activation__attempt,
4897eda14cbcSMatt Macy metaslab_t *, msp, uint64_t, activation_weight,
4898eda14cbcSMatt Macy boolean_t, was_active);
4899eda14cbcSMatt Macy #endif
4900eda14cbcSMatt Macy
4901eda14cbcSMatt Macy /*
4902eda14cbcSMatt Macy * Ensure that the metaslab we have selected is still
4903eda14cbcSMatt Macy * capable of handling our request. It's possible that
4904eda14cbcSMatt Macy * another thread may have changed the weight while we
4905eda14cbcSMatt Macy * were blocked on the metaslab lock. We check the
4906eda14cbcSMatt Macy * active status first to see if we need to select
4907eda14cbcSMatt Macy * a new metaslab.
4908eda14cbcSMatt Macy */
4909eda14cbcSMatt Macy if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4910eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, -1);
4911eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
4912eda14cbcSMatt Macy continue;
4913eda14cbcSMatt Macy }
4914eda14cbcSMatt Macy
4915eda14cbcSMatt Macy /*
4916eda14cbcSMatt Macy * If the metaslab was activated for another allocator
4917eda14cbcSMatt Macy * while we were waiting in the ms_lock above, or it's
4918eda14cbcSMatt Macy * a primary and we're seeking a secondary (or vice versa),
4919eda14cbcSMatt Macy * we go back and select a new metaslab.
4920eda14cbcSMatt Macy */
4921eda14cbcSMatt Macy if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4922eda14cbcSMatt Macy (msp->ms_allocator != -1) &&
4923eda14cbcSMatt Macy (msp->ms_allocator != allocator || ((activation_weight ==
4924eda14cbcSMatt Macy METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4925eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
4926eda14cbcSMatt Macy ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4927eda14cbcSMatt Macy msp->ms_allocator != -1);
4928eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
4929eda14cbcSMatt Macy continue;
4930eda14cbcSMatt Macy }
4931eda14cbcSMatt Macy
4932eda14cbcSMatt Macy /*
4933eda14cbcSMatt Macy * This metaslab was used for claiming regions allocated
4934eda14cbcSMatt Macy * by the ZIL during pool import. Once these regions are
4935eda14cbcSMatt Macy * claimed we don't need to keep the CLAIM bit set
4936eda14cbcSMatt Macy * anymore. Passivate this metaslab to zero its activation
4937eda14cbcSMatt Macy * mask.
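 * Doing so re-sorts the metaslab by its underlying (claim-free)
 * weight, so it can be activated normally afterwards.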
4938eda14cbcSMatt Macy */
4939eda14cbcSMatt Macy if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4940eda14cbcSMatt Macy activation_weight != METASLAB_WEIGHT_CLAIM) {
4941eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
4942eda14cbcSMatt Macy ASSERT3S(msp->ms_allocator, ==, -1);
4943eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight &
4944eda14cbcSMatt Macy ~METASLAB_WEIGHT_CLAIM);
4945eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
4946eda14cbcSMatt Macy continue;
4947eda14cbcSMatt Macy }
4948eda14cbcSMatt Macy
4949eda14cbcSMatt Macy metaslab_set_selected_txg(msp, txg);
4950eda14cbcSMatt Macy
4951eda14cbcSMatt Macy int activation_error =
4952eda14cbcSMatt Macy metaslab_activate(msp, allocator, activation_weight);
4953eda14cbcSMatt Macy metaslab_active_mask_verify(msp);
4954eda14cbcSMatt Macy
4955eda14cbcSMatt Macy /*
4956eda14cbcSMatt Macy * If the metaslab was activated by another thread for
4957eda14cbcSMatt Macy * another allocator or activation_weight (EBUSY), or it
4958eda14cbcSMatt Macy * failed because another metaslab was assigned as primary
4959eda14cbcSMatt Macy * for this allocator (EEXIST) we continue using this
4960eda14cbcSMatt Macy * metaslab for our allocation, rather than going on to a
4961eda14cbcSMatt Macy * worse metaslab (we waited for that metaslab to be loaded
4962eda14cbcSMatt Macy * after all).
4963eda14cbcSMatt Macy *
4964eda14cbcSMatt Macy * If the activation failed due to an I/O error or ENOSPC we
4965eda14cbcSMatt Macy * skip to the next metaslab.
4966eda14cbcSMatt Macy */
4967eda14cbcSMatt Macy boolean_t activated;
4968eda14cbcSMatt Macy if (activation_error == 0) {
4969eda14cbcSMatt Macy activated = B_TRUE;
4970eda14cbcSMatt Macy } else if (activation_error == EBUSY ||
4971eda14cbcSMatt Macy activation_error == EEXIST) {
4972eda14cbcSMatt Macy activated = B_FALSE;
4973eda14cbcSMatt Macy } else {
4974eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
4975eda14cbcSMatt Macy continue;
4976eda14cbcSMatt Macy }
4977eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
4978eda14cbcSMatt Macy
4979eda14cbcSMatt Macy /*
4980eda14cbcSMatt Macy * Now that we have the lock, recheck to see if we should
4981eda14cbcSMatt Macy * continue to use this metaslab for this allocation. The
4982eda14cbcSMatt Macy * metaslab is now loaded so metaslab_should_allocate()
4983eda14cbcSMatt Macy * can accurately determine if the allocation attempt should
4984eda14cbcSMatt Macy * proceed.
4985eda14cbcSMatt Macy */
4986eda14cbcSMatt Macy if (!metaslab_should_allocate(msp, asize, try_hard)) {
4987eda14cbcSMatt Macy /* Passivate this metaslab and select a new one. */
4988eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d,
4989eda14cbcSMatt Macy TRACE_TOO_SMALL, allocator);
4990eda14cbcSMatt Macy goto next;
4991eda14cbcSMatt Macy }
4992eda14cbcSMatt Macy
4993eda14cbcSMatt Macy /*
4994eda14cbcSMatt Macy * If this metaslab is currently condensing then pick again
4995eda14cbcSMatt Macy * as we can't manipulate this metaslab until it's committed
4996eda14cbcSMatt Macy * to disk. If this metaslab is being initialized, we shouldn't
4997eda14cbcSMatt Macy * allocate from it since the allocated region might be
4998eda14cbcSMatt Macy * overwritten after allocation.
4999eda14cbcSMatt Macy */
5000eda14cbcSMatt Macy if (msp->ms_condensing) {
5001eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d,
5002eda14cbcSMatt Macy TRACE_CONDENSING, allocator);
5003eda14cbcSMatt Macy if (activated) {
5004eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight &
5005eda14cbcSMatt Macy ~METASLAB_ACTIVE_MASK);
5006eda14cbcSMatt Macy }
5007eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
5008eda14cbcSMatt Macy continue;
5009eda14cbcSMatt Macy } else if (msp->ms_disabled > 0) {
5010eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d,
5011eda14cbcSMatt Macy TRACE_DISABLED, allocator);
5012eda14cbcSMatt Macy if (activated) {
5013eda14cbcSMatt Macy metaslab_passivate(msp, msp->ms_weight &
5014eda14cbcSMatt Macy ~METASLAB_ACTIVE_MASK);
5015eda14cbcSMatt Macy }
5016eda14cbcSMatt Macy mutex_exit(&msp->ms_lock);
5017eda14cbcSMatt Macy continue;
5018eda14cbcSMatt Macy }
5019eda14cbcSMatt Macy
5020eda14cbcSMatt Macy offset = metaslab_block_alloc(msp, asize, txg);
5021eda14cbcSMatt Macy metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
5022eda14cbcSMatt Macy
5023eda14cbcSMatt Macy if (offset != -1ULL) {
5024eda14cbcSMatt Macy /* Proactively passivate the metaslab, if needed */
5025eda14cbcSMatt Macy if (activated)
5026eda14cbcSMatt Macy metaslab_segment_may_passivate(msp);
5027eda14cbcSMatt Macy break;
5028eda14cbcSMatt Macy }
5029eda14cbcSMatt Macy next:
5030eda14cbcSMatt Macy ASSERT(msp->ms_loaded);
5031eda14cbcSMatt Macy
5032eda14cbcSMatt Macy /*
5033eda14cbcSMatt Macy * This code is disabled because of issues with
5034eda14cbcSMatt Macy * tracepoints in non-gpl kernel modules.
5035eda14cbcSMatt Macy */
5036eda14cbcSMatt Macy #if 0
5037eda14cbcSMatt Macy DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
5038eda14cbcSMatt Macy uint64_t, asize);
5039eda14cbcSMatt Macy #endif
5040eda14cbcSMatt Macy
5041eda14cbcSMatt Macy /*
5042eda14cbcSMatt Macy * We were unable to allocate from this metaslab so determine
5043eda14cbcSMatt Macy * a new weight for this metaslab. Now that we have loaded
5044eda14cbcSMatt Macy * the metaslab we can provide a better hint to the metaslab
5045eda14cbcSMatt Macy * selector.
5046eda14cbcSMatt Macy *
5047eda14cbcSMatt Macy * For space-based metaslabs, we use the maximum block size.
5048eda14cbcSMatt Macy * This information is only available when the metaslab
5049eda14cbcSMatt Macy * is loaded and is more accurate than the generic free
5050eda14cbcSMatt Macy * space weight that was calculated by metaslab_weight().
5051eda14cbcSMatt Macy * This information allows us to quickly compare the maximum
5052eda14cbcSMatt Macy * available allocation in the metaslab to the allocation
5053eda14cbcSMatt Macy * size being requested.
5054eda14cbcSMatt Macy *
5055eda14cbcSMatt Macy * For segment-based metaslabs, determine the new weight
5056eda14cbcSMatt Macy * based on the highest bucket in the range tree. We
5057eda14cbcSMatt Macy * explicitly use the loaded segment weight (i.e. the range
5058eda14cbcSMatt Macy * tree histogram) since it contains the space that is
5059eda14cbcSMatt Macy * currently available for allocation and is accurate
5060eda14cbcSMatt Macy * even within a sync pass.
5061eda14cbcSMatt Macy */ 5062eda14cbcSMatt Macy uint64_t weight; 5063eda14cbcSMatt Macy if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 5064eda14cbcSMatt Macy weight = metaslab_largest_allocatable(msp); 5065eda14cbcSMatt Macy WEIGHT_SET_SPACEBASED(weight); 5066eda14cbcSMatt Macy } else { 5067eda14cbcSMatt Macy weight = metaslab_weight_from_range_tree(msp); 5068eda14cbcSMatt Macy } 5069eda14cbcSMatt Macy 5070eda14cbcSMatt Macy if (activated) { 5071eda14cbcSMatt Macy metaslab_passivate(msp, weight); 5072eda14cbcSMatt Macy } else { 5073eda14cbcSMatt Macy /* 5074eda14cbcSMatt Macy * For the case where we use the metaslab that is 5075eda14cbcSMatt Macy * active for another allocator we want to make 5076eda14cbcSMatt Macy * sure that we retain the activation mask. 5077eda14cbcSMatt Macy * 5078eda14cbcSMatt Macy * Note that we could attempt to use something like 5079eda14cbcSMatt Macy * metaslab_recalculate_weight_and_sort() that 5080eda14cbcSMatt Macy * retains the activation mask here. That function 5081eda14cbcSMatt Macy * uses metaslab_weight() to set the weight though 5082eda14cbcSMatt Macy * which is not as accurate as the calculations 5083eda14cbcSMatt Macy * above. 5084eda14cbcSMatt Macy */ 5085eda14cbcSMatt Macy weight |= msp->ms_weight & METASLAB_ACTIVE_MASK; 5086eda14cbcSMatt Macy metaslab_group_sort(mg, msp, weight); 5087eda14cbcSMatt Macy } 5088eda14cbcSMatt Macy metaslab_active_mask_verify(msp); 5089eda14cbcSMatt Macy 5090eda14cbcSMatt Macy /* 5091eda14cbcSMatt Macy * We have just failed an allocation attempt, check 5092eda14cbcSMatt Macy * that metaslab_should_allocate() agrees. Otherwise, 5093eda14cbcSMatt Macy * we may end up in an infinite loop retrying the same 5094eda14cbcSMatt Macy * metaslab. 5095eda14cbcSMatt Macy */ 5096eda14cbcSMatt Macy ASSERT(!metaslab_should_allocate(msp, asize, try_hard)); 5097eda14cbcSMatt Macy 5098eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5099eda14cbcSMatt Macy } 5100eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5101eda14cbcSMatt Macy kmem_free(search, sizeof (*search)); 5102eda14cbcSMatt Macy return (offset); 5103eda14cbcSMatt Macy } 5104eda14cbcSMatt Macy 5105eda14cbcSMatt Macy static uint64_t 5106eda14cbcSMatt Macy metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, 5107eda14cbcSMatt Macy uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 5108eda14cbcSMatt Macy int allocator, boolean_t try_hard) 5109eda14cbcSMatt Macy { 5110eda14cbcSMatt Macy uint64_t offset; 5111eda14cbcSMatt Macy 5112eda14cbcSMatt Macy offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique, 5113eda14cbcSMatt Macy dva, d, allocator, try_hard); 5114eda14cbcSMatt Macy 5115eda14cbcSMatt Macy mutex_enter(&mg->mg_lock); 5116eda14cbcSMatt Macy if (offset == -1ULL) { 5117eda14cbcSMatt Macy mg->mg_failed_allocations++; 5118eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, asize, d, 5119eda14cbcSMatt Macy TRACE_GROUP_FAILURE, allocator); 5120eda14cbcSMatt Macy if (asize == SPA_GANGBLOCKSIZE) { 5121eda14cbcSMatt Macy /* 5122eda14cbcSMatt Macy * This metaslab group was unable to allocate 5123eda14cbcSMatt Macy * the minimum gang block size so it must be out of 5124eda14cbcSMatt Macy * space. We must notify the allocation throttle 5125eda14cbcSMatt Macy * to start skipping allocation attempts to this 5126eda14cbcSMatt Macy * metaslab group until more space becomes available. 
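 * mg_no_free_space is set below and is taken into account the next
 * time the group's eligibility is evaluated (see the
 * metaslab_group_allocatable() check in metaslab_alloc_dva()).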
5127eda14cbcSMatt Macy * Note: this failure cannot be caused by the 5128eda14cbcSMatt Macy * allocation throttle since the allocation throttle 5129eda14cbcSMatt Macy * is only responsible for skipping devices and 5130eda14cbcSMatt Macy * not failing block allocations. 5131eda14cbcSMatt Macy */ 5132eda14cbcSMatt Macy mg->mg_no_free_space = B_TRUE; 5133eda14cbcSMatt Macy } 5134eda14cbcSMatt Macy } 5135eda14cbcSMatt Macy mg->mg_allocations++; 5136eda14cbcSMatt Macy mutex_exit(&mg->mg_lock); 5137eda14cbcSMatt Macy return (offset); 5138eda14cbcSMatt Macy } 5139eda14cbcSMatt Macy 5140eda14cbcSMatt Macy /* 5141eda14cbcSMatt Macy * Allocate a block for the specified i/o. 5142eda14cbcSMatt Macy */ 5143eda14cbcSMatt Macy int 5144eda14cbcSMatt Macy metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, 5145eda14cbcSMatt Macy dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, 5146eda14cbcSMatt Macy zio_alloc_list_t *zal, int allocator) 5147eda14cbcSMatt Macy { 51487877fdebSMatt Macy metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5149315ee00fSMartin Matuska metaslab_group_t *mg, *rotor; 5150eda14cbcSMatt Macy vdev_t *vd; 5151eda14cbcSMatt Macy boolean_t try_hard = B_FALSE; 5152eda14cbcSMatt Macy 5153eda14cbcSMatt Macy ASSERT(!DVA_IS_VALID(&dva[d])); 5154eda14cbcSMatt Macy 5155eda14cbcSMatt Macy /* 5156eda14cbcSMatt Macy * For testing, make some blocks above a certain size be gang blocks. 5157eda14cbcSMatt Macy * This will result in more split blocks when using device removal, 5158eda14cbcSMatt Macy * and a large number of split blocks coupled with ztest-induced 5159eda14cbcSMatt Macy * damage can result in extremely long reconstruction times. This 5160eda14cbcSMatt Macy * will also test spilling from special to normal. 5161eda14cbcSMatt Macy */ 5162315ee00fSMartin Matuska if (psize >= metaslab_force_ganging && 5163315ee00fSMartin Matuska metaslab_force_ganging_pct > 0 && 5164315ee00fSMartin Matuska (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) { 5165eda14cbcSMatt Macy metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG, 5166eda14cbcSMatt Macy allocator); 5167eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5168eda14cbcSMatt Macy } 5169eda14cbcSMatt Macy 5170eda14cbcSMatt Macy /* 5171eda14cbcSMatt Macy * Start at the rotor and loop through all mgs until we find something. 51727877fdebSMatt Macy * Note that there's no locking on mca_rotor or mca_aliquot because 5173eda14cbcSMatt Macy * nothing actually breaks if we miss a few updates -- we just won't 5174eda14cbcSMatt Macy * allocate quite as evenly. It all balances out over time. 5175eda14cbcSMatt Macy * 5176eda14cbcSMatt Macy * If we are doing ditto or log blocks, try to spread them across 5177eda14cbcSMatt Macy * consecutive vdevs. If we're forced to reuse a vdev before we've 5178eda14cbcSMatt Macy * allocated all of our ditto blocks, then try and spread them out on 5179eda14cbcSMatt Macy * that vdev as much as possible. If it turns out to not be possible, 5180eda14cbcSMatt Macy * gradually lower our standards until anything becomes acceptable. 5181eda14cbcSMatt Macy * Also, allocating on consecutive vdevs (as opposed to random vdevs) 5182eda14cbcSMatt Macy * gives us hope of containing our fault domains to something we're 5183eda14cbcSMatt Macy * able to reason about. Otherwise, any two top-level vdev failures 5184eda14cbcSMatt Macy * will guarantee the loss of data. 
With consecutive allocation, 5185eda14cbcSMatt Macy * only two adjacent top-level vdev failures will result in data loss. 5186eda14cbcSMatt Macy * 5187eda14cbcSMatt Macy * If we are doing gang blocks (hintdva is non-NULL), try to keep 5188eda14cbcSMatt Macy * ourselves on the same vdev as our gang block header. That 5189eda14cbcSMatt Macy * way, we can hope for locality in vdev_cache, plus it makes our 5190eda14cbcSMatt Macy * fault domains something tractable. 5191eda14cbcSMatt Macy */ 5192eda14cbcSMatt Macy if (hintdva) { 5193eda14cbcSMatt Macy vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); 5194eda14cbcSMatt Macy 5195eda14cbcSMatt Macy /* 5196eda14cbcSMatt Macy * It's possible the vdev we're using as the hint no 5197eda14cbcSMatt Macy * longer exists or its mg has been closed (e.g. by 5198eda14cbcSMatt Macy * device removal). Consult the rotor when 5199eda14cbcSMatt Macy * all else fails. 5200eda14cbcSMatt Macy */ 5201eda14cbcSMatt Macy if (vd != NULL && vd->vdev_mg != NULL) { 5202184c1b94SMartin Matuska mg = vdev_get_mg(vd, mc); 5203eda14cbcSMatt Macy 5204dbd5678dSMartin Matuska if (flags & METASLAB_HINTBP_AVOID) 5205eda14cbcSMatt Macy mg = mg->mg_next; 5206eda14cbcSMatt Macy } else { 52077877fdebSMatt Macy mg = mca->mca_rotor; 5208eda14cbcSMatt Macy } 5209eda14cbcSMatt Macy } else if (d != 0) { 5210eda14cbcSMatt Macy vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); 5211eda14cbcSMatt Macy mg = vd->vdev_mg->mg_next; 5212eda14cbcSMatt Macy } else { 52137877fdebSMatt Macy ASSERT(mca->mca_rotor != NULL); 52147877fdebSMatt Macy mg = mca->mca_rotor; 5215eda14cbcSMatt Macy } 5216eda14cbcSMatt Macy 5217eda14cbcSMatt Macy /* 5218eda14cbcSMatt Macy * If the hint put us into the wrong metaslab class, or into a 5219eda14cbcSMatt Macy * metaslab group that has been passivated, just follow the rotor. 5220eda14cbcSMatt Macy */ 5221eda14cbcSMatt Macy if (mg->mg_class != mc || mg->mg_activation_count <= 0) 52227877fdebSMatt Macy mg = mca->mca_rotor; 5223eda14cbcSMatt Macy 5224eda14cbcSMatt Macy rotor = mg; 5225eda14cbcSMatt Macy top: 5226eda14cbcSMatt Macy do { 5227eda14cbcSMatt Macy boolean_t allocatable; 5228eda14cbcSMatt Macy 5229eda14cbcSMatt Macy ASSERT(mg->mg_activation_count == 1); 5230eda14cbcSMatt Macy vd = mg->mg_vd; 5231eda14cbcSMatt Macy 5232eda14cbcSMatt Macy /* 5233eda14cbcSMatt Macy * Don't allocate from faulted devices. 5234eda14cbcSMatt Macy */ 5235eda14cbcSMatt Macy if (try_hard) { 5236eda14cbcSMatt Macy spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); 5237eda14cbcSMatt Macy allocatable = vdev_allocatable(vd); 5238eda14cbcSMatt Macy spa_config_exit(spa, SCL_ZIO, FTAG); 5239eda14cbcSMatt Macy } else { 5240eda14cbcSMatt Macy allocatable = vdev_allocatable(vd); 5241eda14cbcSMatt Macy } 5242eda14cbcSMatt Macy 5243eda14cbcSMatt Macy /* 5244eda14cbcSMatt Macy * Determine if the selected metaslab group is eligible 5245eda14cbcSMatt Macy * for allocations. If we're ganging then don't allow 5246eda14cbcSMatt Macy * this metaslab group to skip allocations since that would 5247eda14cbcSMatt Macy * inadvertently return ENOSPC and suspend the pool 5248eda14cbcSMatt Macy * even though space is still available. 
5249eda14cbcSMatt Macy */ 5250eda14cbcSMatt Macy if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { 5251eda14cbcSMatt Macy allocatable = metaslab_group_allocatable(mg, rotor, 525215f0b8c3SMartin Matuska flags, psize, allocator, d); 5253eda14cbcSMatt Macy } 5254eda14cbcSMatt Macy 5255eda14cbcSMatt Macy if (!allocatable) { 5256eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, psize, d, 5257eda14cbcSMatt Macy TRACE_NOT_ALLOCATABLE, allocator); 5258eda14cbcSMatt Macy goto next; 5259eda14cbcSMatt Macy } 5260eda14cbcSMatt Macy 5261eda14cbcSMatt Macy /* 5262dbd5678dSMartin Matuska * Avoid writing single-copy data to an unhealthy, 5263eda14cbcSMatt Macy * non-redundant vdev, unless we've already tried all 5264eda14cbcSMatt Macy * other vdevs. 5265eda14cbcSMatt Macy */ 5266dbd5678dSMartin Matuska if (vd->vdev_state < VDEV_STATE_HEALTHY && 5267eda14cbcSMatt Macy d == 0 && !try_hard && vd->vdev_children == 0) { 5268eda14cbcSMatt Macy metaslab_trace_add(zal, mg, NULL, psize, d, 5269eda14cbcSMatt Macy TRACE_VDEV_ERROR, allocator); 5270eda14cbcSMatt Macy goto next; 5271eda14cbcSMatt Macy } 5272eda14cbcSMatt Macy 5273eda14cbcSMatt Macy ASSERT(mg->mg_class == mc); 5274eda14cbcSMatt Macy 5275e716630dSMartin Matuska uint64_t asize = vdev_psize_to_asize_txg(vd, psize, txg); 5276eda14cbcSMatt Macy ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); 5277eda14cbcSMatt Macy 5278eda14cbcSMatt Macy /* 5279eda14cbcSMatt Macy * If we don't need to try hard, then require that the 5280eda14cbcSMatt Macy * block be on a different metaslab from any other DVAs 5281eda14cbcSMatt Macy * in this BP (unique=true). If we are trying hard, then 5282eda14cbcSMatt Macy * allow any metaslab to be used (unique=false). 5283eda14cbcSMatt Macy */ 5284eda14cbcSMatt Macy uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, 5285eda14cbcSMatt Macy !try_hard, dva, d, allocator, try_hard); 5286eda14cbcSMatt Macy 5287eda14cbcSMatt Macy if (offset != -1ULL) { 5288eda14cbcSMatt Macy /* 5289eda14cbcSMatt Macy * If we've just selected this metaslab group, 5290eda14cbcSMatt Macy * figure out whether the corresponding vdev is 5291eda14cbcSMatt Macy * over- or under-used relative to the pool, 5292eda14cbcSMatt Macy * and set an allocation bias to even it out. 5293eda14cbcSMatt Macy * 5294eda14cbcSMatt Macy * Bias is also used to compensate for unequally 5295eda14cbcSMatt Macy * sized vdevs so that space is allocated fairly. 5296eda14cbcSMatt Macy */ 52977877fdebSMatt Macy if (mca->mca_aliquot == 0 && metaslab_bias_enabled) { 5298eda14cbcSMatt Macy vdev_stat_t *vs = &vd->vdev_stat; 5299eda14cbcSMatt Macy int64_t vs_free = vs->vs_space - vs->vs_alloc; 5300eda14cbcSMatt Macy int64_t mc_free = mc->mc_space - mc->mc_alloc; 5301eda14cbcSMatt Macy int64_t ratio; 5302eda14cbcSMatt Macy 5303eda14cbcSMatt Macy /* 5304eda14cbcSMatt Macy * Calculate how much more or less we should 5305eda14cbcSMatt Macy * try to allocate from this device during 5306eda14cbcSMatt Macy * this iteration around the rotor. 5307eda14cbcSMatt Macy * 5308eda14cbcSMatt Macy * This basically introduces a zero-centered 5309eda14cbcSMatt Macy * bias towards the devices with the most 5310eda14cbcSMatt Macy * free space, while compensating for vdev 5311eda14cbcSMatt Macy * size differences. 
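 * In the code below, ratio = vs_free * mc_alloc_groups * 100 /
 * (mc_free + 1) and mg_bias = (ratio - 100) * mg_aliquot / 100, so
 * a device holding exactly its proportional share of the pool's
 * free space gets a ratio of 100% and a bias of zero.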
5312eda14cbcSMatt Macy * 5313eda14cbcSMatt Macy * Examples: 5314eda14cbcSMatt Macy * vdev V1 = 16M/128M 5315eda14cbcSMatt Macy * vdev V2 = 16M/128M 5316eda14cbcSMatt Macy * ratio(V1) = 100% ratio(V2) = 100% 5317eda14cbcSMatt Macy * 5318eda14cbcSMatt Macy * vdev V1 = 16M/128M 5319eda14cbcSMatt Macy * vdev V2 = 64M/128M 5320eda14cbcSMatt Macy * ratio(V1) = 127% ratio(V2) = 72% 5321eda14cbcSMatt Macy * 5322eda14cbcSMatt Macy * vdev V1 = 16M/128M 5323eda14cbcSMatt Macy * vdev V2 = 64M/512M 5324eda14cbcSMatt Macy * ratio(V1) = 40% ratio(V2) = 160% 5325eda14cbcSMatt Macy */ 5326eda14cbcSMatt Macy ratio = (vs_free * mc->mc_alloc_groups * 100) / 5327eda14cbcSMatt Macy (mc_free + 1); 5328eda14cbcSMatt Macy mg->mg_bias = ((ratio - 100) * 5329eda14cbcSMatt Macy (int64_t)mg->mg_aliquot) / 100; 5330eda14cbcSMatt Macy } else if (!metaslab_bias_enabled) { 5331eda14cbcSMatt Macy mg->mg_bias = 0; 5332eda14cbcSMatt Macy } 5333eda14cbcSMatt Macy 5334315ee00fSMartin Matuska if ((flags & METASLAB_ZIL) || 53357877fdebSMatt Macy atomic_add_64_nv(&mca->mca_aliquot, asize) >= 5336eda14cbcSMatt Macy mg->mg_aliquot + mg->mg_bias) { 53377877fdebSMatt Macy mca->mca_rotor = mg->mg_next; 53387877fdebSMatt Macy mca->mca_aliquot = 0; 5339eda14cbcSMatt Macy } 5340eda14cbcSMatt Macy 5341eda14cbcSMatt Macy DVA_SET_VDEV(&dva[d], vd->vdev_id); 5342eda14cbcSMatt Macy DVA_SET_OFFSET(&dva[d], offset); 5343eda14cbcSMatt Macy DVA_SET_GANG(&dva[d], 5344eda14cbcSMatt Macy ((flags & METASLAB_GANG_HEADER) ? 1 : 0)); 5345eda14cbcSMatt Macy DVA_SET_ASIZE(&dva[d], asize); 5346eda14cbcSMatt Macy 5347eda14cbcSMatt Macy return (0); 5348eda14cbcSMatt Macy } 5349eda14cbcSMatt Macy next: 53507877fdebSMatt Macy mca->mca_rotor = mg->mg_next; 53517877fdebSMatt Macy mca->mca_aliquot = 0; 5352eda14cbcSMatt Macy } while ((mg = mg->mg_next) != rotor); 5353eda14cbcSMatt Macy 5354eda14cbcSMatt Macy /* 53557877fdebSMatt Macy * If we haven't tried hard, perhaps do so now. 
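 * Trying hard is worthwhile when the alternative is ganging: gang
 * and ZIL allocations, tiny blocks (psize <= 1 << spa_min_ashift),
 * and, when zfs_metaslab_try_hard_before_gang is set, every
 * allocation.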
5356eda14cbcSMatt Macy */ 53577877fdebSMatt Macy if (!try_hard && (zfs_metaslab_try_hard_before_gang || 53587877fdebSMatt Macy GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 || 53597877fdebSMatt Macy psize <= 1 << spa->spa_min_ashift)) { 53607877fdebSMatt Macy METASLABSTAT_BUMP(metaslabstat_try_hard); 5361eda14cbcSMatt Macy try_hard = B_TRUE; 5362eda14cbcSMatt Macy goto top; 5363eda14cbcSMatt Macy } 5364eda14cbcSMatt Macy 5365da5137abSMartin Matuska memset(&dva[d], 0, sizeof (dva_t)); 5366eda14cbcSMatt Macy 5367eda14cbcSMatt Macy metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator); 5368eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5369eda14cbcSMatt Macy } 5370eda14cbcSMatt Macy 5371eda14cbcSMatt Macy void 5372eda14cbcSMatt Macy metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, 5373eda14cbcSMatt Macy boolean_t checkpoint) 5374eda14cbcSMatt Macy { 5375eda14cbcSMatt Macy metaslab_t *msp; 5376eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5377eda14cbcSMatt Macy 5378eda14cbcSMatt Macy ASSERT(vdev_is_concrete(vd)); 5379eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5380eda14cbcSMatt Macy ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 5381eda14cbcSMatt Macy 5382eda14cbcSMatt Macy msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5383eda14cbcSMatt Macy 5384eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 5385eda14cbcSMatt Macy VERIFY3U(offset, >=, msp->ms_start); 5386eda14cbcSMatt Macy VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size); 5387eda14cbcSMatt Macy VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5388eda14cbcSMatt Macy VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift)); 5389eda14cbcSMatt Macy 5390eda14cbcSMatt Macy metaslab_check_free_impl(vd, offset, asize); 5391eda14cbcSMatt Macy 5392eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 5393eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_freeing) && 5394eda14cbcSMatt Macy range_tree_is_empty(msp->ms_checkpointing)) { 5395eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa)); 5396eda14cbcSMatt Macy } 5397eda14cbcSMatt Macy 5398eda14cbcSMatt Macy if (checkpoint) { 5399eda14cbcSMatt Macy ASSERT(spa_has_checkpoint(spa)); 5400eda14cbcSMatt Macy range_tree_add(msp->ms_checkpointing, offset, asize); 5401eda14cbcSMatt Macy } else { 5402eda14cbcSMatt Macy range_tree_add(msp->ms_freeing, offset, asize); 5403eda14cbcSMatt Macy } 5404eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5405eda14cbcSMatt Macy } 5406eda14cbcSMatt Macy 5407eda14cbcSMatt Macy void 5408eda14cbcSMatt Macy metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5409eda14cbcSMatt Macy uint64_t size, void *arg) 5410eda14cbcSMatt Macy { 5411e92ffd9bSMartin Matuska (void) inner_offset; 5412eda14cbcSMatt Macy boolean_t *checkpoint = arg; 5413eda14cbcSMatt Macy 5414eda14cbcSMatt Macy ASSERT3P(checkpoint, !=, NULL); 5415eda14cbcSMatt Macy 5416eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap != NULL) 5417eda14cbcSMatt Macy vdev_indirect_mark_obsolete(vd, offset, size); 5418eda14cbcSMatt Macy else 5419eda14cbcSMatt Macy metaslab_free_impl(vd, offset, size, *checkpoint); 5420eda14cbcSMatt Macy } 5421eda14cbcSMatt Macy 5422eda14cbcSMatt Macy static void 5423eda14cbcSMatt Macy metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size, 5424eda14cbcSMatt Macy boolean_t checkpoint) 5425eda14cbcSMatt Macy { 5426eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5427eda14cbcSMatt Macy 5428eda14cbcSMatt Macy ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 
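	/*
	 * Once the syncing txg passes spa_freeze_txg (set when the pool is
	 * frozen with spa_freeze(), e.g. for ZIL testing), nothing is ever
	 * freed, so the frozen on-disk state stays intact.
	 */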
5429eda14cbcSMatt Macy 5430eda14cbcSMatt Macy if (spa_syncing_txg(spa) > spa_freeze_txg(spa)) 5431eda14cbcSMatt Macy return; 5432eda14cbcSMatt Macy 5433eda14cbcSMatt Macy if (spa->spa_vdev_removal != NULL && 5434eda14cbcSMatt Macy spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id && 5435eda14cbcSMatt Macy vdev_is_concrete(vd)) { 5436eda14cbcSMatt Macy /* 5437eda14cbcSMatt Macy * Note: we check if the vdev is concrete because when 5438eda14cbcSMatt Macy * we complete the removal, we first change the vdev to be 5439eda14cbcSMatt Macy * an indirect vdev (in open context), and then (in syncing 5440eda14cbcSMatt Macy * context) clear spa_vdev_removal. 5441eda14cbcSMatt Macy */ 5442eda14cbcSMatt Macy free_from_removing_vdev(vd, offset, size); 5443eda14cbcSMatt Macy } else if (vd->vdev_ops->vdev_op_remap != NULL) { 5444eda14cbcSMatt Macy vdev_indirect_mark_obsolete(vd, offset, size); 5445eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, 5446eda14cbcSMatt Macy metaslab_free_impl_cb, &checkpoint); 5447eda14cbcSMatt Macy } else { 5448eda14cbcSMatt Macy metaslab_free_concrete(vd, offset, size, checkpoint); 5449eda14cbcSMatt Macy } 5450eda14cbcSMatt Macy } 5451eda14cbcSMatt Macy 5452eda14cbcSMatt Macy typedef struct remap_blkptr_cb_arg { 5453eda14cbcSMatt Macy blkptr_t *rbca_bp; 5454eda14cbcSMatt Macy spa_remap_cb_t rbca_cb; 5455eda14cbcSMatt Macy vdev_t *rbca_remap_vd; 5456eda14cbcSMatt Macy uint64_t rbca_remap_offset; 5457eda14cbcSMatt Macy void *rbca_cb_arg; 5458eda14cbcSMatt Macy } remap_blkptr_cb_arg_t; 5459eda14cbcSMatt Macy 5460eda14cbcSMatt Macy static void 5461eda14cbcSMatt Macy remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5462eda14cbcSMatt Macy uint64_t size, void *arg) 5463eda14cbcSMatt Macy { 5464eda14cbcSMatt Macy remap_blkptr_cb_arg_t *rbca = arg; 5465eda14cbcSMatt Macy blkptr_t *bp = rbca->rbca_bp; 5466eda14cbcSMatt Macy 5467eda14cbcSMatt Macy /* We can not remap split blocks. */ 5468eda14cbcSMatt Macy if (size != DVA_GET_ASIZE(&bp->blk_dva[0])) 5469eda14cbcSMatt Macy return; 5470eda14cbcSMatt Macy ASSERT0(inner_offset); 5471eda14cbcSMatt Macy 5472eda14cbcSMatt Macy if (rbca->rbca_cb != NULL) { 5473eda14cbcSMatt Macy /* 5474eda14cbcSMatt Macy * At this point we know that we are not handling split 5475eda14cbcSMatt Macy * blocks and we invoke the callback on the previous 5476eda14cbcSMatt Macy * vdev which must be indirect. 5477eda14cbcSMatt Macy */ 5478eda14cbcSMatt Macy ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops); 5479eda14cbcSMatt Macy 5480eda14cbcSMatt Macy rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id, 5481eda14cbcSMatt Macy rbca->rbca_remap_offset, size, rbca->rbca_cb_arg); 5482eda14cbcSMatt Macy 5483eda14cbcSMatt Macy /* set up remap_blkptr_cb_arg for the next call */ 5484eda14cbcSMatt Macy rbca->rbca_remap_vd = vd; 5485eda14cbcSMatt Macy rbca->rbca_remap_offset = offset; 5486eda14cbcSMatt Macy } 5487eda14cbcSMatt Macy 5488eda14cbcSMatt Macy /* 5489eda14cbcSMatt Macy * The phys birth time is that of dva[0]. This ensures that we know 5490eda14cbcSMatt Macy * when each dva was written, so that resilver can determine which 5491eda14cbcSMatt Macy * blocks need to be scrubbed (i.e. those written during the time 5492eda14cbcSMatt Macy * the vdev was offline). It also ensures that the key used in 5493eda14cbcSMatt Macy * the ARC hash table is unique (i.e. dva[0] + phys_birth). 
If
5494eda14cbcSMatt Macy 	 * we didn't change the phys_birth, a lookup in the ARC for a
5495eda14cbcSMatt Macy 	 * remapped BP could find the data that was previously stored at
5496eda14cbcSMatt Macy 	 * this vdev + offset.
5497eda14cbcSMatt Macy 	 */
5498eda14cbcSMatt Macy 	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5499eda14cbcSMatt Macy 	    DVA_GET_VDEV(&bp->blk_dva[0]));
5500eda14cbcSMatt Macy 	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5501783d3ff6SMartin Matuska 	uint64_t physical_birth = vdev_indirect_births_physbirth(vib,
5502eda14cbcSMatt Macy 	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5503783d3ff6SMartin Matuska 	BP_SET_PHYSICAL_BIRTH(bp, physical_birth);
5504eda14cbcSMatt Macy 
5505eda14cbcSMatt Macy 	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5506eda14cbcSMatt Macy 	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5507eda14cbcSMatt Macy }
5508eda14cbcSMatt Macy 
5509eda14cbcSMatt Macy /*
5510eda14cbcSMatt Macy  * If the block pointer contains any indirect DVAs, modify them to refer to
5511eda14cbcSMatt Macy  * concrete DVAs. Note that this will sometimes not be possible, leaving
5512eda14cbcSMatt Macy  * the indirect DVA in place. This happens if the indirect DVA spans multiple
5513eda14cbcSMatt Macy  * segments in the mapping (i.e. it is a "split block").
5514eda14cbcSMatt Macy  *
5515eda14cbcSMatt Macy  * If the BP was remapped, the callback is invoked on the original
5516eda14cbcSMatt Macy  * dva (note that the callback can be called multiple times if the
5517eda14cbcSMatt Macy  * original indirect DVA refers to another indirect DVA, etc).
5518eda14cbcSMatt Macy  *
5519eda14cbcSMatt Macy  * Returns TRUE if the BP was remapped.
5520eda14cbcSMatt Macy  */
5521eda14cbcSMatt Macy boolean_t
5522eda14cbcSMatt Macy spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5523eda14cbcSMatt Macy {
5524eda14cbcSMatt Macy 	remap_blkptr_cb_arg_t rbca;
5525eda14cbcSMatt Macy 
5526eda14cbcSMatt Macy 	if (!zfs_remap_blkptr_enable)
5527eda14cbcSMatt Macy 		return (B_FALSE);
5528eda14cbcSMatt Macy 
5529eda14cbcSMatt Macy 	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5530eda14cbcSMatt Macy 		return (B_FALSE);
5531eda14cbcSMatt Macy 
5532eda14cbcSMatt Macy 	/*
5533eda14cbcSMatt Macy 	 * Dedup BPs cannot be remapped, because ddt_phys_select() depends
5534eda14cbcSMatt Macy 	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5535eda14cbcSMatt Macy 	 */
5536eda14cbcSMatt Macy 	if (BP_GET_DEDUP(bp))
5537eda14cbcSMatt Macy 		return (B_FALSE);
5538eda14cbcSMatt Macy 
5539eda14cbcSMatt Macy 	/*
5540eda14cbcSMatt Macy 	 * Gang blocks cannot be remapped, because
5541eda14cbcSMatt Macy 	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5542eda14cbcSMatt Macy 	 * the BP used to read the gang block header (GBH) being the same
5543eda14cbcSMatt Macy 	 * as the DVA[0] that we allocated for the GBH.
5544eda14cbcSMatt Macy 	 */
5545eda14cbcSMatt Macy 	if (BP_IS_GANG(bp))
5546eda14cbcSMatt Macy 		return (B_FALSE);
5547eda14cbcSMatt Macy 
5548eda14cbcSMatt Macy 	/*
5549eda14cbcSMatt Macy 	 * Embedded BPs have no DVA to remap.
5550eda14cbcSMatt Macy 	 */
5551eda14cbcSMatt Macy 	if (BP_GET_NDVAS(bp) < 1)
5552eda14cbcSMatt Macy 		return (B_FALSE);
5553eda14cbcSMatt Macy 
5554eda14cbcSMatt Macy 	/*
5555eda14cbcSMatt Macy 	 * Note: we only remap dva[0]. If we remapped other dvas, we
5556eda14cbcSMatt Macy 	 * would no longer know what their phys birth txg is.
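	 *
	 * A sketch of typical use (hedged; dbuf_remap_impl() in dbuf.c is an
	 * in-tree consumer, and the variable names below are illustrative):
	 *
	 *	blkptr_t bp_copy = *bp;
	 *	if (spa_remap_blkptr(spa, &bp_copy, callback, arg))
	 *		... dirty the holder so the remapped bp is written out ...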
5557eda14cbcSMatt Macy 	 */
5558eda14cbcSMatt Macy 	dva_t *dva = &bp->blk_dva[0];
5559eda14cbcSMatt Macy 
5560eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5561eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5562eda14cbcSMatt Macy 	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5563eda14cbcSMatt Macy 
5564eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_remap == NULL)
5565eda14cbcSMatt Macy 		return (B_FALSE);
5566eda14cbcSMatt Macy 
5567eda14cbcSMatt Macy 	rbca.rbca_bp = bp;
5568eda14cbcSMatt Macy 	rbca.rbca_cb = callback;
5569eda14cbcSMatt Macy 	rbca.rbca_remap_vd = vd;
5570eda14cbcSMatt Macy 	rbca.rbca_remap_offset = offset;
5571eda14cbcSMatt Macy 	rbca.rbca_cb_arg = arg;
5572eda14cbcSMatt Macy 
5573eda14cbcSMatt Macy 	/*
5574eda14cbcSMatt Macy 	 * remap_blkptr_cb() will be called in order for each level of
5575eda14cbcSMatt Macy 	 * indirection, until a concrete vdev is reached or a split block is
5576eda14cbcSMatt Macy 	 * encountered. rbca_remap_vd and rbca_remap_offset are updated
5577eda14cbcSMatt Macy 	 * within the callback as we go from one indirect vdev to the next
5578eda14cbcSMatt Macy 	 * (either concrete or indirect again) in that order.
5579eda14cbcSMatt Macy 	 */
5580eda14cbcSMatt Macy 	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5581eda14cbcSMatt Macy 
5582eda14cbcSMatt Macy 	/* Check if the DVA wasn't remapped because it is a split block */
5583eda14cbcSMatt Macy 	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5584eda14cbcSMatt Macy 		return (B_FALSE);
5585eda14cbcSMatt Macy 
5586eda14cbcSMatt Macy 	return (B_TRUE);
5587eda14cbcSMatt Macy }
5588eda14cbcSMatt Macy 
5589eda14cbcSMatt Macy /*
5590eda14cbcSMatt Macy  * Undo the allocation of a DVA which happened in the given transaction group.
5591eda14cbcSMatt Macy  */
5592eda14cbcSMatt Macy void
5593eda14cbcSMatt Macy metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5594eda14cbcSMatt Macy {
5595eda14cbcSMatt Macy 	metaslab_t *msp;
5596eda14cbcSMatt Macy 	vdev_t *vd;
5597eda14cbcSMatt Macy 	uint64_t vdev = DVA_GET_VDEV(dva);
5598eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5599eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5600eda14cbcSMatt Macy 
5601eda14cbcSMatt Macy 	ASSERT(DVA_IS_VALID(dva));
5602eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5603eda14cbcSMatt Macy 
5604eda14cbcSMatt Macy 	if (txg > spa_freeze_txg(spa))
5605eda14cbcSMatt Macy 		return;
5606eda14cbcSMatt Macy 
5607eda14cbcSMatt Macy 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5608eda14cbcSMatt Macy 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5609eda14cbcSMatt Macy 		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5610eda14cbcSMatt Macy 		    (u_longlong_t)vdev, (u_longlong_t)offset,
5611eda14cbcSMatt Macy 		    (u_longlong_t)size);
5612eda14cbcSMatt Macy 		return;
5613eda14cbcSMatt Macy 	}
5614eda14cbcSMatt Macy 
5615eda14cbcSMatt Macy 	ASSERT(!vd->vdev_removing);
5616eda14cbcSMatt Macy 	ASSERT(vdev_is_concrete(vd));
5617eda14cbcSMatt Macy 	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5618eda14cbcSMatt Macy 	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5619eda14cbcSMatt Macy 
5620eda14cbcSMatt Macy 	if (DVA_GET_GANG(dva))
56216db169e9SMartin Matuska 		size = vdev_gang_header_asize(vd);
5622eda14cbcSMatt Macy 
5623eda14cbcSMatt Macy 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5624eda14cbcSMatt Macy 
5625eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
5626eda14cbcSMatt Macy 	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5627eda14cbcSMatt Macy 	    offset, size);
5628eda14cbcSMatt Macy 	msp->ms_allocating_total -= size;
5629eda14cbcSMatt Macy 
5630eda14cbcSMatt Macy 	VERIFY(!msp->ms_condensing);
5631eda14cbcSMatt Macy 	VERIFY3U(offset, >=, msp->ms_start);
5632eda14cbcSMatt Macy 	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5633eda14cbcSMatt Macy 	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
5634eda14cbcSMatt Macy 	    msp->ms_size);
5635eda14cbcSMatt Macy 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5636eda14cbcSMatt Macy 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5637eda14cbcSMatt Macy 	range_tree_add(msp->ms_allocatable, offset, size);
5638eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
5639eda14cbcSMatt Macy }
5640eda14cbcSMatt Macy 
5641eda14cbcSMatt Macy /*
5642eda14cbcSMatt Macy  * Free the block represented by the given DVA.
5643eda14cbcSMatt Macy  */
5644eda14cbcSMatt Macy void
5645eda14cbcSMatt Macy metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5646eda14cbcSMatt Macy {
5647eda14cbcSMatt Macy 	uint64_t vdev = DVA_GET_VDEV(dva);
5648eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5649eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5650eda14cbcSMatt Macy 	vdev_t *vd = vdev_lookup_top(spa, vdev);
5651eda14cbcSMatt Macy 
5652eda14cbcSMatt Macy 	ASSERT(DVA_IS_VALID(dva));
5653eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5654eda14cbcSMatt Macy 
5655eda14cbcSMatt Macy 	if (DVA_GET_GANG(dva)) {
56566db169e9SMartin Matuska 		size = vdev_gang_header_asize(vd);
5657eda14cbcSMatt Macy 	}
5658eda14cbcSMatt Macy 
5659eda14cbcSMatt Macy 	metaslab_free_impl(vd, offset, size, checkpoint);
5660eda14cbcSMatt Macy }
5661eda14cbcSMatt Macy 
5662eda14cbcSMatt Macy /*
5663eda14cbcSMatt Macy  * Reserve some allocation slots. The reservation system must be called
5664eda14cbcSMatt Macy  * before we call into the allocator. If there aren't any available slots
5665eda14cbcSMatt Macy  * then the I/O will be throttled until an I/O completes and its slots are
5666eda14cbcSMatt Macy  * freed up. The function returns true if it was successful in placing
5667eda14cbcSMatt Macy  * the reservation.
5668eda14cbcSMatt Macy  */
5669eda14cbcSMatt Macy boolean_t
5670eda14cbcSMatt Macy metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
5671eda14cbcSMatt Macy     zio_t *zio, int flags)
5672eda14cbcSMatt Macy {
56737877fdebSMatt Macy 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
56747877fdebSMatt Macy 	uint64_t max = mca->mca_alloc_max_slots;
5675eda14cbcSMatt Macy 
5676eda14cbcSMatt Macy 	ASSERT(mc->mc_alloc_throttle_enabled);
56773f9d360cSMartin Matuska 	if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
56783f9d360cSMartin Matuska 	    zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
5679eda14cbcSMatt Macy 		/*
56801f88aa09SMartin Matuska 		 * The potential race between _count() and _add() is covered
56811f88aa09SMartin Matuska 		 * by the allocator lock in most cases, or is irrelevant in
56821f88aa09SMartin Matuska 		 * the others because GANG_ALLOCATION() or
56831f88aa09SMartin Matuska 		 * METASLAB_MUST_RESERVE is set. But even in some other,
56841f88aa09SMartin Matuska 		 * unanticipated scenario, the worst that can happen is that
56851f88aa09SMartin Matuska 		 * a few more I/Os get to allocation earlier, which is not a
56861f88aa09SMartin Matuska 		 * problem.
56871f88aa09SMartin Matuska 		 *
5687eda14cbcSMatt Macy 		 * We reserve the slots individually so that we can unreserve
5688eda14cbcSMatt Macy 		 * them individually when an I/O completes.
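		 *
		 * A sketch of the pairing expected from callers (hedged; the
		 * real protocol lives in zio.c):
		 *
		 *	if (metaslab_class_throttle_reserve(mc, ndvas,
		 *	    allocator, zio, 0)) {
		 *		... proceed to allocate ...
		 *	} else {
		 *		... throttle: retry after another I/O
		 *	        completes and unreserves its slots ...
		 *	}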
5689eda14cbcSMatt Macy */ 56904e8d558cSMartin Matuska zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio); 5691eda14cbcSMatt Macy zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; 56923f9d360cSMartin Matuska return (B_TRUE); 5693eda14cbcSMatt Macy } 56943f9d360cSMartin Matuska return (B_FALSE); 5695eda14cbcSMatt Macy } 5696eda14cbcSMatt Macy 5697eda14cbcSMatt Macy void 5698eda14cbcSMatt Macy metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, 5699eda14cbcSMatt Macy int allocator, zio_t *zio) 5700eda14cbcSMatt Macy { 57017877fdebSMatt Macy metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 57027877fdebSMatt Macy 5703eda14cbcSMatt Macy ASSERT(mc->mc_alloc_throttle_enabled); 57044e8d558cSMartin Matuska zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio); 5705eda14cbcSMatt Macy } 5706eda14cbcSMatt Macy 5707eda14cbcSMatt Macy static int 5708eda14cbcSMatt Macy metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, 5709eda14cbcSMatt Macy uint64_t txg) 5710eda14cbcSMatt Macy { 5711eda14cbcSMatt Macy metaslab_t *msp; 5712eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 5713eda14cbcSMatt Macy int error = 0; 5714eda14cbcSMatt Macy 5715eda14cbcSMatt Macy if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count) 5716eda14cbcSMatt Macy return (SET_ERROR(ENXIO)); 5717eda14cbcSMatt Macy 5718eda14cbcSMatt Macy ASSERT3P(vd->vdev_ms, !=, NULL); 5719eda14cbcSMatt Macy msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5720eda14cbcSMatt Macy 5721eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 5722eda14cbcSMatt Macy 5723eda14cbcSMatt Macy if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) { 5724eda14cbcSMatt Macy error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM); 5725eda14cbcSMatt Macy if (error == EBUSY) { 5726eda14cbcSMatt Macy ASSERT(msp->ms_loaded); 5727eda14cbcSMatt Macy ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 5728eda14cbcSMatt Macy error = 0; 5729eda14cbcSMatt Macy } 5730eda14cbcSMatt Macy } 5731eda14cbcSMatt Macy 5732eda14cbcSMatt Macy if (error == 0 && 5733eda14cbcSMatt Macy !range_tree_contains(msp->ms_allocatable, offset, size)) 5734eda14cbcSMatt Macy error = SET_ERROR(ENOENT); 5735eda14cbcSMatt Macy 5736eda14cbcSMatt Macy if (error || txg == 0) { /* txg == 0 indicates dry run */ 5737eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5738eda14cbcSMatt Macy return (error); 5739eda14cbcSMatt Macy } 5740eda14cbcSMatt Macy 5741eda14cbcSMatt Macy VERIFY(!msp->ms_condensing); 5742eda14cbcSMatt Macy VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5743eda14cbcSMatt Macy VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 5744eda14cbcSMatt Macy VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=, 5745eda14cbcSMatt Macy msp->ms_size); 5746eda14cbcSMatt Macy range_tree_remove(msp->ms_allocatable, offset, size); 5747eda14cbcSMatt Macy range_tree_clear(msp->ms_trim, offset, size); 5748eda14cbcSMatt Macy 57497877fdebSMatt Macy if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */ 5750eda14cbcSMatt Macy metaslab_class_t *mc = msp->ms_group->mg_class; 5751eda14cbcSMatt Macy multilist_sublist_t *mls = 57523ff01b23SMartin Matuska multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 5753eda14cbcSMatt Macy if (!multilist_link_active(&msp->ms_class_txg_node)) { 5754eda14cbcSMatt Macy msp->ms_selected_txg = txg; 5755eda14cbcSMatt Macy multilist_sublist_insert_head(mls, msp); 5756eda14cbcSMatt Macy } 5757eda14cbcSMatt Macy multilist_sublist_unlock(mls); 5758eda14cbcSMatt Macy 5759eda14cbcSMatt Macy if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 
5760eda14cbcSMatt Macy vdev_dirty(vd, VDD_METASLAB, msp, txg); 5761eda14cbcSMatt Macy range_tree_add(msp->ms_allocating[txg & TXG_MASK], 5762eda14cbcSMatt Macy offset, size); 5763eda14cbcSMatt Macy msp->ms_allocating_total += size; 5764eda14cbcSMatt Macy } 5765eda14cbcSMatt Macy 5766eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 5767eda14cbcSMatt Macy 5768eda14cbcSMatt Macy return (0); 5769eda14cbcSMatt Macy } 5770eda14cbcSMatt Macy 5771eda14cbcSMatt Macy typedef struct metaslab_claim_cb_arg_t { 5772eda14cbcSMatt Macy uint64_t mcca_txg; 5773eda14cbcSMatt Macy int mcca_error; 5774eda14cbcSMatt Macy } metaslab_claim_cb_arg_t; 5775eda14cbcSMatt Macy 5776eda14cbcSMatt Macy static void 5777eda14cbcSMatt Macy metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5778eda14cbcSMatt Macy uint64_t size, void *arg) 5779eda14cbcSMatt Macy { 5780e92ffd9bSMartin Matuska (void) inner_offset; 5781eda14cbcSMatt Macy metaslab_claim_cb_arg_t *mcca_arg = arg; 5782eda14cbcSMatt Macy 5783eda14cbcSMatt Macy if (mcca_arg->mcca_error == 0) { 5784eda14cbcSMatt Macy mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset, 5785eda14cbcSMatt Macy size, mcca_arg->mcca_txg); 5786eda14cbcSMatt Macy } 5787eda14cbcSMatt Macy } 5788eda14cbcSMatt Macy 5789eda14cbcSMatt Macy int 5790eda14cbcSMatt Macy metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) 5791eda14cbcSMatt Macy { 5792eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap != NULL) { 5793eda14cbcSMatt Macy metaslab_claim_cb_arg_t arg; 5794eda14cbcSMatt Macy 5795eda14cbcSMatt Macy /* 57967877fdebSMatt Macy * Only zdb(8) can claim on indirect vdevs. This is used 5797eda14cbcSMatt Macy * to detect leaks of mapped space (that are not accounted 5798eda14cbcSMatt Macy * for in the obsolete counts, spacemap, or bpobj). 5799eda14cbcSMatt Macy */ 5800eda14cbcSMatt Macy ASSERT(!spa_writeable(vd->vdev_spa)); 5801eda14cbcSMatt Macy arg.mcca_error = 0; 5802eda14cbcSMatt Macy arg.mcca_txg = txg; 5803eda14cbcSMatt Macy 5804eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, 5805eda14cbcSMatt Macy metaslab_claim_impl_cb, &arg); 5806eda14cbcSMatt Macy 5807eda14cbcSMatt Macy if (arg.mcca_error == 0) { 5808eda14cbcSMatt Macy arg.mcca_error = metaslab_claim_concrete(vd, 5809eda14cbcSMatt Macy offset, size, txg); 5810eda14cbcSMatt Macy } 5811eda14cbcSMatt Macy return (arg.mcca_error); 5812eda14cbcSMatt Macy } else { 5813eda14cbcSMatt Macy return (metaslab_claim_concrete(vd, offset, size, txg)); 5814eda14cbcSMatt Macy } 5815eda14cbcSMatt Macy } 5816eda14cbcSMatt Macy 5817eda14cbcSMatt Macy /* 5818eda14cbcSMatt Macy * Intent log support: upon opening the pool after a crash, notify the SPA 5819eda14cbcSMatt Macy * of blocks that the intent log has allocated for immediate write, but 5820eda14cbcSMatt Macy * which are still considered free by the SPA because the last transaction 5821eda14cbcSMatt Macy * group didn't commit yet. 
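 *
 * A rough sketch of how this is reached during pool open (hedged; the
 * authoritative sequence lives in zil.c and zio.c):
 *
 *	zil_claim()			  (open context, per dataset)
 *	    zio_claim()			  (per intent-log block)
 *		metaslab_claim()	  (dry run with txg 0, then for real)
 *		    metaslab_claim_dva()  (per DVA)
 *			metaslab_claim_impl()
 *			    metaslab_claim_concrete()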
5822eda14cbcSMatt Macy */ 5823eda14cbcSMatt Macy static int 5824eda14cbcSMatt Macy metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 5825eda14cbcSMatt Macy { 5826eda14cbcSMatt Macy uint64_t vdev = DVA_GET_VDEV(dva); 5827eda14cbcSMatt Macy uint64_t offset = DVA_GET_OFFSET(dva); 5828eda14cbcSMatt Macy uint64_t size = DVA_GET_ASIZE(dva); 5829eda14cbcSMatt Macy vdev_t *vd; 5830eda14cbcSMatt Macy 5831eda14cbcSMatt Macy if ((vd = vdev_lookup_top(spa, vdev)) == NULL) { 5832eda14cbcSMatt Macy return (SET_ERROR(ENXIO)); 5833eda14cbcSMatt Macy } 5834eda14cbcSMatt Macy 5835eda14cbcSMatt Macy ASSERT(DVA_IS_VALID(dva)); 5836eda14cbcSMatt Macy 5837eda14cbcSMatt Macy if (DVA_GET_GANG(dva)) 58386db169e9SMartin Matuska size = vdev_gang_header_asize(vd); 5839eda14cbcSMatt Macy 5840eda14cbcSMatt Macy return (metaslab_claim_impl(vd, offset, size, txg)); 5841eda14cbcSMatt Macy } 5842eda14cbcSMatt Macy 5843eda14cbcSMatt Macy int 5844eda14cbcSMatt Macy metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, 5845eda14cbcSMatt Macy int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, 5846eda14cbcSMatt Macy zio_alloc_list_t *zal, zio_t *zio, int allocator) 5847eda14cbcSMatt Macy { 5848eda14cbcSMatt Macy dva_t *dva = bp->blk_dva; 5849eda14cbcSMatt Macy dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL; 5850eda14cbcSMatt Macy int error = 0; 5851eda14cbcSMatt Macy 5852783d3ff6SMartin Matuska ASSERT0(BP_GET_LOGICAL_BIRTH(bp)); 5853783d3ff6SMartin Matuska ASSERT0(BP_GET_PHYSICAL_BIRTH(bp)); 5854eda14cbcSMatt Macy 5855eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5856eda14cbcSMatt Macy 58577877fdebSMatt Macy if (mc->mc_allocator[allocator].mca_rotor == NULL) { 58587877fdebSMatt Macy /* no vdevs in this class */ 5859eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5860eda14cbcSMatt Macy return (SET_ERROR(ENOSPC)); 5861eda14cbcSMatt Macy } 5862eda14cbcSMatt Macy 5863eda14cbcSMatt Macy ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); 5864eda14cbcSMatt Macy ASSERT(BP_GET_NDVAS(bp) == 0); 5865eda14cbcSMatt Macy ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); 5866eda14cbcSMatt Macy ASSERT3P(zal, !=, NULL); 5867eda14cbcSMatt Macy 5868eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5869eda14cbcSMatt Macy error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, 5870eda14cbcSMatt Macy txg, flags, zal, allocator); 5871eda14cbcSMatt Macy if (error != 0) { 5872eda14cbcSMatt Macy for (d--; d >= 0; d--) { 5873eda14cbcSMatt Macy metaslab_unalloc_dva(spa, &dva[d], txg); 5874eda14cbcSMatt Macy metaslab_group_alloc_decrement(spa, 5875eda14cbcSMatt Macy DVA_GET_VDEV(&dva[d]), zio, flags, 5876eda14cbcSMatt Macy allocator, B_FALSE); 5877da5137abSMartin Matuska memset(&dva[d], 0, sizeof (dva_t)); 5878eda14cbcSMatt Macy } 5879eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5880eda14cbcSMatt Macy return (error); 5881eda14cbcSMatt Macy } else { 5882eda14cbcSMatt Macy /* 5883eda14cbcSMatt Macy * Update the metaslab group's queue depth 5884eda14cbcSMatt Macy * based on the newly allocated dva. 
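			 * This is the mirror image of the
			 * metaslab_group_alloc_decrement() calls on the
			 * error path above; the queue depth is dropped
			 * again when the zio completes (hedged: see the
			 * allocation-throttle completion path in zio.c).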
5885eda14cbcSMatt Macy 			 */
5886eda14cbcSMatt Macy 			metaslab_group_alloc_increment(spa,
5887eda14cbcSMatt Macy 			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5888eda14cbcSMatt Macy 		}
5889eda14cbcSMatt Macy 	}
5890eda14cbcSMatt Macy 	ASSERT(error == 0);
5891eda14cbcSMatt Macy 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
5892eda14cbcSMatt Macy 
5893eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALLOC, FTAG);
5894eda14cbcSMatt Macy 
5895eda14cbcSMatt Macy 	BP_SET_BIRTH(bp, txg, 0);
5896eda14cbcSMatt Macy 
5897eda14cbcSMatt Macy 	return (0);
5898eda14cbcSMatt Macy }
5899eda14cbcSMatt Macy 
5900eda14cbcSMatt Macy void
5901eda14cbcSMatt Macy metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5902eda14cbcSMatt Macy {
5903eda14cbcSMatt Macy 	const dva_t *dva = bp->blk_dva;
5904eda14cbcSMatt Macy 	int ndvas = BP_GET_NDVAS(bp);
5905eda14cbcSMatt Macy 
5906eda14cbcSMatt Macy 	ASSERT(!BP_IS_HOLE(bp));
5907783d3ff6SMartin Matuska 	ASSERT(!now || BP_GET_LOGICAL_BIRTH(bp) >= spa_syncing_txg(spa));
5908eda14cbcSMatt Macy 
5909eda14cbcSMatt Macy 	/*
5910eda14cbcSMatt Macy 	 * If we have a checkpoint for the pool we need to make sure that
5911eda14cbcSMatt Macy 	 * the blocks that we free that are part of the checkpoint won't be
5912eda14cbcSMatt Macy 	 * reused until the checkpoint is discarded or we revert to it.
5913eda14cbcSMatt Macy 	 *
5914eda14cbcSMatt Macy 	 * The checkpoint flag is passed down the metaslab_free code path
5915eda14cbcSMatt Macy 	 * and is set whenever we want to add a block to the checkpoint's
5916eda14cbcSMatt Macy 	 * accounting. That is, we "checkpoint" blocks that existed at the
5917eda14cbcSMatt Macy 	 * time the checkpoint was created and are therefore referenced by
5918eda14cbcSMatt Macy 	 * the checkpointed uberblock.
5919eda14cbcSMatt Macy 	 *
5920eda14cbcSMatt Macy 	 * Note that we don't checkpoint any blocks if the current
5921eda14cbcSMatt Macy 	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5922eda14cbcSMatt Macy 	 * normally as they will be referenced by the checkpointed uberblock.
5923eda14cbcSMatt Macy 	 */
5924eda14cbcSMatt Macy 	boolean_t checkpoint = B_FALSE;
5925783d3ff6SMartin Matuska 	if (BP_GET_LOGICAL_BIRTH(bp) <= spa->spa_checkpoint_txg &&
5926eda14cbcSMatt Macy 	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5927eda14cbcSMatt Macy 		/*
5928eda14cbcSMatt Macy 		 * At this point, if the block is part of the checkpoint
5929eda14cbcSMatt Macy 		 * there is no way it was created in the current txg.
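		 *
		 * Restating the condition above: only frees of blocks born
		 * at or before spa_checkpoint_txg, processed in a syncing
		 * txg after the checkpoint was taken, are diverted into
		 * ms_checkpointing; all other frees take the normal
		 * ms_freeing path.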
5930eda14cbcSMatt Macy */ 5931eda14cbcSMatt Macy ASSERT(!now); 5932eda14cbcSMatt Macy ASSERT3U(spa_syncing_txg(spa), ==, txg); 5933eda14cbcSMatt Macy checkpoint = B_TRUE; 5934eda14cbcSMatt Macy } 5935eda14cbcSMatt Macy 5936eda14cbcSMatt Macy spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); 5937eda14cbcSMatt Macy 5938eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5939eda14cbcSMatt Macy if (now) { 5940eda14cbcSMatt Macy metaslab_unalloc_dva(spa, &dva[d], txg); 5941eda14cbcSMatt Macy } else { 5942eda14cbcSMatt Macy ASSERT3U(txg, ==, spa_syncing_txg(spa)); 5943eda14cbcSMatt Macy metaslab_free_dva(spa, &dva[d], checkpoint); 5944eda14cbcSMatt Macy } 5945eda14cbcSMatt Macy } 5946eda14cbcSMatt Macy 5947eda14cbcSMatt Macy spa_config_exit(spa, SCL_FREE, FTAG); 5948eda14cbcSMatt Macy } 5949eda14cbcSMatt Macy 5950eda14cbcSMatt Macy int 5951eda14cbcSMatt Macy metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) 5952eda14cbcSMatt Macy { 5953eda14cbcSMatt Macy const dva_t *dva = bp->blk_dva; 5954eda14cbcSMatt Macy int ndvas = BP_GET_NDVAS(bp); 5955eda14cbcSMatt Macy int error = 0; 5956eda14cbcSMatt Macy 5957eda14cbcSMatt Macy ASSERT(!BP_IS_HOLE(bp)); 5958eda14cbcSMatt Macy 5959eda14cbcSMatt Macy if (txg != 0) { 5960eda14cbcSMatt Macy /* 5961eda14cbcSMatt Macy * First do a dry run to make sure all DVAs are claimable, 5962eda14cbcSMatt Macy * so we don't have to unwind from partial failures below. 5963eda14cbcSMatt Macy */ 5964eda14cbcSMatt Macy if ((error = metaslab_claim(spa, bp, 0)) != 0) 5965eda14cbcSMatt Macy return (error); 5966eda14cbcSMatt Macy } 5967eda14cbcSMatt Macy 5968eda14cbcSMatt Macy spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5969eda14cbcSMatt Macy 5970eda14cbcSMatt Macy for (int d = 0; d < ndvas; d++) { 5971eda14cbcSMatt Macy error = metaslab_claim_dva(spa, &dva[d], txg); 5972eda14cbcSMatt Macy if (error != 0) 5973eda14cbcSMatt Macy break; 5974eda14cbcSMatt Macy } 5975eda14cbcSMatt Macy 5976eda14cbcSMatt Macy spa_config_exit(spa, SCL_ALLOC, FTAG); 5977eda14cbcSMatt Macy 5978eda14cbcSMatt Macy ASSERT(error == 0 || txg == 0); 5979eda14cbcSMatt Macy 5980eda14cbcSMatt Macy return (error); 5981eda14cbcSMatt Macy } 5982eda14cbcSMatt Macy 5983eda14cbcSMatt Macy static void 5984eda14cbcSMatt Macy metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset, 5985eda14cbcSMatt Macy uint64_t size, void *arg) 5986eda14cbcSMatt Macy { 5987e92ffd9bSMartin Matuska (void) inner, (void) arg; 5988e92ffd9bSMartin Matuska 5989eda14cbcSMatt Macy if (vd->vdev_ops == &vdev_indirect_ops) 5990eda14cbcSMatt Macy return; 5991eda14cbcSMatt Macy 5992eda14cbcSMatt Macy metaslab_check_free_impl(vd, offset, size); 5993eda14cbcSMatt Macy } 5994eda14cbcSMatt Macy 5995eda14cbcSMatt Macy static void 5996eda14cbcSMatt Macy metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size) 5997eda14cbcSMatt Macy { 5998eda14cbcSMatt Macy metaslab_t *msp; 5999eda14cbcSMatt Macy spa_t *spa __maybe_unused = vd->vdev_spa; 6000eda14cbcSMatt Macy 6001eda14cbcSMatt Macy if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 6002eda14cbcSMatt Macy return; 6003eda14cbcSMatt Macy 6004eda14cbcSMatt Macy if (vd->vdev_ops->vdev_op_remap != NULL) { 6005eda14cbcSMatt Macy vd->vdev_ops->vdev_op_remap(vd, offset, size, 6006eda14cbcSMatt Macy metaslab_check_free_impl_cb, NULL); 6007eda14cbcSMatt Macy return; 6008eda14cbcSMatt Macy } 6009eda14cbcSMatt Macy 6010eda14cbcSMatt Macy ASSERT(vdev_is_concrete(vd)); 6011eda14cbcSMatt Macy ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 6012eda14cbcSMatt Macy 
	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
6013eda14cbcSMatt Macy 
6014eda14cbcSMatt Macy 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
6015eda14cbcSMatt Macy 
6016eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
6017eda14cbcSMatt Macy 	if (msp->ms_loaded) {
6018eda14cbcSMatt Macy 		range_tree_verify_not_present(msp->ms_allocatable,
6019eda14cbcSMatt Macy 		    offset, size);
6020eda14cbcSMatt Macy 	}
6021eda14cbcSMatt Macy 
6022eda14cbcSMatt Macy 	/*
6023eda14cbcSMatt Macy 	 * Check all segments that currently exist in the freeing pipeline.
6024eda14cbcSMatt Macy 	 *
6025eda14cbcSMatt Macy 	 * It would intuitively make sense to also check the current allocating
6026eda14cbcSMatt Macy 	 * tree since metaslab_unalloc_dva() exists for extents that are
6027eda14cbcSMatt Macy 	 * allocated and freed in the same sync pass within the same txg.
6028eda14cbcSMatt Macy 	 * Unfortunately there are places (e.g. the ZIL) where we allocate a
6029eda14cbcSMatt Macy 	 * segment but then we free part of it within the same txg
6030eda14cbcSMatt Macy 	 * [see zil_sync()]. Thus, we don't call
6031eda14cbcSMatt Macy 	 * range_tree_verify_not_present() on the current allocating tree.
6032eda14cbcSMatt Macy 	 */
6033eda14cbcSMatt Macy 	range_tree_verify_not_present(msp->ms_freeing, offset, size);
6034eda14cbcSMatt Macy 	range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
6035eda14cbcSMatt Macy 	range_tree_verify_not_present(msp->ms_freed, offset, size);
6036eda14cbcSMatt Macy 	for (int j = 0; j < TXG_DEFER_SIZE; j++)
6037eda14cbcSMatt Macy 		range_tree_verify_not_present(msp->ms_defer[j], offset, size);
6038eda14cbcSMatt Macy 	range_tree_verify_not_present(msp->ms_trim, offset, size);
6039eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
6040eda14cbcSMatt Macy }
6041eda14cbcSMatt Macy 
6042eda14cbcSMatt Macy void
6043eda14cbcSMatt Macy metaslab_check_free(spa_t *spa, const blkptr_t *bp)
6044eda14cbcSMatt Macy {
6045eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6046eda14cbcSMatt Macy 		return;
6047eda14cbcSMatt Macy 
6048eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
6049eda14cbcSMatt Macy 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
6050eda14cbcSMatt Macy 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
6051eda14cbcSMatt Macy 		vdev_t *vd = vdev_lookup_top(spa, vdev);
6052eda14cbcSMatt Macy 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
6053eda14cbcSMatt Macy 		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
6054eda14cbcSMatt Macy 
6055eda14cbcSMatt Macy 		if (DVA_GET_GANG(&bp->blk_dva[i]))
60566db169e9SMartin Matuska 			size = vdev_gang_header_asize(vd);
6057eda14cbcSMatt Macy 
6058eda14cbcSMatt Macy 		ASSERT3P(vd, !=, NULL);
6059eda14cbcSMatt Macy 
6060eda14cbcSMatt Macy 		metaslab_check_free_impl(vd, offset, size);
6061eda14cbcSMatt Macy 	}
6062eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_VDEV, FTAG);
6063eda14cbcSMatt Macy }
6064eda14cbcSMatt Macy 
6065eda14cbcSMatt Macy static void
6066eda14cbcSMatt Macy metaslab_group_disable_wait(metaslab_group_t *mg)
6067eda14cbcSMatt Macy {
6068eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6069eda14cbcSMatt Macy 	while (mg->mg_disabled_updating) {
6070eda14cbcSMatt Macy 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6071eda14cbcSMatt Macy 	}
6072eda14cbcSMatt Macy }
6073eda14cbcSMatt Macy 
6074eda14cbcSMatt Macy static void
6075eda14cbcSMatt Macy metaslab_group_disabled_increment(metaslab_group_t *mg)
6076eda14cbcSMatt Macy {
6077eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6078eda14cbcSMatt Macy 	ASSERT(mg->mg_disabled_updating);
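
	/*
	 * If the group is already at its limit of concurrently disabled
	 * metaslabs (max_disabled_ms), block until another metaslab is
	 * re-enabled; metaslab_enable() broadcasts on mg_ms_disabled_cv
	 * as it drops the count.
	 */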
6079eda14cbcSMatt Macy 
6080eda14cbcSMatt Macy 	while (mg->mg_ms_disabled >= max_disabled_ms) {
6081eda14cbcSMatt Macy 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6082eda14cbcSMatt Macy 	}
6083eda14cbcSMatt Macy 	mg->mg_ms_disabled++;
6084eda14cbcSMatt Macy 	ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
6085eda14cbcSMatt Macy }
6086eda14cbcSMatt Macy 
6087eda14cbcSMatt Macy /*
6088eda14cbcSMatt Macy  * Mark the metaslab as disabled to prevent any allocations on this metaslab.
6089eda14cbcSMatt Macy  * We must also track how many metaslabs are currently disabled within a
6090eda14cbcSMatt Macy  * metaslab group and limit them to prevent allocation failures from
6091eda14cbcSMatt Macy  * occurring because all metaslabs are disabled.
6092eda14cbcSMatt Macy  */
6093eda14cbcSMatt Macy void
6094eda14cbcSMatt Macy metaslab_disable(metaslab_t *msp)
6095eda14cbcSMatt Macy {
6096eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&msp->ms_lock));
6097eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
6098eda14cbcSMatt Macy 
6099eda14cbcSMatt Macy 	mutex_enter(&mg->mg_ms_disabled_lock);
6100eda14cbcSMatt Macy 
6101eda14cbcSMatt Macy 	/*
6102eda14cbcSMatt Macy 	 * To keep an accurate count of how many threads have disabled
6103eda14cbcSMatt Macy 	 * a specific metaslab group, we only allow one thread to mark
6104eda14cbcSMatt Macy 	 * the metaslab group at a time. This ensures that the value of
6105eda14cbcSMatt Macy 	 * ms_disabled will be accurate when we decide to mark a metaslab
6106eda14cbcSMatt Macy 	 * group as disabled. To do this we force all other threads
6107eda14cbcSMatt Macy 	 * to wait till the metaslab group's mg_disabled_updating flag
6108eda14cbcSMatt Macy 	 * is no longer set.
6109eda14cbcSMatt Macy 	 */
6110eda14cbcSMatt Macy 	metaslab_group_disable_wait(mg);
6111eda14cbcSMatt Macy 	mg->mg_disabled_updating = B_TRUE;
6112eda14cbcSMatt Macy 	if (msp->ms_disabled == 0) {
6113eda14cbcSMatt Macy 		metaslab_group_disabled_increment(mg);
6114eda14cbcSMatt Macy 	}
6115eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
6116eda14cbcSMatt Macy 	msp->ms_disabled++;
6117eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
6118eda14cbcSMatt Macy 
6119eda14cbcSMatt Macy 	mg->mg_disabled_updating = B_FALSE;
6120eda14cbcSMatt Macy 	cv_broadcast(&mg->mg_ms_disabled_cv);
6121eda14cbcSMatt Macy 	mutex_exit(&mg->mg_ms_disabled_lock);
6122eda14cbcSMatt Macy }
6123eda14cbcSMatt Macy 
6124eda14cbcSMatt Macy void
6125eda14cbcSMatt Macy metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6126eda14cbcSMatt Macy {
6127eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
6128eda14cbcSMatt Macy 	spa_t *spa = mg->mg_vd->vdev_spa;
6129eda14cbcSMatt Macy 
6130eda14cbcSMatt Macy 	/*
6131eda14cbcSMatt Macy 	 * Wait for the outstanding IO to be synced to prevent newly
6132eda14cbcSMatt Macy 	 * allocated blocks from being overwritten. This is used by
6133eda14cbcSMatt Macy 	 * initialize and TRIM which are modifying unallocated space.
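	 *
	 * A sketch of the expected pairing (hedged; vdev_initialize.c and
	 * vdev_trim.c are the in-tree users):
	 *
	 *	metaslab_disable(msp);
	 *	... write into unallocated space of msp ...
	 *	metaslab_enable(msp, B_TRUE, B_FALSE);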
6134eda14cbcSMatt Macy */ 6135eda14cbcSMatt Macy if (sync) 6136eda14cbcSMatt Macy txg_wait_synced(spa_get_dsl(spa), 0); 6137eda14cbcSMatt Macy 6138eda14cbcSMatt Macy mutex_enter(&mg->mg_ms_disabled_lock); 6139eda14cbcSMatt Macy mutex_enter(&msp->ms_lock); 6140eda14cbcSMatt Macy if (--msp->ms_disabled == 0) { 6141eda14cbcSMatt Macy mg->mg_ms_disabled--; 6142eda14cbcSMatt Macy cv_broadcast(&mg->mg_ms_disabled_cv); 6143eda14cbcSMatt Macy if (unload) 6144eda14cbcSMatt Macy metaslab_unload(msp); 6145eda14cbcSMatt Macy } 6146eda14cbcSMatt Macy mutex_exit(&msp->ms_lock); 6147eda14cbcSMatt Macy mutex_exit(&mg->mg_ms_disabled_lock); 6148eda14cbcSMatt Macy } 6149eda14cbcSMatt Macy 6150716fd348SMartin Matuska void 6151716fd348SMartin Matuska metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty) 6152716fd348SMartin Matuska { 6153716fd348SMartin Matuska ms->ms_unflushed_dirty = dirty; 6154716fd348SMartin Matuska } 6155716fd348SMartin Matuska 6156eda14cbcSMatt Macy static void 6157eda14cbcSMatt Macy metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx) 6158eda14cbcSMatt Macy { 6159eda14cbcSMatt Macy vdev_t *vd = ms->ms_group->mg_vd; 6160eda14cbcSMatt Macy spa_t *spa = vd->vdev_spa; 6161eda14cbcSMatt Macy objset_t *mos = spa_meta_objset(spa); 6162eda14cbcSMatt Macy 6163eda14cbcSMatt Macy ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 6164eda14cbcSMatt Macy 6165eda14cbcSMatt Macy metaslab_unflushed_phys_t entry = { 6166eda14cbcSMatt Macy .msp_unflushed_txg = metaslab_unflushed_txg(ms), 6167eda14cbcSMatt Macy }; 6168eda14cbcSMatt Macy uint64_t entry_size = sizeof (entry); 6169eda14cbcSMatt Macy uint64_t entry_offset = ms->ms_id * entry_size; 6170eda14cbcSMatt Macy 6171eda14cbcSMatt Macy uint64_t object = 0; 6172eda14cbcSMatt Macy int err = zap_lookup(mos, vd->vdev_top_zap, 6173eda14cbcSMatt Macy VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6174eda14cbcSMatt Macy &object); 6175eda14cbcSMatt Macy if (err == ENOENT) { 6176eda14cbcSMatt Macy object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA, 6177eda14cbcSMatt Macy SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx); 6178eda14cbcSMatt Macy VERIFY0(zap_add(mos, vd->vdev_top_zap, 6179eda14cbcSMatt Macy VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6180eda14cbcSMatt Macy &object, tx)); 6181eda14cbcSMatt Macy } else { 6182eda14cbcSMatt Macy VERIFY0(err); 6183eda14cbcSMatt Macy } 6184eda14cbcSMatt Macy 6185eda14cbcSMatt Macy dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size, 6186eda14cbcSMatt Macy &entry, tx); 6187eda14cbcSMatt Macy } 6188eda14cbcSMatt Macy 6189eda14cbcSMatt Macy void 6190eda14cbcSMatt Macy metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx) 6191eda14cbcSMatt Macy { 6192eda14cbcSMatt Macy ms->ms_unflushed_txg = txg; 6193eda14cbcSMatt Macy metaslab_update_ondisk_flush_data(ms, tx); 6194eda14cbcSMatt Macy } 6195eda14cbcSMatt Macy 6196716fd348SMartin Matuska boolean_t 6197716fd348SMartin Matuska metaslab_unflushed_dirty(metaslab_t *ms) 6198716fd348SMartin Matuska { 6199716fd348SMartin Matuska return (ms->ms_unflushed_dirty); 6200716fd348SMartin Matuska } 6201716fd348SMartin Matuska 6202eda14cbcSMatt Macy uint64_t 6203eda14cbcSMatt Macy metaslab_unflushed_txg(metaslab_t *ms) 6204eda14cbcSMatt Macy { 6205eda14cbcSMatt Macy return (ms->ms_unflushed_txg); 6206eda14cbcSMatt Macy } 6207eda14cbcSMatt Macy 6208dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW, 6209eda14cbcSMatt Macy "Allocation granularity (a.k.a. 
stripe size)"); 6210eda14cbcSMatt Macy 6211eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW, 6212eda14cbcSMatt Macy "Load all metaslabs when pool is first opened"); 6213eda14cbcSMatt Macy 6214eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW, 6215eda14cbcSMatt Macy "Prevent metaslabs from being unloaded"); 6216eda14cbcSMatt Macy 6217eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW, 6218eda14cbcSMatt Macy "Preload potential metaslabs during reassessment"); 6219eda14cbcSMatt Macy 6220b2526e8bSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_limit, UINT, ZMOD_RW, 6221b2526e8bSMartin Matuska "Max number of metaslabs per group to preload"); 6222b2526e8bSMartin Matuska 6223be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW, 6224eda14cbcSMatt Macy "Delay in txgs after metaslab was last used before unloading"); 6225eda14cbcSMatt Macy 6226be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW, 6227eda14cbcSMatt Macy "Delay in milliseconds after metaslab was last used before unloading"); 6228eda14cbcSMatt Macy 6229be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW, 6230eda14cbcSMatt Macy "Percentage of metaslab group size that should be free to make it " 6231eda14cbcSMatt Macy "eligible for allocation"); 6232eda14cbcSMatt Macy 6233be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW, 6234eda14cbcSMatt Macy "Percentage of metaslab group size that should be considered eligible " 6235eda14cbcSMatt Macy "for allocations unless all metaslab groups within the metaslab class " 6236eda14cbcSMatt Macy "have also crossed this threshold"); 6237eda14cbcSMatt Macy 6238c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, 6239c03c5b1cSMartin Matuska ZMOD_RW, 6240eda14cbcSMatt Macy "Use the fragmentation metric to prefer less fragmented metaslabs"); 6241eda14cbcSMatt Macy 6242be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT, 6243c03c5b1cSMartin Matuska ZMOD_RW, "Fragmentation for metaslab to allow allocation"); 6244c03c5b1cSMartin Matuska 6245eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW, 6246eda14cbcSMatt Macy "Prefer metaslabs with lower LBAs"); 6247eda14cbcSMatt Macy 6248eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW, 6249eda14cbcSMatt Macy "Enable metaslab group biasing"); 6250eda14cbcSMatt Macy 6251eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT, 6252eda14cbcSMatt Macy ZMOD_RW, "Enable segment-based metaslab selection"); 6253eda14cbcSMatt Macy 6254eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW, 6255eda14cbcSMatt Macy "Segment-based metaslab selection maximum buckets before switching"); 6256eda14cbcSMatt Macy 6257dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW, 6258315ee00fSMartin Matuska "Blocks larger than this size are sometimes forced to be gang blocks"); 6259315ee00fSMartin Matuska 6260315ee00fSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW, 6261315ee00fSMartin Matuska "Percentage of large blocks that will be forced to be gang blocks"); 
6262eda14cbcSMatt Macy 6263be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW, 6264eda14cbcSMatt Macy "Max distance (bytes) to search forward before using size tree"); 6265eda14cbcSMatt Macy 6266eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW, 6267eda14cbcSMatt Macy "When looking in size tree, use largest segment instead of exact fit"); 6268eda14cbcSMatt Macy 6269dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64, 6270eda14cbcSMatt Macy ZMOD_RW, "How long to trust the cached max chunk size of a metaslab"); 6271eda14cbcSMatt Macy 6272be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW, 6273eda14cbcSMatt Macy "Percentage of memory that can be used to store metaslab range trees"); 62747877fdebSMatt Macy 62757877fdebSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT, 62767877fdebSMatt Macy ZMOD_RW, "Try hard to allocate before ganging"); 62777877fdebSMatt Macy 6278be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW, 62797877fdebSMatt Macy "Normally only consider this many of the best metaslabs in each vdev"); 62802ad756a6SMartin Matuska 62812ad756a6SMartin Matuska ZFS_MODULE_PARAM_CALL(zfs, zfs_, active_allocator, 62822ad756a6SMartin Matuska param_set_active_allocator, param_get_charp, ZMOD_RW, 62832ad756a6SMartin Matuska "SPA active allocator"); 6284
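
/*
 * Example (illustrative): the active allocator can be selected at runtime
 * on Linux with something like
 *
 *	echo dynamic > /sys/module/zfs/parameters/zfs_active_allocator
 *
 * where the accepted names are whatever param_set_active_allocator()
 * validates.
 */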