Lines Matching +defs:t +defs:cmp
210 * last allocation from it. A metaslab can't be unloaded until at least
271 * The limit should be sufficiently large that we don't expect any allocation
285 * To avoid 64-bit overflow, don't set above UINT32_MAX.
327 * metaslabs per vdev and the allocation can't actually be satisfied (so we
329 * worse weight but it can actually satisfy the allocation, we won't find it
652 * and it was evicted during this loop), then we can't
705 * primary, then secondary active metaslab. If it doesn't have active
706 * metaslabs, or can't allocate from them, it searches for an inactive
707 * metaslab to activate. If it can't find a suitable one, it will steal
715 int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
716 if (likely(cmp))
717 return (cmp);
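The comparator excerpted at lines 715-717 relies on TREE_CMP(), which reduces a three-way comparison to ((a) > (b)) - ((a) < (b)) and therefore returns -1, 0, or +1; swapping the operands (m2 before m1) sorts heavier metaslabs first. A minimal sketch of that shape follows; it is an illustration only, with ms_start used as a stable tie-break rather than the comparator's full ordering rules.

    /*
     * Sketch: order metaslabs by descending weight, falling back to the
     * start offset so the ordering is total. Not the real comparator.
     */
    static int
    example_metaslab_compare(const void *x1, const void *x2)
    {
            const metaslab_t *m1 = x1;
            const metaslab_t *m2 = x2;

            /* Heavier metaslab sorts first, hence (m2, m1). */
            int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
            if (likely(cmp))
                    return (cmp);

            /* Equal weights: break the tie deterministically. */
            return (TREE_CMP(m1->ms_start, m2->ms_start));
    }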
760 * If the metaslab group was just added then it won't
763 * for allocations. We also don't consider non-activated
765 * to be initialized, because they can't be used for allocation.
818 int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
819 if (likely(cmp))
820 return (cmp);
824 cmp = TREE_CMP(a_vdev_id, b_vdev_id);
825 if (cmp)
826 return (cmp);
1025 avl_tree_t *t = &mg->mg_metaslab_tree;
1038 for (metaslab_t *msp = avl_first(t);
1039 msp != NULL; msp = AVL_NEXT(t, msp)) {
1298 * racy since we can't hold the locks for all metaslab
1320 * We didn't find another group to handle the allocation
1321 * so we can't skip this metaslab group even though
1352 int cmp = TREE_CMP(rs_size1, rs_size2);
1354 return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1371 int cmp = TREE_CMP(rs_size1, rs_size2);
1373 return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
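The two comparators at lines 1352-1354 and 1371-1373 share a branchless two-key idiom: when the segment sizes differ, cmp is -1 or +1 and !cmp is 0, so the offset term vanishes; when the sizes are equal, cmp is 0 and !cmp is 1, so the start-offset comparison decides. A standalone illustration of the idiom, with a hypothetical seg_t type and a local copy of the TREE_CMP() definition:

    #include <stdint.h>

    /* Mirrors OpenZFS's TREE_CMP() from sys/avl.h: evaluates to -1, 0, or 1. */
    #define TREE_CMP(a, b) (((a) > (b)) - ((a) < (b)))

    /* Hypothetical segment type, for illustration only. */
    typedef struct { uint64_t start; uint64_t size; } seg_t;

    static int
    seg_size_compare(const void *x1, const void *x2)
    {
            const seg_t *s1 = x1;
            const seg_t *s2 = x2;
            int cmp = TREE_CMP(s1->size, s2->size);

            /* If the sizes differ, !cmp is 0 and the second term drops out. */
            return (cmp + !cmp * TREE_CMP(s1->start, s2->start));
    }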
1517 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1520 if (t == NULL)
1522 if (zfs_btree_numnodes(t) == 0)
1525 rs = zfs_btree_last(t, NULL);
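Lines 1517-1525 obtain the largest free segment by taking the last node of the size-sorted B-tree ms_allocatable_by_size. The sketch below reads the same way, using the rs_get_start()/rs_get_end() accessors against the backing range tree; the body of the zfs_btree_numnodes() check at line 1522 is not visible in this listing, so the sketch simply returns 0 for an empty tree.

    /*
     * Sketch: size of the largest allocatable segment, or 0 if the
     * size-sorted tree is empty. Assumes the caller holds ms_lock.
     */
    static uint64_t
    example_largest_allocatable(metaslab_t *msp)
    {
            zfs_btree_t *t = &msp->ms_allocatable_by_size;
            range_seg_t *rs;

            if (zfs_btree_numnodes(t) == 0)
                    return (0);

            /* Sorted by size, so the last node is the largest segment. */
            rs = zfs_btree_last(t, NULL);
            return (rs_get_end(rs, msp->ms_allocatable) -
                rs_get_start(rs, msp->ms_allocatable));
    }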
1577 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1580 boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
1600 metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
1609 rs = zfs_btree_find(t, &rsearch, where);
1611 rs = zfs_btree_next(t, where, where);
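metaslab_block_find() at lines 1600-1611 uses the find-or-successor pattern on a zfs_btree: zfs_btree_find() returns the matching node, or NULL while filling in the insertion index, and zfs_btree_next() then yields the first node sorted after that index. A self-contained sketch of the pattern (the example_ wrapper name is hypothetical):

    /*
     * Sketch: return the tree node equal to 'search', or the first node
     * that sorts after it, or NULL if there is none.
     */
    static void *
    example_find_or_next(zfs_btree_t *t, void *search)
    {
            zfs_btree_index_t where;
            void *node = zfs_btree_find(t, search, &where);

            /* No exact match: advance past the would-be insertion point. */
            if (node == NULL)
                    node = zfs_btree_next(t, &where, &where);
            return (node);
    }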
1806 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1818 if (zfs_btree_numnodes(t) == 0)
1820 rs = zfs_btree_last(t, NULL);
1853 zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1870 rs = zfs_btree_find(t, &rsearch, &where);
1872 t = &msp->ms_allocatable_by_size;
1878 rs = zfs_btree_find(t, &rsearch, &where);
1880 rs = zfs_btree_next(t, &where, &where);
1989 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1991 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
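The loop at lines 1989-1991 (and the TXG_DEFER_SIZE loops elsewhere in the file) depends on ZFS's per-txg ring indexing: TXG_SIZE is 4, TXG_MASK is TXG_SIZE - 1, and ms_allocating[txg & TXG_MASK] holds the ranges allocated in that txg. With txg = 102 and TXG_CONCURRENT_STATES = 3, the loop visits slots (102 + 0) & 3 = 2, (102 + 1) & 3 = 3, and (102 + 2) & 3 = 0, i.e. the three txgs that can be in flight at once. A sketch of the summation, assuming ms_lock is held:

    /*
     * Sketch: total space still parked in the per-txg allocating trees,
     * using the (txg + t) & TXG_MASK ring indexing shown above.
     */
    static uint64_t
    example_allocating_space(metaslab_t *msp, uint64_t txg)
    {
            uint64_t space = 0;

            for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
                    /* TXG_MASK wraps the index modulo TXG_SIZE (4). */
                    space += range_tree_space(
                        msp->ms_allocating[(txg + t) & TXG_MASK]);
            }
            return (space);
    }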
2016 for (int t = 0; t < TXG_DEFER_SIZE; t++)
2017 memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
2062 * we only care about segments that haven't made it into the
2071 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2072 metaslab_aux_histogram_add(msp->ms_deferhist[t],
2073 sm->sm_shift, msp->ms_defer[t]);
2095 * pool, looking at a metaslab that hasn't had any allocations
2155 for (int t = 0; t < TXG_SIZE; t++) {
2156 if (txg_list_member(&vd->vdev_ms_list, msp, t))
2162 * with what's on disk. If the pool is read-only then there aren't
2218 * this class that was used longest ago, and attempt to unload it. We don't
2257 * can't continue on or we'll panic when we attempt to
2272 * We can't unload metaslabs with no spacemap because
2273 * they're not ready to be unloaded yet. We can't
2278 * loading. We also don't unload metaslabs that are
2312 * If we are using the log space maps, metaslab_sync() can't write to
2314 * it when we are flushing the metaslab, and that can't happen while
2323 * we didn't, the ms_allocatable would have entries that
2362 * Add the size-sorted tree first, since we don't need to load
2378 * If the ms_sm doesn't exist, this means that this
2379 * metaslab hasn't gone through metaslab_sync() and
2380 * thus has never been dirtied. So we shouldn't
2393 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2443 * correctly doesn't contain any segments that exist
2464 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2465 range_tree_walk(msp->ms_defer[t],
2629 * at which point it doesn't make sense for us to do the recalculation
2720 * When called from vdev_expand(), we can't call into the DMU as
2723 * that case, the object parameter is zero though, so we won't
2745 for (int t = 0; t < TXG_SIZE; t++) {
2746 ms->ms_allocating[t] = range_tree_create(NULL, type,
2751 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2752 ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
2777 * out this txg. This ensures that we don't attempt to allocate
2840 * If this metaslab hasn't been through metaslab_sync_done() yet, its
2841 * space hasn't been accounted for in its vdev and doesn't need to be
2868 for (int t = 0; t < TXG_SIZE; t++) {
2869 range_tree_destroy(msp->ms_allocating[t]);
2871 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2872 range_tree_destroy(msp->ms_defer[t]);
2876 for (int t = 0; t < TXG_SIZE; t++)
2877 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2971 * be shutting down the pool. We don't want to dirty
3038 * don't exclude this metaslab completely when it's 100%
3074 * has exceeded our threshold, then don't mark it active.
3152 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3154 deferspace_histogram[i] += msp->ms_deferhist[t][i];
3304 * allocatable segments, but we aren't aware of those until loading
3347 * If we're activating for the claim code, we don't want to actually
3388 * is nothing to do. Being activated, though, doesn't mean
3437 * that case, don't bother activating it. This can happen if the
3520 * metaslab so that we don't dirty more block and cause more sync passes.
3565 avl_tree_t *t = &mg->mg_metaslab_tree;
3576 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3728 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3729 range_tree_walk(msp->ms_defer[t],
3733 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3734 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
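Lines 3728-3734 walk the deferred-free trees and the still-open per-txg allocating trees with range_tree_walk(), which invokes a range_tree_func_t callback for every segment; the callback and its argument are not visible in this listing. One plausible use, sketched here under that assumption, is stripping those segments out of a caller-owned scratch tree (the 'scratch' tree is hypothetical):

    /*
     * Sketch: remove deferred and still-allocating segments from a
     * scratch range tree built by the caller.
     */
    for (int t = 0; t < TXG_DEFER_SIZE; t++)
            range_tree_walk(msp->ms_defer[t], range_tree_remove, scratch);

    for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
            range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
                range_tree_remove, scratch);
    }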
3872 * Just because a metaslab got flushed, that doesn't mean that
3874 * update ms_synced_length here in case it doesn't.
3928 * don't need to care about setting ms_flushing or broadcasting
3949 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3951 msp->ms_defer[t], tx);
4043 * Normally, we don't want to process a metaslab if there are no
4079 * Generate a log space map if one doesn't exist already.
4191 * tree won't be changing during that time, we drop the
4232 * ensure that we don't end up with a space map histogram
4236 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4238 msp->ms_defer[t], tx);
4303 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4305 msp->ms_allocating[(txg + t) & TXG_MASK]));
4368 * If there's a metaslab_load() in progress and we don't have
4707 avl_tree_t *t = &mg->mg_metaslab_tree;
4708 metaslab_t *msp = avl_find(t, search, &idx);
4710 msp = avl_nearest(t, idx, AVL_AFTER);
4713 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4730 * hasn't gone through a metaslab_sync_done(), then skip it.
4738 * from this disk, so we don't need to check how close we are.
4741 * metaslab, so we still don't care about distances.
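The fragment at lines 4707-4713 is the AVL analogue of the same find-or-successor idea: avl_find() records an insertion index when the exact search key is absent, and avl_nearest(..., AVL_AFTER) converts that index into the first entry sorted after the key, which is where the for-loop then starts scanning. A sketch of just that lookup step (the example_ wrapper name is made up):

    /*
     * Sketch: first metaslab in the group's tree that sorts at or after
     * the search key, or NULL if the tree ends before that point.
     */
    static metaslab_t *
    example_first_at_or_after(avl_tree_t *t, metaslab_t *search)
    {
            avl_index_t idx;
            metaslab_t *msp = avl_find(t, search, &idx);

            /* No exact match: take the entry after the would-be slot. */
            if (msp == NULL)
                    msp = avl_nearest(t, idx, AVL_AFTER);
            return (msp);
    }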
4820 * If we don't have enough metaslabs active to fill the entire array, we
4836 * we start in the location right after where we left off, and don't
4851 * Even though we don't hold the ms_lock for the
4935 * claimed we don't need to keep the CLAIM bit set
4995 * as we can't manipulate this metaslab until it's committed
4996 * to disk. If this metaslab is being initialized, we shouldn't
5173 * nothing actually breaks if we miss a few updates -- we just won't
5233 * Don't allocate from faulted devices.
5245 * for allocations. If we're ganging then don't allow
5279 * If we don't need to try hard, then require that the
5355 * If we haven't tried hard, perhaps do so now.
5494 * we didn't change the phys_birth, a lookup in the ARC for a
5582 /* Check if the DVA wasn't remapped because it is a split block */
5664 * before we call into the allocator. If there aren't any available slots
5749 if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
5821 * group didn't commit yet.
5911 * the blocks that we free that are part of the checkpoint won't be
5920 * Note that we don't checkpoint any blocks if the current
5962 * so we don't have to unwind from partial failures below.
6030 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the