/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>

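/*
 * Add one level of indirection to the dnode's block tree: copy the
 * dnode's existing block pointers into a new top-level indirect block
 * and repoint any cached child dbufs at that new parent.
 */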
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int i;
	uint64_t txg = tx->tx_txg;

	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
	/* this dnode can't be paged out because it's dirty */

	db = dbuf_hold_level(dn, dn->dn_phys->dn_nlevels, 0, FTAG);
	for (i = 0; i < dn->dn_phys->dn_nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
	if (i != dn->dn_phys->dn_nblkptr) {
		ASSERT(list_link_active(&db->db_dirty_node[txg&TXG_MASK]));

		dbuf_read_havestruct(db);
		arc_release(db->db_buf, db);
		/* copy dnode's block pointers to new indirect block */
		ASSERT3U(sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr, <=,
		    db->db.db_size);
		bcopy(dn->dn_phys->dn_blkptr, db->db.db_data,
		    sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr);
	}

	dn->dn_phys->dn_nlevels += 1;
	dprintf("os=%p obj=%llu, increase to %d\n",
	    dn->dn_objset, dn->dn_object,
	    dn->dn_phys->dn_nlevels);

	/* set dbuf's parent pointers to new indirect buf */
	for (i = 0; i < dn->dn_phys->dn_nblkptr; i++) {
		dmu_buf_impl_t *child =
		    dbuf_find(dn, dn->dn_phys->dn_nlevels-2, i);
		if (child == NULL)
			continue;
		if (child->db_dnode == NULL) {
			mutex_exit(&child->db_mtx);
			continue;
		}

		if (child->db_parent == NULL ||
		    child->db_parent == dn->dn_dbuf) {
			dprintf_dbuf_bp(child, child->db_blkptr,
			    "changing db_blkptr to new indirect %s", "");
			child->db_parent = db;
			dbuf_add_ref(db, child);
			if (db->db.db_data) {
				child->db_blkptr =
				    (blkptr_t *)db->db.db_data + i;
			} else {
				child->db_blkptr = NULL;
			}
			dprintf_dbuf_bp(child, child->db_blkptr,
			    "changed db_blkptr to new indirect %s", "");
		}
		ASSERT3P(child->db_parent, ==, db);

		mutex_exit(&child->db_mtx);
	}

	bzero(dn->dn_phys->dn_blkptr,
	    sizeof (blkptr_t) * dn->dn_phys->dn_nblkptr);

	dbuf_remove_ref(db, FTAG);
}

/*
 * Free (kill) the given array of block pointers against the dataset
 * and credit the freed space back to the dnode's space accounting.
 */
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
	objset_impl_t *os = dn->dn_objset;
	uint64_t bytesfreed = 0;
	int i;

	dprintf("os=%p obj=%llx num=%d\n", os, dn->dn_object, num);

	for (i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;

		bytesfreed += BP_GET_ASIZE(bp);
		ASSERT3U(bytesfreed >> DEV_BSHIFT, <=, dn->dn_phys->dn_secphys);
		dsl_dataset_block_kill(os->os_dsl_dataset, bp, tx);
	}
	dnode_diduse_space(dn, -bytesfreed);
}

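/*
 * Debug-only sanity check: verify that the level-0 blocks in the
 * range [start, end] covered by this indirect dbuf contain only
 * zeroed data now that they have been freed.
 */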
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
#ifdef ZFS_DEBUG
	int off, num;
	int i, err, epbs;
	uint64_t txg = tx->tx_txg;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	off = start - (db->db_blkid * 1<<epbs);
	num = end - start + 1;

	ASSERT3U(off, >=, 0);
	ASSERT3U(num, >=, 0);
	ASSERT3U(db->db_level, >, 0);
	ASSERT3U(db->db.db_size, ==, 1<<db->db_dnode->dn_phys->dn_indblkshift);
	ASSERT3U(off+num, <=, db->db.db_size >> SPA_BLKPTRSHIFT);
	ASSERT(db->db_blkptr != NULL);

	for (i = off; i < off+num; i++) {
		uint64_t *buf;
		int j;
		dmu_buf_impl_t *child;

		ASSERT(db->db_level == 1);

		rw_enter(&db->db_dnode->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(db->db_dnode, db->db_level-1,
		    (db->db_blkid << epbs) + i, TRUE, FTAG, &child);
		rw_exit(&db->db_dnode->dn_struct_rwlock);
		if (err == ENOENT)
			continue;
		ASSERT(err == 0);
		ASSERT(child->db_level == 0);
		ASSERT(!list_link_active(&child->db_dirty_node[txg&TXG_MASK]));

		/* db_data_old better be zeroed */
		if (child->db_d.db_data_old[txg & TXG_MASK]) {
			buf = (child->db_d.db_data_old[txg & TXG_MASK])->b_data;
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}

		/*
		 * db_data better be zeroed unless it's dirty in a
		 * future txg.
		 */
		mutex_enter(&child->db_mtx);
		buf = child->db.db_data;
		if (buf != NULL && child->db_state != DB_FILL &&
		    !list_link_active(&child->db_dirty_node
		    [(txg+1) & TXG_MASK]) &&
		    !list_link_active(&child->db_dirty_node
		    [(txg+2) & TXG_MASK])) {
			for (j = 0; j < child->db.db_size >> 3; j++) {
				if (buf[j] != 0) {
					panic("freed data not zero: "
					    "child=%p i=%d off=%d num=%d\n",
					    child, i, off, num);
				}
			}
		}
		mutex_exit(&child->db_mtx);

		dbuf_remove_ref(child, FTAG);
	}
#endif
}

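/*
 * Free the blocks referenced by this indirect dbuf that fall within
 * [blkid, blkid + nblks).  Returns TRUE if every block pointer in the
 * dbuf was freed, in which case the caller may also free the dbuf's
 * own block pointer.
 */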
static int
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
    dmu_tx_t *tx)
{
	dnode_t *dn = db->db_dnode;
	blkptr_t *bp;
	dmu_buf_impl_t *subdb;
	uint64_t start, end, dbstart, dbend, i;
	int epbs, shift, err;
	int txg_index = tx->tx_txg&TXG_MASK;
	int all = TRUE;

	dbuf_read(db);
	arc_release(db->db_buf, db);
	bp = (blkptr_t *)db->db.db_data;

	epbs = db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	shift = (db->db_level - 1) * epbs;
	dbstart = db->db_blkid << epbs;
	start = blkid >> shift;
	if (dbstart < start) {
		bp += start - dbstart;
		all = FALSE;
	} else {
		start = dbstart;
	}
	dbend = ((db->db_blkid + 1) << epbs) - 1;
	end = (blkid + nblks - 1) >> shift;
	if (dbend <= end)
		end = dbend;
	else if (all)
		all = trunc;
	ASSERT3U(start, <=, end);

	if (db->db_level == 1) {
		free_verify(db, start, end, tx);
		free_blocks(dn, bp, end-start+1, tx);
		ASSERT(all || list_link_active(&db->db_dirty_node[txg_index]));
		return (all);
	}

	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, db->db_level-1, i, TRUE, FTAG, &subdb);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(subdb, blkid, nblks, trunc, tx)) {
			ASSERT3P(subdb->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		}
		dbuf_remove_ref(subdb, FTAG);
	}
#ifdef ZFS_DEBUG
	bp -= (end-start)+1;
	for (i = start; i <= end; i++, bp++) {
		if (i == start && blkid != 0)
			continue;
		else if (i == end && !trunc)
			continue;
		ASSERT3U(bp->blk_birth, ==, 0);
	}
#endif
	ASSERT(all || list_link_active(&db->db_dirty_node[txg_index]));
	return (all);
}

/*
 * free_range: Traverse the indicated range of the provided file
 * and "free" all the blocks contained there.
 */
static void
dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
	blkptr_t *bp = dn->dn_phys->dn_blkptr;
	dmu_buf_impl_t *db;
	int trunc, start, end, shift, i, err;
	int dnlevel = dn->dn_phys->dn_nlevels;

	if (blkid > dn->dn_phys->dn_maxblkid)
		return;

	ASSERT(dn->dn_phys->dn_maxblkid < UINT64_MAX);
	trunc = blkid + nblks > dn->dn_phys->dn_maxblkid;
	if (trunc)
		nblks = dn->dn_phys->dn_maxblkid - blkid + 1;

	/* There are no indirect blocks in the object */
	if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
		}
		ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
		free_blocks(dn, bp + blkid, nblks, tx);
		if (trunc) {
			uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
			    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
			ASSERT(off < dn->dn_phys->dn_maxblkid ||
			    dn->dn_phys->dn_maxblkid == 0 ||
			    dnode_next_offset(dn, FALSE, &off, 1, 1) == ESRCH);
		}
		return;
	}

	shift = (dnlevel - 1) * (dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT);
	start = blkid >> shift;
	ASSERT(start < dn->dn_phys->dn_nblkptr);
	end = (blkid + nblks - 1) >> shift;
	bp += start;
	for (i = start; i <= end; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		err = dbuf_hold_impl(dn, dnlevel-1, i, TRUE, FTAG, &db);
		ASSERT3U(err, ==, 0);
		rw_exit(&dn->dn_struct_rwlock);

		if (free_children(db, blkid, nblks, trunc, tx)) {
			ASSERT3P(db->db_blkptr, ==, bp);
			free_blocks(dn, bp, 1, tx);
		}
		dbuf_remove_ref(db, FTAG);
	}
	if (trunc) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, FALSE, &off, 1, 1) == ESRCH);
	}
}

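/*
 * Sync out the destruction of a dnode: undirty its buffers, free all
 * of its blocks, and zero out the on-disk dnode_phys_t.  The dnode
 * may be evicted once this returns.
 */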
static int
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(dmu_tx_is_syncing(tx));

	/* Undirty all buffers */
	while (db = list_head(&dn->dn_dirty_dbufs[txgoff])) {
		mutex_enter(&db->db_mtx);
		/* XXX - use dbuf_undirty()? */
		list_remove(&dn->dn_dirty_dbufs[txgoff], db);
		if (db->db_level == 0) {
			ASSERT3P(db->db_d.db_data_old[txgoff], ==, db->db_buf);
			if (db->db_d.db_overridden_by[txgoff])
				dbuf_unoverride(db, tx->tx_txg);
			db->db_d.db_data_old[txgoff] = NULL;
		}
		db->db_dirtycnt -= 1;
		mutex_exit(&db->db_mtx);
		dbuf_remove_ref(db, (void *)(uintptr_t)tx->tx_txg);
	}

	ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);

	/* Undirty next bits */
	dn->dn_next_nlevels[txgoff] = 0;
	dn->dn_next_indblkshift[txgoff] = 0;

	/* free up all the blocks in the file. */
	dbuf_free_range(dn, 0, -1, tx);
	dnode_sync_free_range(dn, 0, dn->dn_phys->dn_maxblkid+1, tx);
	ASSERT3U(dn->dn_phys->dn_secphys, ==, 0);

	/*
	 * All dbufs should be gone, since all holds are gone...
	 */
	ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

	/* ASSERT(blkptrs are zero); */
	ASSERT(dn->dn_phys->dn_type != DMU_OT_NONE);
	ASSERT(dn->dn_type != DMU_OT_NONE);

	ASSERT(dn->dn_free_txg > 0);
	if (dn->dn_allocated_txg != dn->dn_free_txg)
		dbuf_will_dirty(dn->dn_dbuf, tx);
	bzero(dn->dn_phys, sizeof (dnode_phys_t));

	mutex_enter(&dn->dn_mtx);
	dn->dn_type = DMU_OT_NONE;
	dn->dn_dirtyblksz[txgoff] = 0;
	dn->dn_maxblkid = 0;
	dn->dn_allocated_txg = 0;
	mutex_exit(&dn->dn_mtx);

	ASSERT(!IS_DNODE_DNODE(dn->dn_object));

	dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
	/*
	 * Now that we've released our hold, the dnode may
	 * be evicted, so we mustn't access it.
	 */
	return (1);
}

/*
 * Write out the dnode's dirty buffers at the specified level.
 * This may create more dirty buffers at the next level up.
 *
 * NOTE: The dnode is kept in memory by being dirty.  Once the
 * dirty bit is cleared, it may be evicted.  Beware of this!
 */
int
dnode_sync(dnode_t *dn, int level, zio_t *zio, dmu_tx_t *tx)
{
	free_range_t *rp;
	int txgoff = tx->tx_txg & TXG_MASK;
	dnode_phys_t *dnp = dn->dn_phys;

	/* ASSERT(dn->dn_objset->dd_snapshot == NULL); */
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(IS_DNODE_DNODE(dn->dn_object) ||
	    dn->dn_dirtyblksz[txgoff] > 0);

	ASSERT(dnp->dn_type != DMU_OT_NONE || dn->dn_allocated_txg);
	dnode_verify(dn);
	/*
	 * Make sure the dbuf for the dn_phys is released before we modify it.
	 */
	if (dn->dn_dbuf)
		arc_release(dn->dn_dbuf->db_buf, dn->dn_dbuf);

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_allocated_txg == tx->tx_txg) {
		/* The dnode is newly allocated or reallocated */
		if (dnp->dn_type == DMU_OT_NONE) {
			/* this is a first alloc, not a realloc */
			/* XXX shouldn't the phys already be zeroed? */
			bzero(dnp, DNODE_CORE_SIZE);
			dnp->dn_datablkszsec = dn->dn_datablkszsec;
			dnp->dn_indblkshift = dn->dn_indblkshift;
			dnp->dn_nlevels = 1;
		}

		if (dn->dn_nblkptr > dnp->dn_nblkptr) {
			/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_nblkptr - dnp->dn_nblkptr));
		}
		dnp->dn_type = dn->dn_type;
		dnp->dn_bonustype = dn->dn_bonustype;
		dnp->dn_bonuslen = dn->dn_bonuslen;
		dnp->dn_nblkptr = dn->dn_nblkptr;
	}

	if (dn->dn_dirtyblksz[txgoff]) {
		ASSERT(P2PHASE(dn->dn_dirtyblksz[txgoff],
		    SPA_MINBLOCKSIZE) == 0);
		dnp->dn_datablkszsec =
		    dn->dn_dirtyblksz[txgoff] >> SPA_MINBLOCKSHIFT;
	}

	if (dn->dn_next_indblkshift[txgoff]) {
		ASSERT(dnp->dn_nlevels == 1);
		dnp->dn_indblkshift = dn->dn_next_indblkshift[txgoff];
		dn->dn_next_indblkshift[txgoff] = 0;
	}

	/*
	 * Just take the live (open-context) values for checksum and compress.
	 * Strictly speaking it's a future leak, but nothing bad happens if we
	 * start using the new checksum or compress algorithm a little early.
	 */
	dnp->dn_checksum = dn->dn_checksum;
	dnp->dn_compress = dn->dn_compress;

	mutex_exit(&dn->dn_mtx);

	/* process all the "freed" ranges in the file */
	if (dn->dn_free_txg == 0 || dn->dn_free_txg > tx->tx_txg) {
		for (rp = avl_first(&dn->dn_ranges[txgoff]); rp != NULL;
		    rp = AVL_NEXT(&dn->dn_ranges[txgoff], rp))
			dnode_sync_free_range(dn,
			    rp->fr_blkid, rp->fr_nblks, tx);
	}
	mutex_enter(&dn->dn_mtx);
	for (rp = avl_first(&dn->dn_ranges[txgoff]); rp; ) {
		free_range_t *last = rp;
		rp = AVL_NEXT(&dn->dn_ranges[txgoff], rp);
		avl_remove(&dn->dn_ranges[txgoff], last);
		kmem_free(last, sizeof (free_range_t));
	}
	mutex_exit(&dn->dn_mtx);

	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= tx->tx_txg) {
		ASSERT3U(level, ==, 0);
		return (dnode_sync_free(dn, tx));
	}

	if (dn->dn_next_nlevels[txgoff]) {
		int new_lvl = dn->dn_next_nlevels[txgoff];

		rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		while (new_lvl > dnp->dn_nlevels)
			dnode_increase_indirection(dn, tx);
		rw_exit(&dn->dn_struct_rwlock);
		dn->dn_next_nlevels[txgoff] = 0;
	}

	if (level == dnp->dn_nlevels) {
		uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
		    (dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);

		/* we've already synced out all data and indirect blocks */
		/* there are no more dirty dbufs under this dnode */
		ASSERT3P(list_head(&dn->dn_dirty_dbufs[txgoff]), ==, NULL);
		ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= tx->tx_txg);

		/* XXX this is expensive. remove once 6343073 is closed. */
		/* NB: the "off < maxblkid" is to catch overflow */
		/*
		 * NB: if blocksize is changing, we could get confused,
		 * so only bother if there are multiple blocks and thus
		 * it can't be changing.
		 */
		ASSERT(off < dn->dn_phys->dn_maxblkid ||
		    dn->dn_phys->dn_maxblkid == 0 ||
		    dnode_next_offset(dn, FALSE, &off, 1, 1) == ESRCH);

		dn->dn_dirtyblksz[txgoff] = 0;


		if (!IS_DNODE_DNODE(dn->dn_object)) {
			dbuf_will_dirty(dn->dn_dbuf, tx);
			dnode_rele(dn, (void *)(uintptr_t)tx->tx_txg);
		}

		/*
		 * Now that we've dropped the reference, the dnode may
		 * be evicted, so we mustn't access it.
		 */
		return (1);
	} else {
		dmu_buf_impl_t *db, *db_next;
		list_t *list = &dn->dn_dirty_dbufs[txgoff];
		/*
		 * Iterate over the list, removing and sync'ing dbufs
		 * which are on the level we want, and leaving others.
		 */
		for (db = list_head(list); db; db = db_next) {
			db_next = list_next(list, db);
			if (db->db_level == level) {
				list_remove(list, db);
				dbuf_sync(db, zio, tx);
			}
		}
		return (0);
	}
}