Lines Matching +full:os +full:- +full:initiated
9 * or https://opensource.org/licenses/CDDL-1.0.
181 rw_enter(&dn->dn_struct_rwlock, RW_READER);
184 rw_exit(&dn->dn_struct_rwlock);
191 *dbp = &db->db;
196 dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
204 err = dnode_hold(os, object, FTAG, &dn);
207 rw_enter(&dn->dn_struct_rwlock, RW_READER);
210 rw_exit(&dn->dn_struct_rwlock);
218 *dbp = &db->db;
248 dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
259 err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
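The hold/release pairing around these entry points is worth spelling out. A minimal sketch of a caller, assuming only the dmu_buf_hold()/dmu_buf_rele() interfaces and the DMU_READ_PREFETCH flag from this file; peek_byte() itself is hypothetical:

    /*
     * Hold the cached buffer covering `offset`, copy one byte out of
     * it, and drop the hold. db_offset/db_size describe where the
     * returned block sits within the object.
     */
    static int
    peek_byte(objset_t *os, uint64_t object, uint64_t offset, uint8_t *out)
    {
            dmu_buf_t *db;
            int err = dmu_buf_hold(os, object, offset, FTAG, &db,
                DMU_READ_PREFETCH);
            if (err != 0)
                    return (err);
            *out = ((uint8_t *)db->db_data)[offset - db->db_offset];
            dmu_buf_rele(db, FTAG);
            return (0);
    }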
285 if (newsize < 0 || newsize > db_fake->db_size)
291 if (dn->dn_bonus != db) {
315 if (dn->dn_bonus != db) {
333 type = DB_DNODE(db)->dn_bonustype;
340 dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
345 error = dnode_hold(os, object, FTAG, &dn);
347 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
349 rw_exit(&dn->dn_struct_rwlock);
371 rw_enter(&dn->dn_struct_rwlock, RW_READER);
372 if (dn->dn_bonus == NULL) {
373 if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
374 rw_exit(&dn->dn_struct_rwlock);
375 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
377 if (dn->dn_bonus == NULL)
380 db = dn->dn_bonus;
383 if (zfs_refcount_add(&db->db_holds, tag) == 1) {
385 atomic_inc_32(&dn->dn_dbufs_count);
393 rw_exit(&dn->dn_struct_rwlock);
403 *dbp = &db->db;
408 dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
413 error = dnode_hold(os, object, FTAG, &dn);
440 rw_enter(&dn->dn_struct_rwlock, RW_READER);
445 rw_exit(&dn->dn_struct_rwlock);
453 *dbp = &db->db;
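For the bonus and spill holds above, the usual consumer pattern is hold, copy the payload, release. A sketch under those assumptions; my_phys_t is an illustrative placeholder, while dmu_bonus_hold() and dmu_buf_rele() are the real interfaces:

    /* Copy an object's bonus area into a caller-supplied struct. */
    static int
    read_bonus(objset_t *os, uint64_t object, my_phys_t *out)
    {
            dmu_buf_t *db;
            int err = dmu_bonus_hold(os, object, FTAG, &db);
            if (err != 0)
                    return (err);
            /* db_size is the actual bonus length; never read past it. */
            memcpy(out, db->db_data, MIN(sizeof (*out), db->db_size));
            dmu_buf_rele(db, FTAG);
            return (0);
    }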
471 if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
474 rw_enter(&dn->dn_struct_rwlock, RW_READER);
476 if (!dn->dn_have_spill) {
483 rw_exit(&dn->dn_struct_rwlock);
509 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
510 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
531 * we can tell it about the multi-block read. dbuf_read() only knows
540 rw_enter(&dn->dn_struct_rwlock, RW_READER);
541 if (dn->dn_datablkshift) {
542 int blkshift = dn->dn_datablkshift;
543 nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
547 if (offset + length > dn->dn_datablksz) {
550 (longlong_t)dn->dn_objset->
551 os_dsl_dataset->ds_object,
552 (longlong_t)dn->dn_object, dn->dn_datablksz,
554 rw_exit(&dn->dn_struct_rwlock);
562 zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
571 zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks, read,
578 dmu_zfetch_run(&dn->dn_zfetch, zs, missed,
581 rw_exit(&dn->dn_struct_rwlock);
597 if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
598 offset + length < db->db.db_offset +
599 db->db.db_size) {
600 if (offset <= db->db.db_offset)
606 if (db->db_state != DB_CACHED)
609 dbp[i] = &db->db;
621 zfs_racct_write(dn->dn_objset->os_spa, length, nblks, flags);
624 dmu_zfetch_run(&dn->dn_zfetch, zs, missed, B_TRUE);
625 rw_exit(&dn->dn_struct_rwlock);
638 mutex_enter(&db->db_mtx);
639 while (db->db_state == DB_READ ||
640 db->db_state == DB_FILL)
641 cv_wait(&db->db_changed, &db->db_mtx);
642 if (db->db_state == DB_UNCACHED)
644 mutex_exit(&db->db_mtx);
658 dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
665 err = dnode_hold(os, object, FTAG, &dn);
711 * Issue prefetch I/Os for the given blocks. If level is greater than 0, the
724 dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
730 dmu_prefetch_dnode(os, object, pri);
734 if (dnode_hold(os, object, FTAG, &dn) != 0)
753 rw_enter(&dn->dn_struct_rwlock, RW_READER);
754 if (dn->dn_datablkshift != 0) {
761 end2 = dbuf_whichblock(dn, level, offset + len - 1) + 1;
762 uint8_t ibs = dn->dn_indblkshift;
763 uint8_t bs = (level == 0) ? dn->dn_datablkshift : ibs;
770 uint8_t ibps = ibs - SPA_BLKPTRSHIFT;
776 } while (end2 - start2 > limit);
780 end = start + (level == 0 && offset < dn->dn_datablksz);
787 rw_exit(&dn->dn_struct_rwlock);
804 mutex_enter(&dpa->dpa_lock);
805 ASSERT3U(dpa->dpa_pending_io, >, 0);
806 if (--dpa->dpa_pending_io == 0)
807 cv_broadcast(&dpa->dpa_cv);
808 mutex_exit(&dpa->dpa_lock);
819 rw_enter(&dn->dn_struct_rwlock, RW_READER);
822 uint64_t end = dbuf_whichblock(dn, 0, offset + len - 1) + 1;
823 dpa.dpa_pending_io = end - start;
830 rw_exit(&dn->dn_struct_rwlock);
845 * Issue prefetch I/Os for the given L0 block range and wait for the I/O
851 dmu_prefetch_wait(objset_t *os, uint64_t object, uint64_t offset, uint64_t size)
856 err = dnode_hold(os, object, FTAG, &dn);
864 if (dn->dn_indblkshift) {
865 uint64_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
866 chunksize = (nbps * 16) << dn->dn_datablkshift;
868 chunksize = dn->dn_datablksz;
877 size -= mylen;
891 * Issue prefetch I/Os for the given object's dnode.
894 dmu_prefetch_dnode(objset_t *os, uint64_t object, zio_priority_t pri)
899 dnode_t *dn = DMU_META_DNODE(os);
900 rw_enter(&dn->dn_struct_rwlock, RW_READER);
903 rw_exit(&dn->dn_struct_rwlock);
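dmu_prefetch_dnode() is fire-and-forget, so callers normally batch it ahead of blocking per-object work. A sketch of that pattern; warm_dnodes() is hypothetical, while the prefetch call and ZIO_PRIORITY_ASYNC_READ are real:

    /*
     * Issue async dnode prefetches for a set of object numbers so the
     * metadata reads overlap with whatever per-object work follows.
     */
    static void
    warm_dnodes(objset_t *os, const uint64_t *objs, int nobjs)
    {
            for (int i = 0; i < nobjs; i++)
                    dmu_prefetch_dnode(os, objs[i], ZIO_PRIORITY_ASYNC_READ);
    }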
921 uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
922 /* bytes of data covered by a level-1 indirect block */
923 uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
924 EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
929 if (dn->dn_nlevels <= 1) {
942 (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
960 (*start)--;
990 dmu_objset_zfs_unmounting(objset_t *os)
993 if (dmu_objset_type(os) == DMU_OST_ZFS)
994 return (zfs_get_vfs_flag_unmounted(os));
996 (void) os;
1002 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
1008 dsl_pool_t *dp = dmu_objset_pool(os);
1013 object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
1024 length = object_size - offset;
1031 if (dmu_objset_zfs_unmounting(dn->dn_objset))
1043 chunk_len = chunk_end - chunk_begin;
1045 tx = dmu_tx_create(os);
1046 dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
1061 mutex_enter(&dp->dp_lock);
1063 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
1064 mutex_exit(&dp->dp_lock);
1089 mutex_enter(&dp->dp_lock);
1090 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
1091 l1blks << dn->dn_indblkshift;
1092 mutex_exit(&dp->dp_lock);
1100 length -= chunk_len;
1106 dmu_free_long_range(objset_t *os, uint64_t object,
1112 err = dnode_hold(os, object, FTAG, &dn);
1115 err = dmu_free_long_range_impl(os, dn, offset, length);
1124 dn->dn_maxblkid = 0;
1131 dmu_free_long_object(objset_t *os, uint64_t object)
1136 err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
1140 tx = dmu_tx_create(os);
1146 err = dmu_object_free(os, object, tx);
1156 dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
1160 int err = dnode_hold(os, object, FTAG, &dn);
1164 ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
1182 if (dn->dn_maxblkid == 0) {
1183 uint64_t newsz = offset > dn->dn_datablksz ? 0 :
1184 MIN(size, dn->dn_datablksz - offset);
1185 memset((char *)buf + newsz, 0, size - newsz);
1206 * NB: we could do this block-at-a-time, but it's nice
1221 bufoff = offset - db->db_offset;
1222 tocpy = MIN(db->db_size - bufoff, size);
1224 ASSERT(db->db_data != NULL);
1225 (void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);
1228 size -= tocpy;
1237 dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1243 err = dnode_hold(os, object, FTAG, &dn);
1272 bufoff = offset - db->db_offset;
1273 tocpy = MIN(db->db_size - bufoff, size);
1275 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1277 if (tocpy == db->db_size)
1282 ASSERT(db->db_data != NULL);
1283 (void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
1285 if (tocpy == db->db_size)
1289 size -= tocpy;
1295 dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1304 VERIFY0(dmu_buf_hold_array(os, object, offset, size,
1327 zfs_dio_aligned(offset, size, dn->dn_datablksz)) {
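Every write path above expects a caller-assigned transaction. A minimal sketch of the standard pattern, assuming the long-standing dmu_tx_create()/dmu_tx_hold_write()/dmu_tx_assign() interfaces (the assign flag's spelling varies across releases); write_buf() is hypothetical:

    /* Wrap a single dmu_write() in its own transaction. */
    static int
    write_buf(objset_t *os, uint64_t object, uint64_t off, uint64_t len,
        const void *buf)
    {
            dmu_tx_t *tx = dmu_tx_create(os);
            dmu_tx_hold_write(tx, object, off, len);
            int err = dmu_tx_assign(tx, TXG_WAIT);
            if (err != 0) {
                    dmu_tx_abort(tx);       /* an unassigned tx is aborted */
                    return (err);
            }
            dmu_write(os, object, off, len, buf, tx);
            dmu_tx_commit(tx);
            return (0);
    }

Reads need no transaction; dmu_read(os, object, off, len, buf, DMU_READ_PREFETCH) is the symmetric call.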
1349 dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1358 VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
1370 dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
1378 VERIFY0(dmu_buf_hold_noread(os, object, offset,
1389 dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1395 VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
1409 if (uio->uio_extflg & UIO_DIRECT)
1413 * NB: we could do this block-at-a-time, but it's nice
1428 bufoff = zfs_uio_offset(uio) - db->db_offset;
1429 tocpy = MIN(db->db_size - bufoff, size);
1431 ASSERT(db->db_data != NULL);
1432 err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
1438 size -= tocpy;
1447 * From object zdb->db_object.
1476 dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
1484 err = dnode_hold(os, object, FTAG, &dn);
1510 if ((uio->uio_extflg & UIO_DIRECT) &&
1511 (write_size >= dn->dn_datablksz)) {
1513 dn->dn_datablksz)) {
1515 } else if (write_size > dn->dn_datablksz &&
1517 dn->dn_datablksz)) {
1519 dn->dn_datablksz * (write_size / dn->dn_datablksz);
1522 size -= write_size;
1529 P2PHASE(zfs_uio_offset(uio), dn->dn_datablksz);
1546 bufoff = off - db->db_offset;
1547 tocpy = MIN(db->db_size - bufoff, write_size);
1549 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1551 if (tocpy == db->db_size)
1556 ASSERT(db->db_data != NULL);
1557 err = zfs_uio_fault_move((char *)db->db_data + bufoff,
1560 if (tocpy == db->db_size && dmu_buf_fill_done(db, tx, err)) {
1562 zfs_uio_advance(uio, off - zfs_uio_offset(uio));
1568 write_size -= tocpy;
1569 size -= tocpy;
1576 if ((uio->uio_extflg & UIO_DIRECT) && size > 0) {
1585 * To object zdb->db_object.
1615 dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
1624 err = dnode_hold(os, object, FTAG, &dn);
1667 dmu_object_cached_size(objset_t *os, uint64_t object,
1676 if (dnode_hold(os, object, FTAG, &dn) != 0)
1679 if (dn->dn_nlevels < 2) {
1697 uint_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
1698 uint64_t l1blks = 1 + (dn->dn_maxblkid / nbps);
1700 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1729 dmu_cached_bps(dmu_objset_spa(os), db->db.db_data,
1738 rw_exit(&dn->dn_struct_rwlock);
1752 return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
1771 * (temporarily) write-only, like "zfs receive".
1785 dr->dt.dll.dr_abd = abd;
1786 dr->dt.dll.dr_props = *zp;
1787 dr->dt.dll.dr_flags = flags;
1801 objset_t *os = dn->dn_objset;
1802 uint64_t object = dn->dn_object;
1806 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1809 rw_exit(&dn->dn_struct_rwlock);
1817 if (offset == db->db.db_offset && blksz == db->db.db_size) {
1818 zfs_racct_write(os->os_spa, blksz, 1, 0);
1824 ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
1827 dmu_write(os, object, offset, blksz, buf->b_data, tx);
1854 if (zio->io_error == 0) {
1855 dbuf_dirty_record_t *dr = dsa->dsa_dr;
1856 blkptr_t *bp = zio->io_bp;
1861 db = &(dr->dr_dbuf->db);
1863 db = dsa->dsa_zgd->zgd_db;
1868 BP_SET_LSIZE(bp, db->db_size);
1879 dmu_sync_ready(zio, NULL, zio->io_private);
1887 dbuf_dirty_record_t *dr = dsa->dsa_dr;
1888 dmu_buf_impl_t *db = dr->dr_dbuf;
1889 zgd_t *zgd = dsa->dsa_zgd;
1895 if (zgd && zio->io_error == 0) {
1896 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1899 mutex_enter(&db->db_mtx);
1900 ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1901 if (zio->io_error == 0) {
1902 ASSERT0(dr->dt.dl.dr_has_raw_params);
1903 dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1904 if (dr->dt.dl.dr_nopwrite) {
1905 blkptr_t *bp = zio->io_bp;
1906 blkptr_t *bp_orig = &zio->io_bp_orig;
1910 VERIFY(BP_EQUAL(bp, db->db_blkptr));
1911 ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
1915 dr->dt.dl.dr_overridden_by = *zio->io_bp;
1916 dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1917 dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
1921 * new-style holes maintain their lsize, type, level,
1929 if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
1930 BP_GET_LOGICAL_BIRTH(&dr->dt.dl.dr_overridden_by) == 0)
1931 BP_ZERO(&dr->dt.dl.dr_overridden_by);
1933 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1936 cv_broadcast(&db->db_changed);
1937 mutex_exit(&db->db_mtx);
1939 if (dsa->dsa_done)
1940 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1948 blkptr_t *bp = zio->io_bp;
1949 dmu_sync_arg_t *dsa = zio->io_private;
1950 zgd_t *zgd = dsa->dsa_zgd;
1952 if (zio->io_error == 0) {
1957 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1960 blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
1961 ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
1963 ASSERT(BP_GET_LOGICAL_BIRTH(zio->io_bp) == zio->io_txg);
1964 ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1965 zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1969 dmu_tx_commit(dsa->dsa_tx);
1971 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1973 abd_free(zio->io_abd);
1978 dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
1985 error = dbuf_read((dmu_buf_impl_t *)zgd->zgd_db, NULL,
1990 tx = dmu_tx_create(os);
1991 dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
2009 zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
2012 dsa->dsa_dr = NULL;
2013 dsa->dsa_done = done;
2014 dsa->dsa_zgd = zgd;
2015 dsa->dsa_tx = tx;
2023 * (this is similar to the non-late-arrival case where the dbuf is
2038 zp->zp_nopwrite = B_FALSE;
2040 zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
2041 abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
2042 zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
2068 * 0: the I/O has been initiated.
2077 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
2078 objset_t *os = db->db_objset;
2079 dsl_dataset_t *ds = os->os_dsl_dataset;
2088 SET_BOOKMARK(&zb, ds->ds_object,
2089 db->db.db_object, db->db_level, db->db_blkid);
2092 dmu_write_policy(os, DB_DNODE(db), db->db_level, WP_DMU_SYNC, &zp);
2098 if (txg > spa_freeze_txg(os->os_spa))
2099 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
2107 mutex_enter(&db->db_mtx);
2109 if (txg <= spa_last_synced_txg(os->os_spa)) {
2113 mutex_exit(&db->db_mtx);
2117 if (txg <= spa_syncing_txg(os->os_spa)) {
2122 mutex_exit(&db->db_mtx);
2123 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
2133 mutex_exit(&db->db_mtx);
2137 dr_next = list_next(&db->db_dirty_records, dr);
2138 ASSERT(dr_next == NULL || dr_next->dr_txg < txg);
2140 if (db->db_blkptr != NULL) {
2152 *zgd->zgd_bp = *db->db_blkptr;
2156 * Assume the on-disk data is X, the current syncing data (in
2157 * txg - 1) is Y, and the current in-memory data is Z (currently
2163 * be incorrect - we would override with X, which could have
2166 * (Note that this is not a concern when we are nop-writing from
2172 * being dirty (dr_next is non-NULL), or by being freed
2175 * to the on-disk BP.
2181 if (dnode_block_freed(DB_DNODE(db), db->db_blkid))
2186 ASSERT(dr->dr_txg == txg);
2187 if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
2188 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2194 mutex_exit(&db->db_mtx);
2198 ASSERT0(dr->dt.dl.dr_has_raw_params);
2199 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2200 dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
2201 mutex_exit(&db->db_mtx);
2204 dsa->dsa_dr = dr;
2205 dsa->dsa_done = done;
2206 dsa->dsa_zgd = zgd;
2207 dsa->dsa_tx = NULL;
2209 zio_nowait(arc_write(pio, os->os_spa, txg, zgd->zgd_bp,
2210 dr->dt.dl.dr_data, !DBUF_IS_CACHEABLE(db),
2219 dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
2224 err = dnode_hold(os, object, FTAG, &dn);
2233 dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
2239 err = dnode_hold(os, object, FTAG, &dn);
2248 dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
2254 err = dnode_hold(os, object, FTAG, &dn);
2257 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2259 rw_exit(&dn->dn_struct_rwlock);
2265 dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
2277 VERIFY0(dnode_hold(os, object, FTAG, &dn));
2279 dn->dn_checksum = checksum;
2285 dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
2297 VERIFY0(dnode_hold(os, object, FTAG, &dn));
2298 dn->dn_compress = compress;
2310 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
2312 dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
2315 enum zio_checksum checksum = os->os_checksum;
2316 enum zio_compress compress = os->os_compress;
2317 uint8_t complevel = os->os_complevel;
2318 enum zio_checksum dedup_checksum = os->os_dedup_checksum;
2321 boolean_t dedup_verify = os->os_dedup_verify;
2323 int copies = os->os_copies;
2329 * 2. preallocated blocks (i.e. level-0 blocks of a dump device)
2334 * XXX -- we should design a compression algorithm
2337 compress = zio_compress_select(os->os_spa,
2342 * checksum is multi-bit correctable, and it's not a
2343 * ZBT-style checksum, then it's suitable for metadata
2353 switch (os->os_redundant_metadata) {
2381 dn ? dn->dn_storage_type : DMU_OT_NONE;
2404 compress = zio_compress_select(os->os_spa, dn->dn_compress,
2406 complevel = zio_complevel_select(os->os_spa, compress,
2410 zio_checksum_select(dn->dn_checksum, checksum) :
2449 if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
2453 copies = MIN(copies, SPA_DVAS_PER_BP - 1);
2465 zp->zp_compress = compress;
2466 zp->zp_complevel = complevel;
2467 zp->zp_checksum = checksum;
2468 zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2469 zp->zp_level = level;
2470 zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
2471 zp->zp_dedup = dedup;
2472 zp->zp_dedup_verify = dedup && dedup_verify;
2473 zp->zp_nopwrite = nopwrite;
2474 zp->zp_encrypt = encrypt;
2475 zp->zp_byteorder = ZFS_HOST_BYTEORDER;
2476 zp->zp_direct_write = (wp & WP_DIRECT_WR) ? B_TRUE : B_FALSE;
2477 memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN);
2478 memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN);
2479 memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN);
2480 zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
2481 os->os_zpl_special_smallblock : 0;
2482 zp->zp_storage_type = dn ? dn->dn_storage_type : DMU_OT_NONE;
2484 ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
2496 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
2502 err = dnode_hold(os, object, FTAG, &dn);
2506 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2514 * Provided a RL_READER rangelock spanning 0-UINT64_MAX is
2520 rw_exit(&dn->dn_struct_rwlock);
2526 txg_wait_synced(dmu_objset_pool(os), 0);
2537 rw_exit(&dn->dn_struct_rwlock);
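dmu_offset_next() is what backs SEEK_HOLE/SEEK_DATA. A thin sketch of the lseek-style usage; next_hole() is hypothetical, and the exact errno for "nothing found" is left to the caller (as the txg_wait_synced() call above shows, dirty dnodes may first need a sync):

    /*
     * Find the first hole at or after `start`; on success *holep holds
     * the resulting offset, otherwise the DMU error is passed through.
     */
    static int
    next_hole(objset_t *os, uint64_t object, uint64_t start, uint64_t *holep)
    {
            *holep = start;
            return (dmu_offset_next(os, object, B_TRUE, holep));
    }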
2544 dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
2552 error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
2567 mutex_enter(&db->db_mtx);
2569 if (!list_is_empty(&db->db_dirty_records)) {
2572 dr = list_head(&db->db_dirty_records);
2573 if (dr->dt.dl.dr_brtwrite) {
2579 bp = &dr->dt.dl.dr_overridden_by;
2585 mutex_exit(&db->db_mtx);
2590 bp = db->db_blkptr;
2593 mutex_exit(&db->db_mtx);
2619 if (BP_GET_BIRTH(bp) > spa_freeze_txg(os->os_spa)) {
2623 if (BP_GET_BIRTH(bp) > spa_last_synced_txg(os->os_spa)) {
2639 dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
2650 spa = os->os_spa;
2652 VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
2658 * sizes. If they don't, that's a no-go, as we are not able to shrink
2666 ASSERT3U(db->db.db_object, !=, DMU_META_DNODE_OBJECT);
2667 ASSERT0(db->db_level);
2668 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2669 ASSERT(db->db_blkid != DMU_SPILL_BLKID);
2671 if (!BP_IS_HOLE(bp) && BP_GET_LSIZE(bp) != dbuf->db_size) {
2684 mutex_enter(&db->db_mtx);
2686 dr = list_head(&db->db_dirty_records);
2688 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2689 dl = &dr->dt.dl;
2690 ASSERT0(dl->dr_has_raw_params);
2691 dl->dr_overridden_by = *bp;
2694 BP_SET_BIRTH(&dl->dr_overridden_by, dr->dr_txg,
2697 BP_SET_LOGICAL_BIRTH(&dl->dr_overridden_by,
2698 dr->dr_txg);
2701 dl->dr_brtwrite = B_TRUE;
2702 dl->dr_override_state = DR_OVERRIDDEN;
2704 mutex_exit(&db->db_mtx);
2724 dnode_phys_t *dnp = dn->dn_phys;
2726 doi->doi_data_block_size = dn->dn_datablksz;
2727 doi->doi_metadata_block_size = dn->dn_indblkshift ?
2728 1ULL << dn->dn_indblkshift : 0;
2729 doi->doi_type = dn->dn_type;
2730 doi->doi_bonus_type = dn->dn_bonustype;
2731 doi->doi_bonus_size = dn->dn_bonuslen;
2732 doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
2733 doi->doi_indirection = dn->dn_nlevels;
2734 doi->doi_checksum = dn->dn_checksum;
2735 doi->doi_compress = dn->dn_compress;
2736 doi->doi_nblkptr = dn->dn_nblkptr;
2737 doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
2738 doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
2739 doi->doi_fill_count = 0;
2740 for (int i = 0; i < dnp->dn_nblkptr; i++)
2741 doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
2747 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2748 mutex_enter(&dn->dn_mtx);
2752 mutex_exit(&dn->dn_mtx);
2753 rw_exit(&dn->dn_struct_rwlock);
2761 dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
2764 int err = dnode_hold(os, object, FTAG, &dn);
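dmu_object_info() is the standard way to inspect an object without holding its buffers; it returns the doi fields populated by the helper shown above. A short sketch, with the printf consumer purely illustrative:

    /* Report an object's data block size and bonus type. */
    dmu_object_info_t doi;
    if (dmu_object_info(os, object, &doi) == 0)
            printf("blksz=%u bonustype=%d\n",
                doi.doi_data_block_size, doi.doi_bonus_type);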
2802 *blksize = dn->dn_datablksz;
2804 *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
2805 SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
2815 *dnsize = DB_DNODE(db)->dn_num_slots << DNODE_SHIFT;