Lines Matching defs:dn

175 dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
181 rw_enter(&dn->dn_struct_rwlock, RW_READER);
182 blkid = dbuf_whichblock(dn, 0, offset);
183 db = dbuf_hold(dn, blkid, tag);
184 rw_exit(&dn->dn_struct_rwlock);
199 dnode_t *dn;
204 err = dnode_hold(os, object, FTAG, &dn);
207 rw_enter(&dn->dn_struct_rwlock, RW_READER);
208 blkid = dbuf_whichblock(dn, 0, offset);
209 db = dbuf_hold(dn, blkid, tag);
210 rw_exit(&dn->dn_struct_rwlock);
211 dnode_rele(dn, FTAG);
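
The matches at 175-184 and 199-211 show the standard buffer-hold bracket: take a dnode reference, pin the block layout with dn_struct_rwlock as reader, translate the byte offset to a level-0 block id, hold the dbuf, then drop the lock and the dnode reference. A minimal sketch of that bracket, assembled from the matched lines (hold_buf_example is a hypothetical name; error handling is abbreviated):

/*
 * Illustrative sketch, not verbatim source. The dbuf keeps the dnode
 * alive through its own reference, so the FTAG hold can be released
 * as soon as dbuf_hold() returns.
 */
static int
hold_buf_example(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
        dnode_t *dn;
        dmu_buf_impl_t *db;
        uint64_t blkid;
        int err;

        err = dnode_hold(os, object, FTAG, &dn);
        if (err != 0)
                return (err);
        rw_enter(&dn->dn_struct_rwlock, RW_READER);  /* freeze layout */
        blkid = dbuf_whichblock(dn, 0, offset);      /* offset -> L0 blkid */
        db = dbuf_hold(dn, blkid, tag);
        rw_exit(&dn->dn_struct_rwlock);
        dnode_rele(dn, FTAG);
        if (db == NULL)
                return (SET_ERROR(EIO));
        *dbp = &db->db;
        return (0);
}
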
223 dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
234 err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
282 dnode_t *dn;
289 dn = DB_DNODE(db);
291 if (dn->dn_bonus != db) {
294 dnode_setbonuslen(dn, newsize, tx);
306 dnode_t *dn;
313 dn = DB_DNODE(db);
315 if (dn->dn_bonus != db) {
318 dnode_setbonus_type(dn, type, tx);
342 dnode_t *dn;
345 error = dnode_hold(os, object, FTAG, &dn);
346 dbuf_rm_spill(dn, tx);
347 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
348 dnode_rm_spill(dn, tx);
349 rw_exit(&dn->dn_struct_rwlock);
350 dnode_rele(dn, FTAG);
359 int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
371 rw_enter(&dn->dn_struct_rwlock, RW_READER);
372 if (dn->dn_bonus == NULL) {
373 if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
374 rw_exit(&dn->dn_struct_rwlock);
375 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
377 if (dn->dn_bonus == NULL)
378 dbuf_create_bonus(dn);
380 db = dn->dn_bonus;
384 VERIFY(dnode_add_ref(dn, db));
385 atomic_inc_32(&dn->dn_dbufs_count);
393 rw_exit(&dn->dn_struct_rwlock);
397 dnode_evict_bonus(dn);
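
The sequence at 371-378 is the reader-to-writer upgrade idiom: the bonus dbuf is created under the writer lock, but readers only escalate when they find it missing. The repeated NULL check matters because a failed rw_tryupgrade() drops the lock entirely, leaving a window for another thread to create the bonus dbuf first. A commented restatement of the matched lines (the comments are editorial, not from the source):

rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus == NULL) {
        if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
                /* Upgrade contended: drop and retake as writer. */
                rw_exit(&dn->dn_struct_rwlock);
                rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        }
        /* Re-check under the writer lock; the gap above races. */
        if (dn->dn_bonus == NULL)
                dbuf_create_bonus(dn);
}
db = dn->dn_bonus;
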
410 dnode_t *dn;
413 error = dnode_hold(os, object, FTAG, &dn);
417 error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
418 dnode_rele(dn, FTAG);
433 dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, const void *tag,
440 rw_enter(&dn->dn_struct_rwlock, RW_READER);
442 db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
445 rw_exit(&dn->dn_struct_rwlock);
465 dnode_t *dn;
469 dn = DB_DNODE(db);
471 if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
474 rw_enter(&dn->dn_struct_rwlock, RW_READER);
476 if (!dn->dn_have_spill) {
479 err = dmu_spill_hold_by_dnode(dn,
483 rw_exit(&dn->dn_struct_rwlock);
515 dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
540 rw_enter(&dn->dn_struct_rwlock, RW_READER);
541 if (dn->dn_datablkshift) {
542 int blkshift = dn->dn_datablkshift;
547 if (offset + length > dn->dn_datablksz) {
550 (longlong_t)dn->dn_objset->
552 (longlong_t)dn->dn_object, dn->dn_datablksz,
554 rw_exit(&dn->dn_struct_rwlock);
562 zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
564 blkid = dbuf_whichblock(dn, 0, offset);
571 zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks, read,
575 dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
578 dmu_zfetch_run(&dn->dn_zfetch, zs, missed,
581 rw_exit(&dn->dn_struct_rwlock);
597 if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
621 zfs_racct_write(dn->dn_objset->os_spa, length, nblks, flags);
624 dmu_zfetch_run(&dn->dn_zfetch, zs, missed, B_TRUE);
625 rw_exit(&dn->dn_struct_rwlock);
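
For the range math behind dmu_buf_hold_array_by_dnode (541-564), a worked example with assumed figures, taking dn_datablkshift = 17 (128 KiB blocks):

/*
 * offset = 0x1f000, length = 0x22000:
 *
 *      first blkid = 0x1f000 >> 17                 = 0
 *      last blkid  = (0x1f000 + 0x22000 - 1) >> 17 = 2
 *      nblks       = 2 - 0 + 1                     = 3
 *
 * Three dbufs are held in the loop at 575, with the zio_root from
 * 562 acting as the parent zio that the reads are waited on through.
 */
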
662 dnode_t *dn;
665 err = dnode_hold(os, object, FTAG, &dn);
669 err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
672 dnode_rele(dn, FTAG);
727 dnode_t *dn;
734 if (dnode_hold(os, object, FTAG, &dn) != 0)
737 dmu_prefetch_by_dnode(dn, level, offset, len, pri);
739 dnode_rele(dn, FTAG);
743 dmu_prefetch_by_dnode(dnode_t *dn, int64_t level, uint64_t offset,
753 rw_enter(&dn->dn_struct_rwlock, RW_READER);
754 if (dn->dn_datablkshift != 0) {
760 start = dbuf_whichblock(dn, level, offset);
761 end2 = dbuf_whichblock(dn, level, offset + len - 1) + 1;
762 uint8_t ibs = dn->dn_indblkshift;
763 uint8_t bs = (level == 0) ? dn->dn_datablkshift : ibs;
780 end = start + (level == 0 && offset < dn->dn_datablksz);
784 dbuf_prefetch(dn, level, i, pri, 0);
786 dbuf_prefetch(dn, level2, i, pri, 0);
787 rw_exit(&dn->dn_struct_rwlock);
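
The level parameter at 760-761 changes what a "block id" means. Assuming dbuf_whichblock() divides the offset by the span of one block at the requested level, i.e. off >> (dn_datablkshift + level * (dn_indblkshift - SPA_BLKPTRSHIFT)), and taking 128 KiB data and indirect blocks (both shifts 17, with SPA_BLKPTRSHIFT = 7 since a blkptr_t is 128 bytes):

/*
 *      level 0: off >> 17  -> one id per 128 KiB data block
 *      level 1: off >> 27  -> one id per 128 MiB of file span
 *      level 2: off >> 37  -> one id per 128 GiB of file span
 */
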
812 dmu_prefetch_wait_by_dnode(dnode_t *dn, uint64_t offset, uint64_t len)
819 rw_enter(&dn->dn_struct_rwlock, RW_READER);
821 uint64_t start = dbuf_whichblock(dn, 0, offset);
822 uint64_t end = dbuf_whichblock(dn, 0, offset + len - 1) + 1;
826 (void) dbuf_prefetch_impl(dn, 0, blk, ZIO_PRIORITY_ASYNC_READ,
830 rw_exit(&dn->dn_struct_rwlock);
853 dnode_t *dn;
856 err = dnode_hold(os, object, FTAG, &dn);
864 if (dn->dn_indblkshift) {
865 uint64_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
866 chunksize = (nbps * 16) << dn->dn_datablkshift;
868 chunksize = dn->dn_datablksz;
874 dmu_prefetch_wait_by_dnode(dn, offset, mylen);
885 dnode_rele(dn, FTAG);
899 dnode_t *dn = DMU_META_DNODE(os);
900 rw_enter(&dn->dn_struct_rwlock, RW_READER);
901 uint64_t blkid = dbuf_whichblock(dn, 0, object * sizeof (dnode_phys_t));
902 dbuf_prefetch(dn, 0, blkid, pri, 0);
903 rw_exit(&dn->dn_struct_rwlock);
918 get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
921 uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
923 uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
924 EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
929 if (dn->dn_nlevels <= 1) {
962 err = dnode_next_offset(dn,
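
The stride get_next_chunk() uses at 923-924 is the file span of one L1 indirect block. A worked example under the same assumptions as above, taking EPB(a, b) to mean entries per block, 1 << (a - b):

/*
 * EPB(17, 7) = 1 << 10 = 1024 block pointers per indirect block, so
 *
 *      iblkrange = dn_datablksz * EPB
 *                = 131072 * 1024 = 128 MiB of file data per L1 block.
 *
 * get_next_chunk() walks back toward the minimum offset in strides
 * of this size, counting L1 blocks into *l1blks so the caller can
 * account for the indirect blocks the free will dirty (see 1091).
 */
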
1002 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
1010 if (dn == NULL)
1013 object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
1031 if (dmu_objset_zfs_unmounting(dn->dn_objset))
1037 err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
1046 dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
1091 l1blks << dn->dn_indblkshift;
1096 dnode_free_range(dn, chunk_begin, chunk_len, tx);
1109 dnode_t *dn;
1112 err = dnode_hold(os, object, FTAG, &dn);
1115 err = dmu_free_long_range_impl(os, dn, offset, length);
1124 dn->dn_maxblkid = 0;
1126 dnode_rele(dn, FTAG);
1159 dnode_t *dn;
1160 int err = dnode_hold(os, object, FTAG, &dn);
1165 dnode_free_range(dn, offset, size, tx);
1166 dnode_rele(dn, FTAG);
1171 dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
1182 if (dn->dn_maxblkid == 0) {
1183 uint64_t newsz = offset > dn->dn_datablksz ? 0 :
1184 MIN(size, dn->dn_datablksz - offset);
1196 err = dmu_read_abd(dn, offset, size, data, flags);
1209 err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
1240 dnode_t *dn;
1243 err = dnode_hold(os, object, FTAG, &dn);
1247 err = dmu_read_impl(dn, offset, size, buf, flags);
1248 dnode_rele(dn, FTAG);
1253 dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
1256 return (dmu_read_impl(dn, offset, size, buf, flags));
1315 dmu_write_by_dnode_flags(dnode_t *dn, uint64_t offset, uint64_t size,
1327 zfs_dio_aligned(offset, size, dn->dn_datablksz)) {
1329 error = dmu_write_abd(dn, offset, size, data, DMU_DIRECTIO, tx);
1334 VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
1342 dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
1345 return (dmu_write_by_dnode_flags(dn, offset, size, buf, tx, 0));
1404 dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
1410 return (dmu_read_uio_direct(dn, uio, size));
1416 err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
1478 dnode_t *dn;
1484 err = dnode_hold(os, object, FTAG, &dn);
1488 err = dmu_read_uio_dnode(dn, uio, size);
1490 dnode_rele(dn, FTAG);
1496 dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
1511 (write_size >= dn->dn_datablksz)) {
1513 dn->dn_datablksz)) {
1514 return (dmu_write_uio_direct(dn, uio, size, tx));
1515 } else if (write_size > dn->dn_datablksz &&
1517 dn->dn_datablksz)) {
1519 dn->dn_datablksz * (write_size / dn->dn_datablksz);
1520 err = dmu_write_uio_direct(dn, uio, write_size, tx);
1529 P2PHASE(zfs_uio_offset(uio), dn->dn_datablksz);
1533 err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), write_size,
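
The ladder at 1511-1533 decides how much of a UIO write can take the Direct I/O path: fully aligned writes go direct in one shot, larger unaligned writes are trimmed to whole blocks for the direct leg, and the remainder falls through to the buffered dbuf path. A worked sketch, assuming P2PHASE(x, align) masks to the low bits (x & (align - 1)) and dn_datablksz = 0x20000 (128 KiB):

/*
 *      offset 0x20000, size 0x40000: start and length block-aligned,
 *          the whole write goes direct.
 *      offset 0x20000, size 0x50000: only whole blocks go direct;
 *          write_size = 0x20000 * (0x50000 / 0x20000) = 0x40000 (1519),
 *          leaving a 0x10000 tail for the buffered path.
 *      P2PHASE(0x25000, 0x20000) = 0x5000: where the resumed buffered
 *          write sits inside its block (1529).
 */
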
1618 dnode_t *dn;
1624 err = dnode_hold(os, object, FTAG, &dn);
1628 err = dmu_write_uio_dnode(dn, uio, size, tx);
1630 dnode_rele(dn, FTAG);
1670 dnode_t *dn;
1676 if (dnode_hold(os, object, FTAG, &dn) != 0)
1679 if (dn->dn_nlevels < 2) {
1680 dnode_rele(dn, FTAG);
1684 dmu_object_info_from_dnode(dn, &doi);
1689 dmu_prefetch_by_dnode(dn, 1, off,
1697 uint_t nbps = bp_span_in_blocks(dn->dn_indblkshift, 1);
1698 uint64_t l1blks = 1 + (dn->dn_maxblkid / nbps);
1700 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1718 err = dbuf_hold_impl(dn, 1, blk, B_TRUE, B_FALSE, FTAG, &db);
1738 rw_exit(&dn->dn_struct_rwlock);
1740 dnode_rele(dn, FTAG);
1778 dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
1782 dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
1797 dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
1801 objset_t *os = dn->dn_objset;
1802 uint64_t object = dn->dn_object;
1806 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1807 blkid = dbuf_whichblock(dn, 0, offset);
1808 db = dbuf_hold(dn, blkid, FTAG);
1809 rw_exit(&dn->dn_struct_rwlock);
2221 dnode_t *dn;
2224 err = dnode_hold(os, object, FTAG, &dn);
2227 err = dnode_set_nlevels(dn, nlevels, tx);
2228 dnode_rele(dn, FTAG);
2236 dnode_t *dn;
2239 err = dnode_hold(os, object, FTAG, &dn);
2242 err = dnode_set_blksz(dn, size, ibs, tx);
2243 dnode_rele(dn, FTAG);
2251 dnode_t *dn;
2254 err = dnode_hold(os, object, FTAG, &dn);
2257 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2258 dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
2259 rw_exit(&dn->dn_struct_rwlock);
2260 dnode_rele(dn, FTAG);
2268 dnode_t *dn;
2277 VERIFY0(dnode_hold(os, object, FTAG, &dn));
2279 dn->dn_checksum = checksum;
2280 dnode_setdirty(dn, tx);
2281 dnode_rele(dn, FTAG);
2288 dnode_t *dn;
2297 VERIFY0(dnode_hold(os, object, FTAG, &dn));
2298 dn->dn_compress = compress;
2299 dnode_setdirty(dn, tx);
2300 dnode_rele(dn, FTAG);
2310 dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
2312 dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
2381 dn ? dn->dn_storage_type : DMU_OT_NONE;
2404 compress = zio_compress_select(os->os_spa, dn->dn_compress,
2410 zio_checksum_select(dn->dn_checksum, checksum) :
2468 zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
2482 zp->zp_storage_type = dn ? dn->dn_storage_type : DMU_OT_NONE;
2498 dnode_t *dn;
2502 err = dnode_hold(os, object, FTAG, &dn);
2506 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2508 if (dnode_is_dirty(dn)) {
2520 rw_exit(&dn->dn_struct_rwlock);
2521 dnode_rele(dn, FTAG);
2533 err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK |
2537 rw_exit(&dn->dn_struct_rwlock);
2538 dnode_rele(dn, FTAG);
2722 __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2724 dnode_phys_t *dnp = dn->dn_phys;
2726 doi->doi_data_block_size = dn->dn_datablksz;
2727 doi->doi_metadata_block_size = dn->dn_indblkshift ?
2728 1ULL << dn->dn_indblkshift : 0;
2729 doi->doi_type = dn->dn_type;
2730 doi->doi_bonus_type = dn->dn_bonustype;
2731 doi->doi_bonus_size = dn->dn_bonuslen;
2732 doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
2733 doi->doi_indirection = dn->dn_nlevels;
2734 doi->doi_checksum = dn->dn_checksum;
2735 doi->doi_compress = dn->dn_compress;
2736 doi->doi_nblkptr = dn->dn_nblkptr;
2738 doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
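
Two of the doi fields above are derived quantities worth a worked note, assuming DNODE_SHIFT = 9 (512-byte dnode slots):

/*
 * doi_dnodesize  = dn_num_slots << DNODE_SHIFT
 *      e.g. 2 slots -> 2 << 9 = a 1024-byte dnode.
 * doi_max_offset = (dn_maxblkid + 1) * dn_datablksz
 *      e.g. maxblkid = 7 with 128 KiB blocks -> 8 * 131072 = 1 MiB,
 *      the end of the last allocated block, which is not necessarily
 *      the logical file size.
 */
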
2745 dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
2747 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2748 mutex_enter(&dn->dn_mtx);
2750 __dmu_object_info_from_dnode(dn, doi);
2752 mutex_exit(&dn->dn_mtx);
2753 rw_exit(&dn->dn_struct_rwlock);
2763 dnode_t *dn;
2764 int err = dnode_hold(os, object, FTAG, &dn);
2770 dmu_object_info_from_dnode(dn, doi);
2772 dnode_rele(dn, FTAG);
2797 dnode_t *dn;
2800 dn = DB_DNODE(db);
2802 *blksize = dn->dn_datablksz;
2804 *nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
2805 SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
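
The *nblk512 computation at 2804-2805 rounds the object's on-disk usage to the nearest 512-byte unit and then adds the object's own dnode slots, which are not counted in DN_USED_BYTES. A worked example with assumed figures (SPA_MINBLOCKSIZE = 512, SPA_MINBLOCKSHIFT = 9):

/*
 * DN_USED_BYTES(dn_phys) = 3000, dn_num_slots = 1:
 *
 *      (3000 + 512/2) >> 9 = 3256 >> 9 = 6    (round to nearest)
 *      *nblk512 = 6 + 1 = 7
 *
 * Adding SPA_MINBLOCKSIZE/2 before the shift turns truncation into
 * round-to-nearest; adding dn_num_slots charges the object for the
 * space its dnode occupies in the meta-dnode.
 */
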