/netbsd-src/external/cddl/osnet/dist/uts/common/fs/zfs/
zio.c
     47  SYSCTL_NODE(_vfs_zfs, OID_AUTO, zio, CTLFLAG_RW, 0, "ZFS ZIO");
    136  #define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
  in zio_push_transform():
    360  zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize,
    365  zt->zt_orig_data = zio->io_data;
    366  zt->zt_orig_size = zio->io_size;
    370  zt->zt_next = zio->io_transform_stack;
    371  zio->io_transform_stack = zt;
    373  zio->io_data = data;
    374  zio->io_size = size;
  in zio_pop_transforms():
    378  zio_pop_transforms(zio_t *zio)
  [all …]
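The zio.c fragments above show zio_push_transform() saving the I/O's current buffer and size on a per-zio stack before substituting a transformed buffer, and zio_pop_transforms() unwinding that stack. A minimal userland sketch of the same push/pop idea, with simplified structures and names rather than the kernel code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for zio_t and its transform records. */
typedef struct xform {
	void		*zt_orig_data;	/* buffer in place before the push */
	uint64_t	 zt_orig_size;
	struct xform	*zt_next;
} xform_t;

typedef struct io {
	void		*io_data;
	uint64_t	 io_size;
	xform_t		*io_transform_stack;
} io_t;

/* Save the current buffer, then make 'data' the active buffer. */
static void
push_transform(io_t *io, void *data, uint64_t size)
{
	xform_t *zt = malloc(sizeof (*zt));

	zt->zt_orig_data = io->io_data;
	zt->zt_orig_size = io->io_size;
	zt->zt_next = io->io_transform_stack;
	io->io_transform_stack = zt;

	io->io_data = data;
	io->io_size = size;
}

/* Unwind the stack, restoring the original buffer/size at each level. */
static void
pop_transforms(io_t *io)
{
	xform_t *zt;

	while ((zt = io->io_transform_stack) != NULL) {
		io->io_transform_stack = zt->zt_next;
		io->io_data = zt->zt_orig_data;
		io->io_size = zt->zt_orig_size;
		free(zt);
	}
}

int
main(void)
{
	char orig[16] = "original", scratch[32] = "transformed";
	io_t io = { .io_data = orig, .io_size = sizeof (orig) };

	push_transform(&io, scratch, sizeof (scratch));
	printf("active: %s (%llu bytes)\n", (char *)io.io_data,
	    (unsigned long long)io.io_size);
	pop_transforms(&io);
	printf("restored: %s (%llu bytes)\n", (char *)io.io_data,
	    (unsigned long long)io.io_size);
	return (0);
}

The kernel version also records a separate allocation size (the bufsize argument visible in the signature above) and an optional per-transform callback (zio_transform_func_t in zio.h below); the sketch omits both.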
vdev_mirror.c
  in vdev_mirror_map_free():
    138  vdev_mirror_map_free(zio_t *zio)
    140  mirror_map_t *mm = zio->io_vsd;
  in vdev_mirror_map_init():
    202  vdev_mirror_map_init(zio_t *zio)
    206  vdev_t *vd = zio->io_vd;
    210  dva_t *dva = zio->io_bp->blk_dva;
    211  spa_t *spa = zio->io_spa;
    213  mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
    227  mc->mc_offset = zio->io_offset;
    231  zio->io_vsd = mm;
    232  zio->io_vsd_ops = &vdev_mirror_vsd_ops;
  [all …]
vdev_queue.c
  in vdev_queue_init():
    367  sizeof (zio_t), offsetof(struct zio, io_queue_node));
    370  offsetof(struct zio, io_offset_node));
    373  offsetof(struct zio, io_offset_node));
    389  sizeof (zio_t), offsetof(struct zio, io_queue_node));
  in vdev_queue_io_add():
    410  vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
    412  spa_t *spa = zio->io_spa;
    416  ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
    417  avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
    418  qtt = vdev_queue_type_tree(vq, zio->io_type);
    420  avl_add(qtt, zio);
  [all …]
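vdev_queue_init() passes sizeof (zio_t) together with offsetof(struct zio, io_queue_node) or offsetof(struct zio, io_offset_node) to the AVL tree constructor, so one zio can be linked onto several trees through link nodes embedded in the structure itself. A small sketch of that embedded-node/offsetof technique, using generic names and singly linked containers instead of AVL trees, so only the pointer arithmetic is illustrated:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Embedded link, analogous to the avl_node_t members inside struct zio. */
typedef struct link {
	struct link *l_next;
} link_t;

/* An object that can sit on two containers at once via two links. */
typedef struct obj {
	uint64_t	o_offset;
	link_t		o_queue_node;	/* e.g. per-priority queue */
	link_t		o_offset_node;	/* e.g. offset-ordered tree */
} obj_t;

/* A container remembers where its link lives inside the object. */
typedef struct list {
	size_t	 ls_nodeoff;	/* offsetof(obj_t, <link field>) */
	link_t	*ls_head;
} list_t;

#define	OBJ2NODE(obj, off)	((link_t *)((char *)(obj) + (off)))
#define	NODE2OBJ(node, off)	((obj_t *)((char *)(node) - (off)))

static void
list_create(list_t *l, size_t nodeoff)
{
	l->ls_nodeoff = nodeoff;
	l->ls_head = NULL;
}

static void
list_add(list_t *l, obj_t *o)
{
	link_t *n = OBJ2NODE(o, l->ls_nodeoff);

	n->l_next = l->ls_head;
	l->ls_head = n;
}

int
main(void)
{
	list_t queue, byoffset;
	obj_t o = { .o_offset = 4096 };

	list_create(&queue, offsetof(obj_t, o_queue_node));
	list_create(&byoffset, offsetof(obj_t, o_offset_node));
	list_add(&queue, &o);
	list_add(&byoffset, &o);

	/* Walk back from an embedded node to the containing object. */
	printf("offset via queue link: %llu\n", (unsigned long long)
	    NODE2OBJ(queue.ls_head, queue.ls_nodeoff)->o_offset);
	return (0);
}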
vdev_label.c
  in vdev_label_read():
    187  vdev_label_read(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset,
    190  ASSERT(spa_config_held(zio->io_spa, SCL_STATE_ALL, RW_WRITER) ==
    194  zio_nowait(zio_read_phys(zio, vd,
  in vdev_label_write():
    201  vdev_label_write(zio_t *zio, vdev_t *vd, int l, void *buf, uint64_t offset,
    204  ASSERT(spa_config_held(zio->io_spa, SCL_ALL, RW_WRITER) == SCL_ALL ||
    205  (spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
    207  dsl_pool_sync_context(spa_get_dsl(zio->io_spa))));
    210  zio_nowait(zio_write_phys(zio, vd,
  in vdev_label_read_config():
    453  zio_t *zio;
    470  zio = zio_root(spa, NULL, NULL, flags);
  [all …]
vdev_file.c
  in vdev_file_io_start():
    161  vdev_file_io_start(zio_t *zio)
    163  vdev_t *vd = zio->io_vd;
    169  zio->io_error = SET_ERROR(ENXIO);
    170  zio_interrupt(zio);
    177  if (zio->io_type == ZIO_TYPE_IOCTL) {
    178  switch (zio->io_cmd) {
    180  zio->io_error = VOP_FSYNC(vp, FSYNC | FDSYNC,
    184  zio->io_error = SET_ERROR(ENOTSUP);
    187  zio_execute(zio);
    191  ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
  [all …]
vdev_cache.c
  in vdev_cache_allocate():
    168  vdev_cache_allocate(zio_t *zio)
    170  vdev_cache_t *vc = &zio->io_vd->vdev_cache;
    171  uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
  in vdev_cache_hit():
    204  vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
    206  uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
    218  bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
  in vdev_cache_read():
    264  vdev_cache_read(zio_t *zio)
    266  vdev_cache_t *vc = &zio->io_vd->vdev_cache;
    268  uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
    269  uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
  [all …]
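vdev_cache_allocate() and vdev_cache_read() split zio->io_offset against the cache block size VCBS with P2ALIGN (round down to the containing cache block) and P2PHASE (byte offset within that block). For power-of-two sizes both are single mask operations; a standalone demonstration using the usual sysmacros.h definitions (the 64 KB block size below is just an example value):

#include <stdint.h>
#include <stdio.h>

/* Power-of-two alignment helpers, as defined in sysmacros.h. */
#define	P2ALIGN(x, align)	((x) & -(align))
#define	P2PHASE(x, align)	((x) & ((align) - 1))

int
main(void)
{
	uint64_t vcbs = UINT64_C(1) << 16;	/* example 64 KB cache block */
	uint64_t io_offset = UINT64_C(0x12345678);

	/* Cache-block-aligned start of the region covering io_offset ... */
	uint64_t cache_offset = P2ALIGN(io_offset, vcbs);
	/* ... and the phase (byte offset) of the I/O within that block. */
	uint64_t cache_phase = P2PHASE(io_offset, vcbs);

	printf("offset %#llx -> block %#llx + phase %#llx\n",
	    (unsigned long long)io_offset,
	    (unsigned long long)cache_offset,
	    (unsigned long long)cache_phase);
	return (0);
}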
vdev_disk.c
  in vdev_disk_io_intr():
    391  zio_t *zio = bp->b_private;
    398  zio->io_error = (geterror(bp) != 0 ? SET_ERROR(EIO) : 0);
    400  if (zio->io_error == 0 && bp->b_resid != 0)
    401  zio->io_error = SET_ERROR(EIO);
    404  zio_delay_interrupt(zio);
  in vdev_disk_ioctl_free():
    408  vdev_disk_ioctl_free(zio_t *zio)
    410  kmem_free(zio->io_vsd, sizeof (struct dk_callback));
  in vdev_disk_ioctl_done():
    421  zio_t *zio = zio_arg;
    423  zio->io_error = error;
    425  zio_interrupt(zio);
  [all …]
zfs_fm.c
  in zfs_ereport_start():
    107  const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    132  if (zio != NULL) {
    137  if (zio->io_type != ZIO_TYPE_READ &&
    138  zio->io_type != ZIO_TYPE_WRITE)
    145  if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
    153  if (zio->io_error == EIO &&
    154  !(zio->io_flags & ZIO_FLAG_IO_RETRY))
    167  if (zio->io_vd == vd && !vdev_accessible(vd, zio))
    174  if (zio->io_type == ZIO_TYPE_READ &&
    175  zio->io_error == ECKSUM &&
  [all …]
zio_inject.c
  in zio_handle_fault_injection():
    167  zio_handle_fault_injection(zio_t *zio, int error)
    175  if (zio->io_logical == NULL)
    181  if (zio->io_type != ZIO_TYPE_READ)
    189  if (zio->io_spa != handler->zi_spa ||
    194  if (zio_match_handler(&zio->io_logical->io_bookmark,
    195  zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
  in zio_handle_label_injection():
    214  zio_handle_label_injection(zio_t *zio, int error)
    217  vdev_t *vd = zio->io_vd;
    218  uint64_t offset = zio->io_offset;
    245  if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
  [all …]
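zio_handle_fault_injection() and zio_handle_label_injection() walk the list of registered injection handlers and, when a handler's pool and matching criteria apply to the I/O, return the configured error instead of letting the I/O proceed normally. A stripped-down sketch of that walk-and-match shape, with generic record fields rather than the real zinject handler structures:

#include <stddef.h>
#include <stdio.h>

typedef struct handler {
	int		zi_pool_id;	/* stand-in for handler->zi_spa */
	int		zi_objtype;	/* object type to match, 0 = any */
	int		zi_error;	/* error to inject, e.g. EIO */
	struct handler	*zi_next;
} handler_t;

typedef struct io {
	int	io_pool_id;
	int	io_objtype;
} io_t;

/* Return the injected error from the first matching handler, else 0. */
static int
handle_fault_injection(const handler_t *list, const io_t *io)
{
	for (const handler_t *h = list; h != NULL; h = h->zi_next) {
		if (io->io_pool_id != h->zi_pool_id)
			continue;
		if (h->zi_objtype != 0 && h->zi_objtype != io->io_objtype)
			continue;
		return (h->zi_error);
	}
	return (0);
}

int
main(void)
{
	handler_t h2 = { .zi_pool_id = 7, .zi_objtype = 0, .zi_error = 5 };
	handler_t h1 = { .zi_pool_id = 3, .zi_objtype = 2, .zi_error = 52,
	    .zi_next = &h2 };
	io_t io = { .io_pool_id = 7, .io_objtype = 9 };

	printf("injected error: %d\n", handle_fault_injection(&h1, &io));
	return (0);
}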
vdev_raidz.c
  in vdev_raidz_map_free_vsd():
    293  vdev_raidz_map_free_vsd(zio_t *zio)
    295  raidz_map_t *rm = zio->io_vsd;
  in vdev_raidz_cksum_report():
    396  vdev_raidz_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *arg)
    401  raidz_map_t *rm = zio->io_vsd;
  in vdev_raidz_child_done():
   1706  vdev_raidz_child_done(zio_t *zio)
   1708  raidz_col_t *rc = zio->io_private;
   1710  rc->rc_error = zio->io_error;
  in vdev_raidz_io_start():
   1733  vdev_raidz_io_start(zio_t *zio)
   1735  vdev_t *vd = zio->io_vd;
   1742  rm = vdev_raidz_map_alloc(zio->io_data, zio->io_size, zio->io_offset,
  [all …]
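vdev_raidz_io_start() carves the I/O into per-child data columns plus parity columns via vdev_raidz_map_alloc(). For the single-parity P column the relationship is a byte-wise XOR of the data columns, which is enough to rebuild any one lost column; a toy illustration of generation and reconstruction (the Q and R parities use Reed-Solomon arithmetic over GF(2^8), not shown here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	NCOLS	3	/* data columns */
#define	COLSZ	8	/* bytes per column in this toy example */

/* P parity: byte-wise XOR of all data columns. */
static void
raidz_p_generate(uint8_t data[NCOLS][COLSZ], uint8_t p[COLSZ])
{
	memset(p, 0, COLSZ);
	for (int c = 0; c < NCOLS; c++)
		for (int i = 0; i < COLSZ; i++)
			p[i] ^= data[c][i];
}

/* Rebuild one missing data column from P and the surviving columns. */
static void
raidz_p_reconstruct(uint8_t data[NCOLS][COLSZ], uint8_t p[COLSZ], int missing)
{
	memcpy(data[missing], p, COLSZ);
	for (int c = 0; c < NCOLS; c++)
		if (c != missing)
			for (int i = 0; i < COLSZ; i++)
				data[missing][i] ^= data[c][i];
}

int
main(void)
{
	uint8_t data[NCOLS][COLSZ] = { "col-0..", "col-1..", "col-2.." };
	uint8_t p[COLSZ], saved[COLSZ];

	raidz_p_generate(data, p);
	memcpy(saved, data[1], COLSZ);

	memset(data[1], 0, COLSZ);		/* "lose" column 1 */
	raidz_p_reconstruct(data, p, 1);

	printf("column 1 %s\n",
	    memcmp(saved, data[1], COLSZ) == 0 ? "recovered" : "corrupt");
	return (0);
}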
vdev_geom.c
  in vdev_geom_io_intr():
    921  zio_t *zio;
    923  zio = bp->bio_caller1;
    924  vd = zio->io_vd;
    925  zio->io_error = bp->bio_error;
    926  if (zio->io_error == 0 && bp->bio_resid != 0)
    927  zio->io_error = SET_ERROR(EIO);
    929  switch(zio->io_error) {
    954  spa_async_request(zio->io_spa,
    963  zio_delay_interrupt(zio);
  in vdev_geom_io_start():
    967  vdev_geom_io_start(zio_t *zio)
  [all …]
trim_map.c
  in trim_map_write_start():
    335  trim_map_write_start(zio_t *zio)
    337  vdev_t *vd = zio->io_vd;
    346  start = zio->io_offset;
    347  end = TRIM_ZIO_END(zio->io_vd, start, zio->io_size);
    358  list_insert_tail(&tm->tm_pending_writes, zio);
    373  avl_add(&tm->tm_inflight_writes, zio);
  in trim_map_write_done():
    381  trim_map_write_done(zio_t *zio)
    383  vdev_t *vd = zio->io_vd;
    398  if (zio->io_trim_node.avl_child[0] ||
    399  zio->io_trim_node.avl_child[1] ||
  [all …]
zio_checksum.c
  in zio_checksum_compute():
    261  zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
    264  blkptr_t *bp = zio->io_bp;
    265  uint64_t offset = zio->io_offset;
    268  spa_t *spa = zio->io_spa;
  in zio_checksum_error():
    385  zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
    387  blkptr_t *bp = zio->io_bp;
    388  uint_t checksum = (bp == NULL ? zio->io_prop.zp_checksum :
    391  uint64_t size = (bp == NULL ? zio->io_size :
    393  uint64_t offset = zio->io_offset;
    394  void *data = zio->io_data;
  [all …]
arc.c
  in arc_cksum_is_equal():
   1764  arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
   1766  enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp);
   1769  ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
   1770  VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
   1790  csize = zio_compress_data(compress, zio->io_data, cbuf, lsize);
   1808  zio_push_transform(zio, cbuf, csize, HDR_GET_PSIZE(hdr), NULL);
   1824  valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
   1825  BP_GET_CHECKSUM(zio->io_bp), zio->io_data, zio->io_size,
   1826  zio->io_offset, NULL) == 0);
   1827  zio_pop_transforms(zio);
  [all …]
vdev_missing.c
  in vdev_missing_io_start():
     71  vdev_missing_io_start(zio_t *zio)
     73  zio->io_error = SET_ERROR(ENOTSUP);
     74  zio_execute(zio);
  in vdev_missing_io_done():
     79  vdev_missing_io_done(zio_t *zio)
dbuf.c
  in dbuf_read_done():
    902  dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
    922  } else if (zio == NULL || zio->io_error == 0) {
  in dbuf_read_impl():
    936  dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
   1021  (void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
  in dbuf_read():
   1028  dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
   1031  boolean_t havepzio = (zio != NULL);
   1064  if (zio == NULL)
   1065  zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
   1066  dbuf_read_impl(db, zio, flags);
   1078  err = zio_wait(zio);
  [all …]
dmu.c
  in dmu_buf_hold_array_by_dnode():
    452  zio_t *zio;
    493  zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
    500  zio_nowait(zio);
    506  (void) dbuf_read(db, zio, dbuf_flags);
    523  err = zio_wait(zio);
  in dmu_sync_ready():
   1641  dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
   1645  blkptr_t *bp = zio->io_bp;
   1647  if (zio->io_error == 0) {
  in dmu_sync_late_arrival_ready():
   1662  dmu_sync_late_arrival_ready(zio_t *zio)
   1664  dmu_sync_ready(zio, NULL, zio->io_private);
  [all …]
vdev.c
  in vdev_probe_done():
   1050  vdev_probe_done(zio_t *zio)
   1052  spa_t *spa = zio->io_spa;
   1053  vdev_t *vd = zio->io_vd;
   1054  vdev_probe_stats_t *vps = zio->io_private;
   1058  if (zio->io_type == ZIO_TYPE_READ) {
   1059  if (zio->io_error == 0)
   1061  if (zio->io_error == 0 && spa_writeable(spa)) {
   1063  zio->io_offset, zio->io_size, zio->io_data,
   1067  zio_buf_free(zio->io_data, zio->io_size);
   1069  } else if (zio->io_type == ZIO_TYPE_WRITE) {
  [all …]
dsl_pool.c
  in dsl_pool_sync_mos():
    532  zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
    533  dmu_objset_sync(dp->dp_meta_objset, zio, tx);
    534  VERIFY0(zio_wait(zio));
  in dsl_pool_sync():
    560  zio_t *zio;
    575  zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
    584  dsl_dataset_sync(ds, zio, tx);
    586  VERIFY0(zio_wait(zio));
    623  zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
    627  dsl_dataset_sync(ds, zio, tx);
    629  VERIFY0(zio_wait(zio));
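dsl_pool_sync() shows the pattern that also appears in dmu.c, dbuf.c, zil.c and vdev_label.c above: create a parent with zio_root(), issue asynchronous children against it (here via dsl_dataset_sync()), then block in zio_wait() until every child has completed. A userland analog of that fan-out/join discipline using pthreads; the helper names are invented for the sketch and are not the ZFS API:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* A "root" that children are attached to, analogous to zio_root(). */
typedef struct root {
	pthread_mutex_t	r_lock;
	pthread_cond_t	r_cv;
	int		r_children;	/* children still outstanding */
} root_t;

typedef struct child {
	root_t	*c_root;
	int	 c_id;
} child_t;

static void
root_init(root_t *r)
{
	pthread_mutex_init(&r->r_lock, NULL);
	pthread_cond_init(&r->r_cv, NULL);
	r->r_children = 0;
}

static void *
child_work(void *arg)
{
	child_t *c = arg;

	usleep(1000 * c->c_id);		/* stand-in for asynchronous I/O */
	printf("child %d done\n", c->c_id);

	pthread_mutex_lock(&c->c_root->r_lock);
	if (--c->c_root->r_children == 0)
		pthread_cond_signal(&c->c_root->r_cv);
	pthread_mutex_unlock(&c->c_root->r_lock);
	return (NULL);
}

/* Analogous to zio_wait(): block until every attached child has finished. */
static void
root_wait(root_t *r)
{
	pthread_mutex_lock(&r->r_lock);
	while (r->r_children != 0)
		pthread_cond_wait(&r->r_cv, &r->r_lock);
	pthread_mutex_unlock(&r->r_lock);
}

int
main(void)
{
	root_t root;
	pthread_t tid[3];
	child_t kids[3];

	root_init(&root);
	for (int i = 0; i < 3; i++) {	/* dispatch, like zio_nowait() */
		kids[i] = (child_t){ .c_root = &root, .c_id = i };
		pthread_mutex_lock(&root.r_lock);
		root.r_children++;
		pthread_mutex_unlock(&root.r_lock);
		pthread_create(&tid[i], NULL, child_work, &kids[i]);
	}
	root_wait(&root);
	printf("all children complete\n");
	for (int i = 0; i < 3; i++)
		pthread_join(tid[i], NULL);
	return (0);
}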
ddt.c
  in ddt_ditto_copies_needed():
    539  zio_t *zio = dde->dde_lead_zio[p];
    541  if (zio != NULL)
    542  refcnt += zio->io_parent_count; /* pending refs */
  in ddt_repair_entry_done():
    943  ddt_repair_entry_done(zio_t *zio)
    945  ddt_entry_t *rdde = zio->io_private;
  in ddt_repair_entry():
    957  zio_t *zio;
    960  zio = zio_null(rio, rio->io_spa, NULL,
    969  zio_nowait(zio_rewrite(zio, zio->io_spa, 0, &blk,
    971  ZIO_PRIORITY_SYNC_WRITE, ZIO_DDT_CHILD_FLAGS(zio), NULL));
    974  zio_nowait(zio);
zil.c
  in zil_flush_vdevs():
    841  zio_t *zio = NULL;
    857  if (zio == NULL)
    858  zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
    859  zio_flush(zio, vd);
    868  if (zio)
    869  (void) zio_wait(zio);
  in zil_lwb_write_done():
    878  zil_lwb_write_done(zio_t *zio)
    880  lwb_t *lwb = zio->io_private;
    884  ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
    885  ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
  [all …]
/netbsd-src/external/cddl/osnet/dist/uts/common/fs/zfs/sys/
zio.h
    214  #define ZIO_DDT_CHILD_FLAGS(zio) \
    215  (((zio)->io_flags & ZIO_FLAG_DDT_INHERIT) | \
    218  #define ZIO_GANG_CHILD_FLAGS(zio) \
    219  (((zio)->io_flags & ZIO_FLAG_GANG_INHERIT) | \
    222  #define ZIO_VDEV_CHILD_FLAGS(zio) \
    223  (((zio)->io_flags & ZIO_FLAG_VDEV_INHERIT) | \
    248  typedef void zio_done_func_t(zio_t *zio);
    341  typedef void zio_vsd_cksum_report_f(zio_t *zio, zio_cksum_report_t *zcr,
    356  typedef zio_t *zio_gang_issue_func_t(zio_t *zio, blkptr_t *bp,
    359  typedef void zio_transform_func_t(zio_t *zio, void *data, uint64_t size);
  [all …]
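The ZIO_*_CHILD_FLAGS macros build a child I/O's flags by masking the parent's io_flags with a per-child-type inherit mask and OR-ing in flags specific to that kind of child; the listing shows only the first line of each definition. A self-contained sketch of that shape, with made-up flag names standing in for the ZIO_FLAG_* values:

#include <stdint.h>
#include <stdio.h>

/* Made-up flag bits standing in for ZIO_FLAG_* values. */
#define	FLAG_CANFAIL		(1u << 0)
#define	FLAG_SPECULATIVE	(1u << 1)
#define	FLAG_CONFIG_WRITER	(1u << 2)
#define	FLAG_DONT_RETRY		(1u << 3)
#define	FLAG_VDEV_CHILD		(1u << 4)

/* Only these parent flags propagate to a vdev child in this sketch. */
#define	VDEV_INHERIT	(FLAG_CANFAIL | FLAG_SPECULATIVE | FLAG_DONT_RETRY)

/* Child flags = inherited subset of the parent's flags + child markers. */
#define	VDEV_CHILD_FLAGS(parent_flags) \
	(((parent_flags) & VDEV_INHERIT) | FLAG_VDEV_CHILD | FLAG_CANFAIL)

int
main(void)
{
	uint32_t parent = FLAG_SPECULATIVE | FLAG_CONFIG_WRITER;

	/* CONFIG_WRITER is not in the inherit mask, so it is dropped. */
	printf("child flags: %#x\n", VDEV_CHILD_FLAGS(parent));
	return (0);
}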
vdev.h
     94  extern void vdev_stat_update(zio_t *zio, uint64_t psize);
    116  extern boolean_t vdev_accessible(vdev_t *vd, zio_t *zio);
    120  extern boolean_t vdev_cache_read(zio_t *zio);
    121  extern void vdev_cache_write(zio_t *zio);
    126  extern zio_t *vdev_queue_io(zio_t *zio);
    127  extern void vdev_queue_io_done(zio_t *zio);
    130  extern void vdev_queue_register_lastoffset(vdev_t *vd, zio_t *zio);
trim_map.h
     40  extern boolean_t trim_map_write_start(zio_t *zio);
     41  extern void trim_map_write_done(zio_t *zio);
zio_checksum.h
    106  extern void zio_checksum_compute(zio_t *zio, enum zio_checksum checksum,
    110  extern int zio_checksum_error(zio_t *zio, zio_bad_cksum_t *out);