/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>

/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
	0,	/* ZIO_PRIORITY_NOW		*/
	0,	/* ZIO_PRIORITY_SYNC_READ	*/
	0,	/* ZIO_PRIORITY_SYNC_WRITE	*/
	6,	/* ZIO_PRIORITY_ASYNC_READ	*/
	4,	/* ZIO_PRIORITY_ASYNC_WRITE	*/
	4,	/* ZIO_PRIORITY_FREE		*/
	0,	/* ZIO_PRIORITY_CACHE_FILL	*/
	0,	/* ZIO_PRIORITY_LOG_WRITE	*/
	10,	/* ZIO_PRIORITY_RESILVER	*/
	20,	/* ZIO_PRIORITY_SCRUB		*/
};

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
	"null", "read", "write", "free", "claim", "ioctl" };

/* At or above this size, force gang blocking - for testing */
uint64_t zio_gang_bang = SPA_MAXBLOCKSIZE + 1;

/* Force an allocation failure when non-zero */
uint16_t zio_zil_fail_shift = 0;

typedef struct zio_sync_pass {
	int	zp_defer_free;		/* defer frees after this pass */
	int	zp_dontcompress;	/* don't compress after this pass */
	int	zp_rewrite;		/* rewrite new bps after this pass */
} zio_sync_pass_t;

zio_sync_pass_t zio_sync_pass = {
	1,	/* zp_defer_free */
	4,	/* zp_dontcompress */
	1,	/* zp_rewrite */
};
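
/*
 * Note on the sync-pass thresholds above (descriptive; see zio_free() and
 * zio_write_compress() below): once spa_sync_pass() exceeds zp_defer_free,
 * frees in the syncing txg are pushed onto the deferred bplist instead of
 * being issued; once it exceeds zp_dontcompress, compression is disabled;
 * and once it exceeds zp_rewrite, same-size writes to blocks already
 * allocated in this txg become in-place rewrites.  All of this is aimed at
 * letting spa_sync() converge.
 */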

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];

#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif

	zio_cache = kmem_cache_create("zio_cache", sizeof (zio_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
	 * for each quarter-power of 2.  For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (P2PHASE(size, PAGESIZE) == 0) {
			align = PAGESIZE;
		} else if (P2PHASE(size, p2 >> 2) == 0) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];
			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);

			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    KMC_NODEBUG);

			dprintf("creating cache for size %5lx align %5lx\n",
			    size, align);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}

	zio_inject_init();
}
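
/*
 * Worked example of the sizing rules in zio_init() above (illustrative,
 * assuming SPA_MINBLOCKSIZE of 512 and 4K pages): a 5K buffer is larger
 * than 4 * SPA_MINBLOCKSIZE and is not a multiple of PAGESIZE, but it is a
 * multiple of one quarter of its largest power of two (4K / 4 = 1K), so it
 * gets its own cache with 1K alignment.  A 5.5K buffer matches none of the
 * rules, so no cache is created for it; the back-fill loop at the end of
 * zio_init() maps its slot to the next larger cache (6K), which is what
 * zio_buf_alloc() will then use for 5.5K requests.
 */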

void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_cache);

	zio_inject_fini();
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_buf_cache[c], KM_SLEEP));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump.  (Thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics.)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_SLEEP));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_buf_cache[c], buf);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	kmem_cache_free(zio_data_buf_cache[c], buf);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_data = data;
	zt->zt_size = size;
	zt->zt_bufsize = bufsize;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transform(zio_t *zio, void **data, uint64_t *size, uint64_t *bufsize)
{
	zio_transform_t *zt = zio->io_transform_stack;

	*data = zt->zt_data;
	*size = zt->zt_size;
	*bufsize = zt->zt_bufsize;

	zio->io_transform_stack = zt->zt_next;
	kmem_free(zt, sizeof (zio_transform_t));

	if ((zt = zio->io_transform_stack) != NULL) {
		zio->io_data = zt->zt_data;
		zio->io_size = zt->zt_size;
	}
}

static void
zio_clear_transform_stack(zio_t *zio)
{
	void *data;
	uint64_t size, bufsize;

	ASSERT(zio->io_transform_stack != NULL);

	zio_pop_transform(zio, &data, &size, &bufsize);
	while (zio->io_transform_stack != NULL) {
		zio_buf_free(data, bufsize);
		zio_pop_transform(zio, &data, &size, &bufsize);
	}
}
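
/*
 * Note on the transform stack (descriptive, based on the functions above
 * and their callers below): each pushed entry becomes the current io_data
 * and io_size, with zt_bufsize remembering how large the temporary buffer
 * really is so it can be freed later.  For example, a compressed read in
 * zio_read() pushes a psize-sized buffer to receive the raw data, and
 * zio_read_decompress() later pops it, decompresses into the caller's
 * buffer, and frees the temporary.  zio_clear_transform_stack() is called
 * from zio_done() to free any remaining intermediate buffers; the bottom
 * entry (the caller's original buffer) is popped but never freed.
 */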

/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, int flags, uint8_t stage, uint32_t pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));
	zio->io_parent = pio;
	zio->io_spa = spa;
	zio->io_txg = txg;
	if (bp != NULL) {
		zio->io_bp = bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
	}
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_stage = stage;
	zio->io_pipeline = pipeline;
	zio->io_async_stages = ZIO_ASYNC_PIPELINE_STAGES;
	zio->io_timestamp = lbolt64;
	zio->io_flags = flags;
	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	zio_push_transform(zio, data, size, size);

	/*
	 * Note on config lock:
	 *
	 * If CONFIG_HELD is set, then the caller already has the config
	 * lock, so we don't need it for this io.
	 *
	 * We set CONFIG_GRABBED to indicate that we have grabbed the
	 * config lock on behalf of this io, so it should be released
	 * in zio_done.
	 *
	 * Unless CONFIG_HELD is set, we will grab the config lock for
	 * any top-level (parent-less) io, *except* NULL top-level ios.
	 * The NULL top-level ios rarely have any children, so we delay
	 * grabbing the lock until the first child is added (but it is
	 * still grabbed on behalf of the top-level i/o, so additional
	 * children don't need to also grab it).  This greatly reduces
	 * contention on the config lock.
	 */
	if (pio == NULL) {
		if (type != ZIO_TYPE_NULL &&
		    !(flags & ZIO_FLAG_CONFIG_HELD)) {
			spa_config_enter(zio->io_spa, RW_READER, zio);
			zio->io_flags |= ZIO_FLAG_CONFIG_GRABBED;
		}
		zio->io_root = zio;
	} else {
		zio->io_root = pio->io_root;
		if (!(flags & ZIO_FLAG_NOBOOKMARK))
			zio->io_logical = pio->io_logical;
		mutex_enter(&pio->io_lock);
		if (pio->io_parent == NULL &&
		    pio->io_type == ZIO_TYPE_NULL &&
		    !(pio->io_flags & ZIO_FLAG_CONFIG_GRABBED) &&
		    !(pio->io_flags & ZIO_FLAG_CONFIG_HELD)) {
			pio->io_flags |= ZIO_FLAG_CONFIG_GRABBED;
			spa_config_enter(zio->io_spa, RW_READER, pio);
		}
		if (stage < ZIO_STAGE_READY)
			pio->io_children_notready++;
		pio->io_children_notdone++;
		zio->io_sibling_next = pio->io_child;
		zio->io_sibling_prev = NULL;
		if (pio->io_child != NULL)
			pio->io_child->io_sibling_prev = zio;
		pio->io_child = zio;
		zio->io_ndvas = pio->io_ndvas;
		mutex_exit(&pio->io_lock);
	}

	return (zio);
}

zio_t *
zio_null(zio_t *pio, spa_t *spa, zio_done_func_t *done, void *private,
    int flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, ZIO_STAGE_OPEN,
	    ZIO_WAIT_FOR_CHILDREN_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, int flags)
{
	return (zio_null(NULL, spa, done, private, flags));
}
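
/*
 * Typical usage of zio_root()/zio_null() (an illustrative sketch, not code
 * from this file; zio_ioctl() below uses the same pattern): a caller builds
 * a parent-less "null" zio to act as a collection point, hangs child I/Os
 * off it with zio_nowait(), and then waits for the whole tree at once:
 *
 *	zio_t *root = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	for (...)
 *		zio_nowait(zio_read(root, spa, bp, buf, size, NULL, NULL,
 *		    ZIO_PRIORITY_ASYNC_READ, flags, &zb));
 *	error = zio_wait(root);
 *
 * The root zio completes (and zio_wait() returns) only after all of its
 * children have passed through zio_done(), per the parent/child accounting
 * set up in zio_create() above.
 */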

zio_t *
zio_read(zio_t *pio, spa_t *spa, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    int priority, int flags, zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT3U(size, ==, BP_GET_LSIZE(bp));

	zio = zio_create(pio, spa, bp->blk_birth, bp, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_READ_PIPELINE);
	zio->io_bookmark = *zb;

	zio->io_logical = zio;

	/*
	 * Work off our copy of the bp so the caller can free it.
	 */
	zio->io_bp = &zio->io_bp_copy;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
		uint64_t csize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(csize);

		zio_push_transform(zio, cbuf, csize, csize);
		zio->io_pipeline |= 1U << ZIO_STAGE_READ_DECOMPRESS;
	}

	if (BP_IS_GANG(bp)) {
		uint64_t gsize = SPA_GANGBLOCKSIZE;
		void *gbuf = zio_buf_alloc(gsize);

		zio_push_transform(zio, gbuf, gsize, gsize);
		zio->io_pipeline |= 1U << ZIO_STAGE_READ_GANG_MEMBERS;
	}

	return (zio);
}

zio_t *
zio_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *ready, zio_done_func_t *done, void *private, int priority,
    int flags, zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(checksum >= ZIO_CHECKSUM_OFF &&
	    checksum < ZIO_CHECKSUM_FUNCTIONS);

	ASSERT(compress >= ZIO_COMPRESS_OFF &&
	    compress < ZIO_COMPRESS_FUNCTIONS);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;

	zio->io_bookmark = *zb;

	zio->io_logical = zio;

	zio->io_checksum = checksum;
	zio->io_compress = compress;
	zio->io_ndvas = ncopies;

	if (compress != ZIO_COMPRESS_OFF)
		zio->io_async_stages |= 1U << ZIO_STAGE_WRITE_COMPRESS;

	if (bp->blk_birth != txg) {
		/* XXX the bp usually (always?) gets re-zeroed later */
		BP_ZERO(bp);
		BP_SET_LSIZE(bp, size);
		BP_SET_PSIZE(bp, size);
	} else {
		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(MIN(zio->io_ndvas + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	return (zio);
}
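
/*
 * Note on ncopies (descriptive): the copy count requested here is carried
 * in io_ndvas and is what zio_dva_allocate() passes to metaslab_alloc() as
 * the number of DVAs (ditto copies) to allocate for the block.  It is also
 * inherited by child I/Os in zio_create() and must not exceed
 * spa_max_replication(), which zio_dva_allocate() asserts.
 */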

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, int checksum,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags,
    zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	zio->io_bookmark = *zb;
	zio->io_checksum = checksum;
	zio->io_compress = ZIO_COMPRESS_OFF;

	if (pio != NULL)
		ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(bp));

	return (zio);
}

static zio_t *
zio_write_allocate(zio_t *pio, spa_t *spa, int checksum,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;

	BP_ZERO(bp);
	BP_SET_LSIZE(bp, size);
	BP_SET_PSIZE(bp, size);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags,
	    ZIO_STAGE_OPEN, ZIO_WRITE_ALLOCATE_PIPELINE);

	zio->io_checksum = checksum;
	zio->io_compress = ZIO_COMPRESS_OFF;

	return (zio);
}

zio_t *
zio_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg == spa->spa_syncing_txg &&
	    spa->spa_sync_pass > zio_sync_pass.zp_defer_free) {
		bplist_enqueue_deferred(&spa->spa_sync_bplist, bp);
		return (zio_null(pio, spa, NULL, NULL, 0));
	}

	zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
	    ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE);

	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}

zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT3U(spa_first_txg(spa), <=, txg);

	zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
	    ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, 0,
	    ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}

zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, priority, flags,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_vd = vd;
		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, priority, flags));
	}

	return (zio);
}

static void
zio_phys_bp_init(vdev_t *vd, blkptr_t *bp, uint64_t offset, uint64_t size,
    int checksum)
{
	ASSERT(vd->vdev_children == 0);

	ASSERT(size <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	BP_ZERO(bp);

	BP_SET_LSIZE(bp, size);
	BP_SET_PSIZE(bp, size);

	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	if (checksum != ZIO_CHECKSUM_OFF)
		ZIO_SET_CHECKSUM(&bp->blk_cksum, offset, 0, 0, 0);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags)
{
	zio_t *zio;
	blkptr_t blk;

	zio_phys_bp_init(vd, &blk, offset, size, checksum);

	zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL,
	    ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_vd = vd;
	zio->io_offset = offset;

	/*
	 * Work off our copy of the bp so the caller can free it.
	 */
	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}

zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags)
{
	zio_block_tail_t *zbt;
	void *wbuf;
	zio_t *zio;
	blkptr_t blk;

	zio_phys_bp_init(vd, &blk, offset, size, checksum);

	zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_vd = vd;
	zio->io_offset = offset;

	zio->io_bp = &zio->io_bp_copy;
	zio->io_checksum = checksum;

	if (zio_checksum_table[checksum].ci_zbt) {
		/*
		 * zbt checksums are necessarily destructive -- they modify
		 * one word of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places.
		 */
		wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size);

		zbt = (zio_block_tail_t *)((char *)wbuf + size) - 1;
		zbt->zbt_cksum = blk.blk_cksum;
	}

	return (zio);
}
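
/*
 * Note on physical (label) I/O checksums (descriptive): zio_phys_bp_init()
 * seeds blk_cksum with the device offset of the block, so the offset acts
 * as the checksum verifier.  For zbt-style checksums, zio_write_phys()
 * copies that verifier into the block tail of a private copy of the data,
 * which means an I/O to the wrong offset -- e.g. a misdirected label read
 * or write -- should fail checksum verification rather than silently
 * return stale data.
 */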

/*
 * Create a child I/O to do some work for us.  It has no associated bp.
 */
zio_t *
zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, int flags,
    zio_done_func_t *done, void *private)
{
	uint32_t pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *cio;

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= 1U << ZIO_STAGE_CHECKSUM_VERIFY;
		zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
	}

	cio = zio_create(zio, zio->io_spa, zio->io_txg, bp, data, size,
	    done, private, type, priority,
	    (zio->io_flags & ZIO_FLAG_VDEV_INHERIT) | ZIO_FLAG_CANFAIL | flags,
	    ZIO_STAGE_VDEV_IO_START - 1, pipeline);

	cio->io_vd = vd;
	cio->io_offset = offset;

	return (cio);
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);

	zio->io_waiter = curthread;

	zio_next_stage_async(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_stalled != ZIO_STAGE_DONE)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	mutex_destroy(&zio->io_lock);
	kmem_cache_free(zio_cache, zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	zio_next_stage_async(zio);
}
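
/*
 * Note on the zio_wait() handshake (descriptive): when a waiter is present,
 * zio_done() does not free the zio itself; instead it sets io_stalled to
 * ZIO_STAGE_DONE under io_lock and broadcasts io_cv, which is exactly the
 * condition zio_wait() sleeps on above.  The waiter then reads io_error and
 * frees the zio back to zio_cache.  Waiterless (zio_nowait) I/Os are freed
 * directly by zio_done().
 */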

/*
 * ==========================================================================
 * I/O pipeline interlocks: parent/child dependency scoreboarding
 * ==========================================================================
 */
static void
zio_wait_for_children(zio_t *zio, uint32_t stage, uint64_t *countp)
{
	mutex_enter(&zio->io_lock);
	if (*countp == 0) {
		ASSERT(zio->io_stalled == 0);
		mutex_exit(&zio->io_lock);
		zio_next_stage(zio);
	} else {
		zio->io_stalled = stage;
		mutex_exit(&zio->io_lock);
	}
}

static void
zio_notify_parent(zio_t *zio, uint32_t stage, uint64_t *countp)
{
	zio_t *pio = zio->io_parent;

	mutex_enter(&pio->io_lock);
	if (pio->io_error == 0 && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		pio->io_error = zio->io_error;
	if (--*countp == 0 && pio->io_stalled == stage) {
		pio->io_stalled = 0;
		mutex_exit(&pio->io_lock);
		zio_next_stage_async(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_wait_children_ready(zio_t *zio)
{
	zio_wait_for_children(zio, ZIO_STAGE_WAIT_CHILDREN_READY,
	    &zio->io_children_notready);
}

void
zio_wait_children_done(zio_t *zio)
{
	zio_wait_for_children(zio, ZIO_STAGE_WAIT_CHILDREN_DONE,
	    &zio->io_children_notdone);
}
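
/*
 * Scoreboarding in a nutshell (descriptive): zio_create() bumps the
 * parent's io_children_notready / io_children_notdone counters for every
 * child.  When the parent reaches a WAIT_CHILDREN stage it either advances
 * immediately (count already zero) or records the stage in io_stalled and
 * parks.  Each child decrements the matching counter from zio_ready() or
 * zio_done() via zio_notify_parent(), and the child that drops the count
 * to zero while the parent is stalled on that stage restarts the parent
 * with zio_next_stage_async().
 */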

static void
zio_ready(zio_t *zio)
{
	zio_t *pio = zio->io_parent;

	if (zio->io_ready)
		zio->io_ready(zio);

	if (pio != NULL)
		zio_notify_parent(zio, ZIO_STAGE_WAIT_CHILDREN_READY,
		    &pio->io_children_notready);

	if (zio->io_bp)
		zio->io_bp_copy = *zio->io_bp;

	zio_next_stage(zio);
}

static void
zio_done(zio_t *zio)
{
	zio_t *pio = zio->io_parent;
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;

	ASSERT(zio->io_children_notready == 0);
	ASSERT(zio->io_children_notdone == 0);

	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bp->blk_pad[2] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0);
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			if (zio->io_ndvas != 0)
				ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
	}

	if (vd != NULL)
		vdev_stat_update(zio);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO,
			    zio->io_spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO ||
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) &&
		    zio->io_logical == zio) {
			/*
			 * For root I/O requests, tell the SPA to log the
			 * error appropriately.  Also, generate a logical
			 * data ereport.
			 */
			spa_log_error(zio->io_spa, zio);

			zfs_ereport_post(FM_EREPORT_ZFS_DATA,
			    zio->io_spa, NULL, zio, 0, 0);
		}

		/*
		 * For I/O requests that cannot fail, panic appropriately.
		 */
		if (!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			char *blkbuf;

			blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_NOSLEEP);
			if (blkbuf) {
				sprintf_blkptr(blkbuf, BP_SPRINTF_LEN,
				    bp ? bp : &zio->io_bp_copy);
			}
			panic("ZFS: %s (%s on %s off %llx: zio %p %s): error "
			    "%d", zio->io_error == ECKSUM ?
			    "bad checksum" : "I/O failure",
			    zio_type_name[zio->io_type],
			    vdev_description(vd),
			    (u_longlong_t)zio->io_offset,
			    zio, blkbuf ? blkbuf : "", zio->io_error);
		}
	}
	zio_clear_transform_stack(zio);

	if (zio->io_done)
		zio->io_done(zio);

	ASSERT(zio->io_delegate_list == NULL);
	ASSERT(zio->io_delegate_next == NULL);

	if (pio != NULL) {
		zio_t *next, *prev;

		mutex_enter(&pio->io_lock);
		next = zio->io_sibling_next;
		prev = zio->io_sibling_prev;
		if (next != NULL)
			next->io_sibling_prev = prev;
		if (prev != NULL)
			prev->io_sibling_next = next;
		if (pio->io_child == zio)
			pio->io_child = next;
		mutex_exit(&pio->io_lock);

		zio_notify_parent(zio, ZIO_STAGE_WAIT_CHILDREN_DONE,
		    &pio->io_children_notdone);
	}

	/*
	 * Note: this I/O is now done, and will shortly be freed, so there is
	 * no need to clear this (or any other) flag.
	 */
	if (zio->io_flags & ZIO_FLAG_CONFIG_GRABBED)
		spa_config_exit(spa, zio);

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio->io_stalled = zio->io_stage;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		kmem_cache_free(zio_cache, zio);
	}
}

/*
 * ==========================================================================
 * Compression support
 * ==========================================================================
 */
static void
zio_write_compress(zio_t *zio)
{
	int compress = zio->io_compress;
	blkptr_t *bp = zio->io_bp;
	void *cbuf;
	uint64_t lsize = zio->io_size;
	uint64_t csize = lsize;
	uint64_t cbufsize = 0;
	int pass;

	if (bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(zio->io_spa);
		if (pass > zio_sync_pass.zp_dontcompress)
			compress = ZIO_COMPRESS_OFF;
	} else {
		ASSERT(BP_IS_HOLE(bp));
		pass = 1;
	}

	if (compress != ZIO_COMPRESS_OFF)
		if (!zio_compress_data(compress, zio->io_data, zio->io_size,
		    &cbuf, &csize, &cbufsize))
			compress = ZIO_COMPRESS_OFF;

	if (compress != ZIO_COMPRESS_OFF && csize != 0)
		zio_push_transform(zio, cbuf, csize, cbufsize);

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to reallocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == csize &&
	    pass > zio_sync_pass.zp_rewrite) {
		ASSERT(csize != 0);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_COMPRESS(bp, compress);
		zio->io_pipeline = ZIO_REWRITE_PIPELINE;
	} else {
		if (bp->blk_birth == zio->io_txg)
			BP_ZERO(bp);
		if (csize == 0) {
			BP_ZERO(bp);
			zio->io_pipeline = ZIO_WAIT_FOR_CHILDREN_PIPELINE;
		} else {
			ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
			BP_SET_LSIZE(bp, lsize);
			BP_SET_PSIZE(bp, csize);
			BP_SET_COMPRESS(bp, compress);
			zio->io_pipeline = ZIO_WRITE_ALLOCATE_PIPELINE;
		}
	}

	zio_next_stage(zio);
}
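
/*
 * In short, zio_write_compress() above chooses one of three pipelines
 * (descriptive summary): rewrite the existing block in place when the block
 * was already allocated in this txg, the compressed size matches the
 * current psize, and we are past the zp_rewrite sync pass; skip the write
 * entirely and leave the bp zeroed (a hole) when the data compresses away
 * to nothing (csize == 0); otherwise record the new lsize/psize/compress in
 * the bp and fall through to allocation via ZIO_WRITE_ALLOCATE_PIPELINE.
 */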

static void
zio_read_decompress(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	void *data;
	uint64_t size;
	uint64_t bufsize;
	int compress = BP_GET_COMPRESS(bp);

	ASSERT(compress != ZIO_COMPRESS_OFF);

	zio_pop_transform(zio, &data, &size, &bufsize);

	if (zio_decompress_data(compress, data, size,
	    zio->io_data, zio->io_size))
		zio->io_error = EIO;

	zio_buf_free(data, bufsize);

	zio_next_stage(zio);
}

/*
 * ==========================================================================
 * Gang block support
 * ==========================================================================
 */
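
/*
 * Background on gang blocks (descriptive; see
 * zio_write_allocate_gang_members() below): when a full-size allocation
 * cannot be satisfied, the block is written as a "gang" -- a
 * SPA_GANGBLOCKSIZE header (zio_gbh_phys_t) holding up to SPA_GBH_NBLKPTRS
 * block pointers whose data, concatenated in order, reconstitutes the
 * logical block.  The DVAs of a gang block have their gang bit set, so
 * readers know to fetch the header first and then the members
 * (zio_get_gang_header() and zio_read_gang_members() below).
 */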

static void
zio_gang_pipeline(zio_t *zio)
{
	/*
	 * By default, the pipeline assumes that we're dealing with a gang
	 * block.  If we're not, strip out any gang-specific stages.
	 */
	if (!BP_IS_GANG(zio->io_bp))
		zio->io_pipeline &= ~ZIO_GANG_STAGES;

	zio_next_stage(zio);
}

static void
zio_gang_byteswap(zio_t *zio)
{
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);

	if (BP_SHOULD_BYTESWAP(zio->io_bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);
}

static void
zio_get_gang_header(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t gsize = SPA_GANGBLOCKSIZE;
	void *gbuf = zio_buf_alloc(gsize);

	ASSERT(BP_IS_GANG(bp));

	zio_push_transform(zio, gbuf, gsize, gsize);

	zio_nowait(zio_create(zio, zio->io_spa, bp->blk_birth, bp, gbuf, gsize,
	    NULL, NULL, ZIO_TYPE_READ, zio->io_priority,
	    zio->io_flags & ZIO_FLAG_GANG_INHERIT,
	    ZIO_STAGE_OPEN, ZIO_READ_PIPELINE));

	zio_wait_children_done(zio);
}

static void
zio_read_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize, loff, lsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		lsize = BP_GET_PSIZE(gbp);

		ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
		ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
		ASSERT3U(loff + lsize, <=, zio->io_size);
		ASSERT(i < SPA_GBH_NBLKPTRS);
		ASSERT(!BP_IS_HOLE(gbp));

		zio_nowait(zio_read(zio, zio->io_spa, gbp,
		    (char *)zio->io_data + loff, lsize, NULL, NULL,
		    zio->io_priority, zio->io_flags & ZIO_FLAG_GANG_INHERIT,
		    &zio->io_bookmark));
	}

	zio_buf_free(gbh, gbufsize);
	zio_wait_children_done(zio);
}

static void
zio_rewrite_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize, loff, lsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));
	ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	ASSERT(gsize == gbufsize);

	for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		lsize = BP_GET_PSIZE(gbp);

		ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
		ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
		ASSERT3U(loff + lsize, <=, zio->io_size);
		ASSERT(i < SPA_GBH_NBLKPTRS);
		ASSERT(!BP_IS_HOLE(gbp));

		zio_nowait(zio_rewrite(zio, zio->io_spa, zio->io_checksum,
		    zio->io_txg, gbp, (char *)zio->io_data + loff, lsize,
		    NULL, NULL, zio->io_priority, zio->io_flags,
		    &zio->io_bookmark));
	}

	zio_push_transform(zio, gbh, gsize, gbufsize);
	zio_wait_children_ready(zio);
}

static void
zio_free_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];

		if (BP_IS_HOLE(gbp))
			continue;
		zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
		    gbp, NULL, NULL));
	}

	zio_buf_free(gbh, gbufsize);
	zio_next_stage(zio);
}

static void
zio_claim_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		if (BP_IS_HOLE(gbp))
			continue;
		zio_nowait(zio_claim(zio, zio->io_spa, zio->io_txg,
		    gbp, NULL, NULL));
	}

	zio_buf_free(gbh, gbufsize);
	zio_next_stage(zio);
}

static void
zio_write_allocate_gang_member_done(zio_t *zio)
{
	zio_t *pio = zio->io_parent;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;
	int d;

	ASSERT3U(pio->io_ndvas, ==, zio->io_ndvas);
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_ndvas, <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (d = 0; d < BP_GET_NDVAS(pio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}

static void
zio_write_allocate_gang_members(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dva_t *dva = bp->blk_dva;
	spa_t *spa = zio->io_spa;
	zio_gbh_phys_t *gbh;
	uint64_t txg = zio->io_txg;
	uint64_t resid = zio->io_size;
	uint64_t maxalloc = P2ROUNDUP(zio->io_size >> 1, SPA_MINBLOCKSIZE);
	uint64_t gsize, loff, lsize;
	uint32_t gbps_left;
	int ndvas = zio->io_ndvas;
	int gbh_ndvas = MIN(ndvas + 1, spa_max_replication(spa));
	int error;
	int i, d;

	gsize = SPA_GANGBLOCKSIZE;
	gbps_left = SPA_GBH_NBLKPTRS;

	error = metaslab_alloc(spa, gsize, bp, gbh_ndvas, txg, NULL, B_FALSE);
	if (error == ENOSPC)
		panic("can't allocate gang block header");
	ASSERT(error == 0);

	for (d = 0; d < gbh_ndvas; d++)
		DVA_SET_GANG(&dva[d], 1);

	bp->blk_birth = txg;

	gbh = zio_buf_alloc(gsize);
	bzero(gbh, gsize);

	/* We need to test multi-level gang blocks */
	if (maxalloc >= zio_gang_bang && (lbolt & 0x1) == 0)
		maxalloc = MAX(maxalloc >> 2, SPA_MINBLOCKSIZE);

	for (loff = 0, i = 0; loff != zio->io_size;
	    loff += lsize, resid -= lsize, gbps_left--, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		dva = gbp->blk_dva;

		ASSERT(gbps_left != 0);
		maxalloc = MIN(maxalloc, resid);

		while (resid <= maxalloc * gbps_left) {
			error = metaslab_alloc(spa, maxalloc, gbp, ndvas,
			    txg, bp, B_FALSE);
			if (error == 0)
				break;
			ASSERT3U(error, ==, ENOSPC);
			if (maxalloc == SPA_MINBLOCKSIZE)
				panic("really out of space");
			maxalloc = P2ROUNDUP(maxalloc >> 1, SPA_MINBLOCKSIZE);
		}

		if (resid <= maxalloc * gbps_left) {
			lsize = maxalloc;
			BP_SET_LSIZE(gbp, lsize);
			BP_SET_PSIZE(gbp, lsize);
			BP_SET_COMPRESS(gbp, ZIO_COMPRESS_OFF);
			gbp->blk_birth = txg;
			zio_nowait(zio_rewrite(zio, spa,
			    zio->io_checksum, txg, gbp,
			    (char *)zio->io_data + loff, lsize,
			    zio_write_allocate_gang_member_done, NULL,
			    zio->io_priority, zio->io_flags,
			    &zio->io_bookmark));
		} else {
			lsize = P2ROUNDUP(resid / gbps_left, SPA_MINBLOCKSIZE);
			ASSERT(lsize != SPA_MINBLOCKSIZE);
			zio_nowait(zio_write_allocate(zio, spa,
			    zio->io_checksum, txg, gbp,
			    (char *)zio->io_data + loff, lsize,
			    zio_write_allocate_gang_member_done, NULL,
			    zio->io_priority, zio->io_flags));
		}
	}

	ASSERT(resid == 0 && loff == zio->io_size);

	zio->io_pipeline |= 1U << ZIO_STAGE_GANG_CHECKSUM_GENERATE;

	zio_push_transform(zio, gbh, gsize, gsize);
	/*
	 * As much as we'd like this to be zio_wait_children_ready(),
	 * updating our ASIZE doesn't happen until the io_done callback,
	 * so we have to wait for that to finish in order for our BP
	 * to be stable.
	 */
	zio_wait_children_done(zio);
}

/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static void
zio_dva_allocate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(BP_IS_HOLE(bp));
	ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
	ASSERT3U(zio->io_ndvas, >, 0);
	ASSERT3U(zio->io_ndvas, <=, spa_max_replication(zio->io_spa));

	/* For testing, make some blocks above a certain size be gang blocks */
	if (zio->io_size >= zio_gang_bang && (lbolt & 0x3) == 0) {
		zio_write_allocate_gang_members(zio);
		return;
	}

	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	error = metaslab_alloc(zio->io_spa, zio->io_size, bp, zio->io_ndvas,
	    zio->io_txg, NULL, B_FALSE);

	if (error == 0) {
		bp->blk_birth = zio->io_txg;
	} else if (error == ENOSPC) {
		if (zio->io_size == SPA_MINBLOCKSIZE)
			panic("really, truly out of space");
		zio_write_allocate_gang_members(zio);
		return;
	} else {
		zio->io_error = error;
	}
	zio_next_stage(zio);
}

static void
zio_dva_free(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	metaslab_free(zio->io_spa, bp, zio->io_txg, B_FALSE);

	BP_ZERO(bp);

	zio_next_stage(zio);
}

static void
zio_dva_claim(zio_t *zio)
{
	zio->io_error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);

	zio_next_stage(zio);
}

/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */

static void
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd ? vd->vdev_top : NULL;
	blkptr_t *bp = zio->io_bp;
	uint64_t align;

	if (vd == NULL) {
		/* The mirror_ops handle multiple DVAs in a single BP */
		vdev_mirror_ops.vdev_op_io_start(zio);
		return;
	}

	align = 1ULL << tvd->vdev_ashift;

	if (zio->io_retries == 0 && vd == tvd)
		zio->io_flags |= ZIO_FLAG_FAILFAST;

	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
	    vd->vdev_children == 0) {
		zio->io_flags |= ZIO_FLAG_PHYSICAL;
		zio->io_offset += VDEV_LABEL_START_SIZE;
	}

	if (P2PHASE(zio->io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = zio_buf_alloc(asize);
		ASSERT(vd == tvd);
		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize);
		ASSERT(!(zio->io_flags & ZIO_FLAG_SUBBLOCK));
		zio->io_flags |= ZIO_FLAG_SUBBLOCK;
	}

	ASSERT(P2PHASE(zio->io_offset, align) == 0);
	ASSERT(P2PHASE(zio->io_size, align) == 0);
	ASSERT(bp == NULL ||
	    P2ROUNDUP(ZIO_GET_IOSIZE(zio), align) == zio->io_size);
	ASSERT(zio->io_type != ZIO_TYPE_WRITE || (spa_mode & FWRITE));

	vdev_io_start(zio);

	/* zio_next_stage_async() gets called from io completion interrupt */
}
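
/*
 * Note on ZIO_FLAG_SUBBLOCK (descriptive): if the I/O size is not a
 * multiple of the top-level vdev's allocation unit (1 << vdev_ashift),
 * zio_vdev_io_start() pushes a padded bounce buffer rounded up to that
 * alignment (zero-filling the tail of a write) and marks the zio with
 * ZIO_FLAG_SUBBLOCK.  zio_vdev_io_assess() later pops that buffer, copies
 * the data back for reads, frees the padding, and clears the flag.  For
 * example, a 1K read on a device with a 4K allocation unit would be issued
 * as a 4K read into a temporary buffer, with only the first 1K copied back.
 */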

static void
zio_vdev_io_done(zio_t *zio)
{
	if (zio->io_vd == NULL)
		/* The mirror_ops handle multiple DVAs in a single BP */
		vdev_mirror_ops.vdev_op_io_done(zio);
	else
		vdev_io_done(zio);
}

/* XXPOLICY */
boolean_t
zio_should_retry(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio->io_error == 0)
		return (B_FALSE);
	if (zio->io_delegate_list != NULL)
		return (B_FALSE);
	if (vd && vd != vd->vdev_top)
		return (B_FALSE);
	if (zio->io_flags & ZIO_FLAG_DONT_RETRY)
		return (B_FALSE);
	if (zio->io_retries > 0)
		return (B_FALSE);

	return (B_TRUE);
}

static void
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd ? vd->vdev_top : NULL;

	ASSERT(zio->io_vsd == NULL);

	if (zio->io_flags & ZIO_FLAG_SUBBLOCK) {
		void *abuf;
		uint64_t asize;
		ASSERT(vd == tvd);
		zio_pop_transform(zio, &abuf, &asize, &asize);
		if (zio->io_type == ZIO_TYPE_READ)
			bcopy(abuf, zio->io_data, zio->io_size);
		zio_buf_free(abuf, asize);
		zio->io_flags &= ~ZIO_FLAG_SUBBLOCK;
	}

	if (zio_injection_enabled && !zio->io_error)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 */
	/* XXPOLICY */
	if (zio_should_retry(zio)) {
		ASSERT(tvd == vd);

		zio->io_retries++;
		zio->io_error = 0;
		zio->io_flags &= ZIO_FLAG_VDEV_INHERIT |
		    ZIO_FLAG_CONFIG_GRABBED;
		/* XXPOLICY */
		zio->io_flags &= ~ZIO_FLAG_FAILFAST;
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START - 1;

		dprintf("retry #%d for %s to %s offset %llx\n",
		    zio->io_retries, zio_type_name[zio->io_type],
		    vdev_description(vd), zio->io_offset);

		zio_next_stage_async(zio);
		return;
	}

	zio_next_stage(zio);
}
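
/*
 * Retry policy in brief (descriptive, from zio_should_retry() and the code
 * above): only failed I/Os issued directly to a top-level vdev are retried;
 * delegated I/Os and those marked ZIO_FLAG_DONT_RETRY are not, and because
 * zio_should_retry() rejects any zio with io_retries > 0, each I/O gets at
 * most one retry.  The retry is reissued without FAILFAST and with
 * DONT_CACHE set, and simply re-enters the pipeline at VDEV_IO_START.
 */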
vd->vdev_top : NULL; 1481789Sahrens 14821544Seschrock ASSERT(zio->io_vsd == NULL); 1483789Sahrens 14841732Sbonwick if (zio->io_flags & ZIO_FLAG_SUBBLOCK) { 14851732Sbonwick void *abuf; 14861732Sbonwick uint64_t asize; 14871732Sbonwick ASSERT(vd == tvd); 14881732Sbonwick zio_pop_transform(zio, &abuf, &asize, &asize); 14891732Sbonwick if (zio->io_type == ZIO_TYPE_READ) 14901732Sbonwick bcopy(abuf, zio->io_data, zio->io_size); 14911732Sbonwick zio_buf_free(abuf, asize); 14921732Sbonwick zio->io_flags &= ~ZIO_FLAG_SUBBLOCK; 14931732Sbonwick } 14941732Sbonwick 14951544Seschrock if (zio_injection_enabled && !zio->io_error) 14961544Seschrock zio->io_error = zio_handle_fault_injection(zio, EIO); 1497789Sahrens 1498789Sahrens /* 1499789Sahrens * If the I/O failed, determine whether we should attempt to retry it. 1500789Sahrens */ 1501789Sahrens /* XXPOLICY */ 1502789Sahrens if (zio_should_retry(zio)) { 1503789Sahrens ASSERT(tvd == vd); 1504789Sahrens 1505789Sahrens zio->io_retries++; 1506789Sahrens zio->io_error = 0; 15073463Sahrens zio->io_flags &= ZIO_FLAG_VDEV_INHERIT | 15083463Sahrens ZIO_FLAG_CONFIG_GRABBED; 1509789Sahrens /* XXPOLICY */ 1510789Sahrens zio->io_flags &= ~ZIO_FLAG_FAILFAST; 1511789Sahrens zio->io_flags |= ZIO_FLAG_DONT_CACHE; 15121775Sbillm zio->io_stage = ZIO_STAGE_VDEV_IO_START - 1; 1513789Sahrens 1514789Sahrens dprintf("retry #%d for %s to %s offset %llx\n", 1515789Sahrens zio->io_retries, zio_type_name[zio->io_type], 1516789Sahrens vdev_description(vd), zio->io_offset); 1517789Sahrens 15181544Seschrock zio_next_stage_async(zio); 15191544Seschrock return; 15201544Seschrock } 1521789Sahrens 1522789Sahrens zio_next_stage(zio); 1523789Sahrens } 1524789Sahrens 1525789Sahrens void 1526789Sahrens zio_vdev_io_reissue(zio_t *zio) 1527789Sahrens { 1528789Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 1529789Sahrens ASSERT(zio->io_error == 0); 1530789Sahrens 1531789Sahrens zio->io_stage--; 1532789Sahrens } 1533789Sahrens 1534789Sahrens void 1535789Sahrens zio_vdev_io_redone(zio_t *zio) 1536789Sahrens { 1537789Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE); 1538789Sahrens 1539789Sahrens zio->io_stage--; 1540789Sahrens } 1541789Sahrens 1542789Sahrens void 1543789Sahrens zio_vdev_io_bypass(zio_t *zio) 1544789Sahrens { 1545789Sahrens ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START); 1546789Sahrens ASSERT(zio->io_error == 0); 1547789Sahrens 1548789Sahrens zio->io_flags |= ZIO_FLAG_IO_BYPASS; 1549789Sahrens zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS - 1; 1550789Sahrens } 1551789Sahrens 1552789Sahrens /* 1553789Sahrens * ========================================================================== 1554789Sahrens * Generate and verify checksums 1555789Sahrens * ========================================================================== 1556789Sahrens */ 1557789Sahrens static void 1558789Sahrens zio_checksum_generate(zio_t *zio) 1559789Sahrens { 1560789Sahrens int checksum = zio->io_checksum; 1561789Sahrens blkptr_t *bp = zio->io_bp; 1562789Sahrens 1563789Sahrens ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp)); 1564789Sahrens 1565789Sahrens BP_SET_CHECKSUM(bp, checksum); 1566789Sahrens BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER); 1567789Sahrens 1568789Sahrens zio_checksum(checksum, &bp->blk_cksum, zio->io_data, zio->io_size); 1569789Sahrens 1570789Sahrens zio_next_stage(zio); 1571789Sahrens } 1572789Sahrens 1573789Sahrens static void 1574789Sahrens zio_gang_checksum_generate(zio_t *zio) 1575789Sahrens { 1576789Sahrens zio_cksum_t zc; 1577789Sahrens zio_gbh_phys_t *gbh = zio->io_data; 
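	/*
	 * Seed the gang header's embedded tail with the external verifier
	 * (vdev, offset and birth txg taken from the bp), then compute the
	 * gang-header checksum over the entire SPA_GANGBLOCKSIZE buffer.
	 */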
1578789Sahrens 15791775Sbillm ASSERT(BP_IS_GANG(zio->io_bp)); 1580789Sahrens ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE); 1581789Sahrens 1582789Sahrens zio_set_gang_verifier(zio, &gbh->zg_tail.zbt_cksum); 1583789Sahrens 1584789Sahrens zio_checksum(ZIO_CHECKSUM_GANG_HEADER, &zc, zio->io_data, zio->io_size); 1585789Sahrens 1586789Sahrens zio_next_stage(zio); 1587789Sahrens } 1588789Sahrens 1589789Sahrens static void 1590789Sahrens zio_checksum_verify(zio_t *zio) 1591789Sahrens { 1592789Sahrens if (zio->io_bp != NULL) { 1593789Sahrens zio->io_error = zio_checksum_error(zio); 15941544Seschrock if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) 15951544Seschrock zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM, 15961544Seschrock zio->io_spa, zio->io_vd, zio, 0, 0); 1597789Sahrens } 1598789Sahrens 1599789Sahrens zio_next_stage(zio); 1600789Sahrens } 1601789Sahrens 1602789Sahrens /* 1603789Sahrens * Called by RAID-Z to ensure we don't compute the checksum twice. 1604789Sahrens */ 1605789Sahrens void 1606789Sahrens zio_checksum_verified(zio_t *zio) 1607789Sahrens { 1608789Sahrens zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY); 1609789Sahrens } 1610789Sahrens 1611789Sahrens /* 1612789Sahrens * Set the external verifier for a gang block based on stuff in the bp 1613789Sahrens */ 1614789Sahrens void 1615789Sahrens zio_set_gang_verifier(zio_t *zio, zio_cksum_t *zcp) 1616789Sahrens { 16171775Sbillm blkptr_t *bp = zio->io_bp; 16181775Sbillm 16191775Sbillm zcp->zc_word[0] = DVA_GET_VDEV(BP_IDENTITY(bp)); 16201775Sbillm zcp->zc_word[1] = DVA_GET_OFFSET(BP_IDENTITY(bp)); 16211775Sbillm zcp->zc_word[2] = bp->blk_birth; 1622789Sahrens zcp->zc_word[3] = 0; 1623789Sahrens } 1624789Sahrens 1625789Sahrens /* 1626789Sahrens * ========================================================================== 1627789Sahrens * Define the pipeline 1628789Sahrens * ========================================================================== 1629789Sahrens */ 1630789Sahrens typedef void zio_pipe_stage_t(zio_t *zio); 1631789Sahrens 1632789Sahrens static void 1633789Sahrens zio_badop(zio_t *zio) 1634789Sahrens { 1635789Sahrens panic("Invalid I/O pipeline stage %u for zio %p", zio->io_stage, zio); 1636789Sahrens } 1637789Sahrens 1638789Sahrens zio_pipe_stage_t *zio_pipeline[ZIO_STAGE_DONE + 2] = { 1639789Sahrens zio_badop, 1640789Sahrens zio_wait_children_ready, 1641789Sahrens zio_write_compress, 1642789Sahrens zio_checksum_generate, 1643789Sahrens zio_gang_pipeline, 1644789Sahrens zio_get_gang_header, 1645789Sahrens zio_rewrite_gang_members, 1646789Sahrens zio_free_gang_members, 1647789Sahrens zio_claim_gang_members, 1648789Sahrens zio_dva_allocate, 1649789Sahrens zio_dva_free, 1650789Sahrens zio_dva_claim, 1651789Sahrens zio_gang_checksum_generate, 1652789Sahrens zio_ready, 1653789Sahrens zio_vdev_io_start, 1654789Sahrens zio_vdev_io_done, 1655789Sahrens zio_vdev_io_assess, 1656789Sahrens zio_wait_children_done, 1657789Sahrens zio_checksum_verify, 1658789Sahrens zio_read_gang_members, 1659789Sahrens zio_read_decompress, 1660789Sahrens zio_done, 1661789Sahrens zio_badop 1662789Sahrens }; 1663789Sahrens 1664789Sahrens /* 1665789Sahrens * Move an I/O to the next stage of the pipeline and execute that stage. 1666789Sahrens * There's no locking on io_stage because there's no legitimate way for 1667789Sahrens * multiple threads to be attempting to process the same I/O. 
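 * Each zio is therefore advanced by exactly one thread at a time, either
 * inline or via a single taskq dispatch per stage.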
1668789Sahrens */ 1669789Sahrens void 1670789Sahrens zio_next_stage(zio_t *zio) 1671789Sahrens { 1672789Sahrens uint32_t pipeline = zio->io_pipeline; 1673789Sahrens 1674789Sahrens ASSERT(!MUTEX_HELD(&zio->io_lock)); 1675789Sahrens 1676789Sahrens if (zio->io_error) { 1677789Sahrens dprintf("zio %p vdev %s offset %llx stage %d error %d\n", 1678789Sahrens zio, vdev_description(zio->io_vd), 1679789Sahrens zio->io_offset, zio->io_stage, zio->io_error); 1680789Sahrens if (((1U << zio->io_stage) & ZIO_VDEV_IO_PIPELINE) == 0) 1681789Sahrens pipeline &= ZIO_ERROR_PIPELINE_MASK; 1682789Sahrens } 1683789Sahrens 1684789Sahrens while (((1U << ++zio->io_stage) & pipeline) == 0) 1685789Sahrens continue; 1686789Sahrens 1687789Sahrens ASSERT(zio->io_stage <= ZIO_STAGE_DONE); 1688789Sahrens ASSERT(zio->io_stalled == 0); 1689789Sahrens 16903689Sek110237 /* 16913689Sek110237 * See the comment in zio_next_stage_async() about per-CPU taskqs. 16923689Sek110237 */ 16933689Sek110237 if (((1U << zio->io_stage) & zio->io_async_stages) && 16943689Sek110237 (zio->io_stage == ZIO_STAGE_WRITE_COMPRESS) && 16953689Sek110237 !(zio->io_flags & ZIO_FLAG_METADATA)) { 16963689Sek110237 taskq_t *tq = zio->io_spa->spa_zio_issue_taskq[zio->io_type]; 16973689Sek110237 (void) taskq_dispatch(tq, 16983689Sek110237 (task_func_t *)zio_pipeline[zio->io_stage], zio, TQ_SLEEP); 16993689Sek110237 } else { 17003689Sek110237 zio_pipeline[zio->io_stage](zio); 17013689Sek110237 } 1702789Sahrens } 1703789Sahrens 1704789Sahrens void 1705789Sahrens zio_next_stage_async(zio_t *zio) 1706789Sahrens { 1707789Sahrens taskq_t *tq; 1708789Sahrens uint32_t pipeline = zio->io_pipeline; 1709789Sahrens 1710789Sahrens ASSERT(!MUTEX_HELD(&zio->io_lock)); 1711789Sahrens 1712789Sahrens if (zio->io_error) { 1713789Sahrens dprintf("zio %p vdev %s offset %llx stage %d error %d\n", 1714789Sahrens zio, vdev_description(zio->io_vd), 1715789Sahrens zio->io_offset, zio->io_stage, zio->io_error); 1716789Sahrens if (((1U << zio->io_stage) & ZIO_VDEV_IO_PIPELINE) == 0) 1717789Sahrens pipeline &= ZIO_ERROR_PIPELINE_MASK; 1718789Sahrens } 1719789Sahrens 1720789Sahrens while (((1U << ++zio->io_stage) & pipeline) == 0) 1721789Sahrens continue; 1722789Sahrens 1723789Sahrens ASSERT(zio->io_stage <= ZIO_STAGE_DONE); 1724789Sahrens ASSERT(zio->io_stalled == 0); 1725789Sahrens 1726789Sahrens /* 1727789Sahrens * For performance, we'll probably want two sets of task queues: 1728789Sahrens * per-CPU issue taskqs and per-CPU completion taskqs. The per-CPU 1729789Sahrens * part is for read performance: since we have to make a pass over 1730789Sahrens * the data to checksum it anyway, we want to do this on the same CPU 1731789Sahrens * that issued the read, because (assuming CPU scheduling affinity) 1732789Sahrens * that thread is probably still there. Getting this optimization 1733789Sahrens * right avoids performance-hostile cache-to-cache transfers. 1734789Sahrens * 1735789Sahrens * Note that having two sets of task queues is also necessary for 1736789Sahrens * correctness: if all of the issue threads get bogged down waiting 1737789Sahrens * for dependent reads (e.g. metaslab freelist) to complete, then 1738789Sahrens * there won't be any threads available to service I/O completion 1739789Sahrens * interrupts. 
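 *
 * Hence the dispatch rule below: async stages before ZIO_STAGE_VDEV_IO_DONE
 * go to the per-type issue taskq, and ZIO_STAGE_VDEV_IO_DONE and later
 * stages go to the per-type interrupt taskq.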
1740789Sahrens */ 1741789Sahrens if ((1U << zio->io_stage) & zio->io_async_stages) { 1742789Sahrens if (zio->io_stage < ZIO_STAGE_VDEV_IO_DONE) 1743789Sahrens tq = zio->io_spa->spa_zio_issue_taskq[zio->io_type]; 1744789Sahrens else 1745789Sahrens tq = zio->io_spa->spa_zio_intr_taskq[zio->io_type]; 1746789Sahrens (void) taskq_dispatch(tq, 1747789Sahrens (task_func_t *)zio_pipeline[zio->io_stage], zio, TQ_SLEEP); 1748789Sahrens } else { 1749789Sahrens zio_pipeline[zio->io_stage](zio); 1750789Sahrens } 1751789Sahrens } 1752789Sahrens 17533668Sgw25295 static boolean_t 17543668Sgw25295 zio_alloc_should_fail(void) 17553668Sgw25295 { 17563668Sgw25295 static uint16_t allocs = 0; 17573668Sgw25295 17583668Sgw25295 return (P2PHASE(allocs++, 1U<<zio_zil_fail_shift) == 0); 17593668Sgw25295 } 17603668Sgw25295 1761789Sahrens /* 1762789Sahrens * Try to allocate an intent log block. Return 0 on success, errno on failure. 1763789Sahrens */ 1764789Sahrens int 17653063Sperrin zio_alloc_blk(spa_t *spa, uint64_t size, blkptr_t *new_bp, blkptr_t *old_bp, 17663063Sperrin uint64_t txg) 1767789Sahrens { 1768789Sahrens int error; 1769789Sahrens 17701544Seschrock spa_config_enter(spa, RW_READER, FTAG); 1771789Sahrens 17723668Sgw25295 if (zio_zil_fail_shift && zio_alloc_should_fail()) { 17733668Sgw25295 spa_config_exit(spa, FTAG); 17743668Sgw25295 return (ENOSPC); 17753668Sgw25295 } 17763668Sgw25295 17773063Sperrin /* 17783063Sperrin * We were passed the previous log blocks dva_t in bp->blk_dva[0]. 17793063Sperrin */ 17803063Sperrin error = metaslab_alloc(spa, size, new_bp, 1, txg, old_bp, B_TRUE); 1781789Sahrens 1782789Sahrens if (error == 0) { 17833063Sperrin BP_SET_LSIZE(new_bp, size); 17843063Sperrin BP_SET_PSIZE(new_bp, size); 17853063Sperrin BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF); 17863063Sperrin BP_SET_CHECKSUM(new_bp, ZIO_CHECKSUM_ZILOG); 17873063Sperrin BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG); 17883063Sperrin BP_SET_LEVEL(new_bp, 0); 17893063Sperrin BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER); 17903063Sperrin new_bp->blk_birth = txg; 1791789Sahrens } 1792789Sahrens 17931544Seschrock spa_config_exit(spa, FTAG); 1794789Sahrens 1795789Sahrens return (error); 1796789Sahrens } 1797789Sahrens 1798789Sahrens /* 1799789Sahrens * Free an intent log block. We know it can't be a gang block, so there's 1800789Sahrens * nothing to do except metaslab_free() it. 1801789Sahrens */ 1802789Sahrens void 1803789Sahrens zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg) 1804789Sahrens { 18051775Sbillm ASSERT(!BP_IS_GANG(bp)); 1806789Sahrens 18071544Seschrock spa_config_enter(spa, RW_READER, FTAG); 1808789Sahrens 18091807Sbonwick metaslab_free(spa, bp, txg, B_FALSE); 1810789Sahrens 18111544Seschrock spa_config_exit(spa, FTAG); 1812789Sahrens } 1813*4469Sperrin 1814*4469Sperrin /* 1815*4469Sperrin * start an async flush of the write cache for this vdev 1816*4469Sperrin */ 1817*4469Sperrin void 1818*4469Sperrin zio_flush_vdev(spa_t *spa, uint64_t vdev, zio_t **zio) 1819*4469Sperrin { 1820*4469Sperrin vdev_t *vd; 1821*4469Sperrin 1822*4469Sperrin /* 1823*4469Sperrin * Lock out configuration changes. 
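	 * The config lock is held across the ioctl dispatch so the vdev
	 * cannot change underneath us; the flush itself completes
	 * asynchronously under the root zio handed back through *zio,
	 * which the caller presumably waits on.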
1824*4469Sperrin */ 1825*4469Sperrin spa_config_enter(spa, RW_READER, FTAG); 1826*4469Sperrin 1827*4469Sperrin if (*zio == NULL) 1828*4469Sperrin *zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL); 1829*4469Sperrin 1830*4469Sperrin vd = vdev_lookup_top(spa, vdev); 1831*4469Sperrin ASSERT(vd); 1832*4469Sperrin 1833*4469Sperrin (void) zio_nowait(zio_ioctl(*zio, spa, vd, DKIOCFLUSHWRITECACHE, 1834*4469Sperrin NULL, NULL, ZIO_PRIORITY_NOW, 1835*4469Sperrin ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY)); 1836*4469Sperrin 1837*4469Sperrin spa_config_exit(spa, FTAG); 1838*4469Sperrin } 1839
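
/*
 * Illustrative sketch (not part of the original source): one plausible way
 * a caller could use zio_flush_vdev() to flush the write caches of several
 * top-level vdevs and then wait for the flushes to complete.  The function
 * name and the way the vdev ids are obtained are assumptions made purely
 * for illustration.
 */
#if 0
static void
example_flush_vdevs(spa_t *spa, uint64_t *vdev_ids, int nvdevs)
{
	zio_t *zio = NULL;
	int i;

	/*
	 * zio_flush_vdev() creates the root zio on first use and hangs one
	 * DKIOCFLUSHWRITECACHE ioctl child off it per top-level vdev id
	 * (the same ids accepted by vdev_lookup_top()).
	 */
	for (i = 0; i < nvdevs; i++)
		zio_flush_vdev(spa, vdev_ids[i], &zio);

	/*
	 * Wait for all outstanding flushes; individual failures are
	 * tolerated because the children carry ZIO_FLAG_CANFAIL.
	 */
	if (zio != NULL)
		(void) zio_wait(zio);
}
#endif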