/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to replay them. These are stored in memory until either the DMU
 * transaction group (txg) commits them to the stable pool and they can
 * be discarded, or they are flushed to the stable log (also in the
 * pool) due to an fsync, O_DSYNC, or other synchronous requirement.
 * In the event of a panic or power failure, those log records
 * (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
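 *
 * Roughly, the chain described above looks like this (an illustrative
 * sketch only; the precise layout is defined in zil.h and zil_impl.h):
 *
 *	ZIL header          ZIL block              ZIL block
 *	+---------+      +---------------+      +---------------+
 *	| zh_log -+----->| log records   |   +->| log records   |
 *	+---------+      |     ...       |   |  |     ...       |
 *	                 | trailer:      |   |  | trailer:      |
 *	                 | zit_next_blk -+---+  | zit_next_blk -+--> ...
 *	                 +---------------+      +---------------+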
 */

/*
 * These global ZIL switches affect all pools
 */
int zil_disable = 0;	/* disable intent logging */
int zil_always = 0;	/* make every transaction synchronous */
int zil_purge = 0;	/* at pool open, just throw everything away */
int zil_noflush = 0;	/* don't flush write cache buffers on disks */

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, blkptr_t *bp, char *buf)
{
	uint64_t blksz = BP_GET_LSIZE(bp);
	zil_trailer_t *ztp = (zil_trailer_t *)(buf + blksz) - 1;
	zio_cksum_t cksum;
	int error;

	error = zio_wait(zio_read(NULL, zilog->zl_spa, bp, buf, blksz,
	    NULL, NULL, ZIO_PRIORITY_SYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE));
	if (error) {
		dprintf_bp(bp, "zilog %p bp %p read failed, error %d: ",
		    zilog, bp, error);
		return (error);
	}

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(buf, blksz);

	/*
	 * Sequence numbers should be... sequential.  The checksum verifier for
	 * the next block should be: <logid[0], logid[1], objset id, seq + 1>.
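	 *
	 * For example (illustrative), if this block's embedded checksum is
	 * <A, B, objset, N>, then the trailer must name <A, B, objset, N+1>
	 * as the next block's verifier; anything else means the pointer is
	 * stale (left over from a previous incarnation of the log) and the
	 * chain ends here.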
	 */
	cksum = bp->blk_cksum;
	cksum.zc_word[3]++;
	if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)) != 0) {
		dprintf_bp(bp, "zilog %p bp %p stale pointer: ", zilog, bp);
		return (ESTALE);
	}

	if (BP_IS_HOLE(&ztp->zit_next_blk)) {
		dprintf_bp(bp, "zilog %p bp %p hole: ", zilog, bp);
		return (ENOENT);
	}

	if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t))) {
		dprintf("zilog %p bp %p nused exceeds blksz\n", zilog, bp);
		return (EOVERFLOW);
	}

	dprintf_bp(bp, "zilog %p bp %p good block: ", zilog, bp);

	return (0);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
void
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	blkptr_t blk;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	blk = zilog->zl_header->zh_log;
	if (BP_IS_HOLE(&blk))
		return;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	for (;;) {
		error = zil_read_log_block(zilog, &blk, lrbuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL)
			continue;

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
	}
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);
	zil_dva_tree_fini(&zilog->zl_dva_tree);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	dprintf_bp(bp, "first_txg %llu: ", first_txg);

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	lwb_t *lwb;
	uint64_t txg;
	dmu_tx_t *tx;
	blkptr_t blk;
	int error;

	ASSERT(zilog->zl_header->zh_claim_txg == 0);
	ASSERT(zilog->zl_header->zh_replay_seq == 0);

	/*
	 * Initialize the log header block.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	/*
	 * Allocate the first log block and assign its checksum verifier.
	 */
	error = zio_alloc_blk(zilog->zl_spa, ZIO_CHECKSUM_ZILOG,
	    ZIL_MIN_BLKSZ, &blk, txg);
	if (error == 0) {
		ZIO_SET_CHECKSUM(&blk.blk_cksum,
		    spa_get_random(-1ULL), spa_get_random(-1ULL),
		    dmu_objset_id(zilog->zl_os), 1ULL);

		/*
		 * Allocate a log write buffer (lwb) for the first log block.
		 */
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_seq = 0;
		lwb->lwb_state = UNWRITTEN;
		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	dmu_tx_commit(tx);
	txg_wait_synced(zilog->zl_dmu_pool, txg);
}

/*
 * In one tx, free all log blocks and clear the log header.
 */
void
zil_destroy(zilog_t *zilog)
{
	dmu_tx_t *tx;
	uint64_t txg;

	mutex_enter(&zilog->zl_destroy_lock);

	if (BP_IS_HOLE(&zilog->zl_header->zh_log)) {
		mutex_exit(&zilog->zl_destroy_lock);
		return;
	}

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	zil_parse(zilog, zil_free_log_block, zil_free_log_record, tx,
	    zilog->zl_header->zh_claim_txg);
	zilog->zl_destroy_txg = txg;

	dmu_tx_commit(tx);
	txg_wait_synced(zilog->zl_dmu_pool, txg);

	mutex_exit(&zilog->zl_destroy_lock);
}

void
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
	if (error) {
		cmn_err(CE_WARN, "can't process intent log for %s", osname);
		return;
	}

	zilog = dmu_objset_zil(os);
	zh = zilog->zl_header;

	/*
	 * Claim all log blocks if we haven't already done so.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zil_parse(zilog, zil_claim_log_block, zil_claim_log_record,
		    tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}
	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
}

void
zil_add_vdev(zilog_t *zilog, uint64_t vdev, uint64_t seq)
{
	zil_vdev_t *zv;

	if (zil_noflush)
		return;

	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	zv = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
	zv->vdev = vdev;
	zv->seq = seq;
	list_insert_tail(&zilog->zl_vdev_list, zv);
}

void
zil_flush_vdevs(zilog_t *zilog, uint64_t seq)
{
	vdev_t *vd;
	zil_vdev_t *zv, *zv2;
	zio_t *zio;
	spa_t *spa;
	uint64_t vdev;

	if (zil_noflush)
		return;

	ASSERT(MUTEX_HELD(&zilog->zl_lock));

	spa = zilog->zl_spa;
	zio = NULL;

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL &&
	    zv->seq <= seq) {
		vdev = zv->vdev;
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));

		/*
		 * remove all chained entries <= seq with same vdev
		 */
		zv = list_head(&zilog->zl_vdev_list);
		while (zv && zv->seq <= seq) {
			zv2 = list_next(&zilog->zl_vdev_list, zv);
			if (zv->vdev == vdev) {
				list_remove(&zilog->zl_vdev_list, zv);
				kmem_free(zv, sizeof (zil_vdev_t));
			}
			zv = zv2;
		}

		/* flush the write cache for this vdev */
		mutex_exit(&zilog->zl_lock);
		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		vd = vdev_lookup_top(spa, vdev);
		ASSERT(vd);
		(void) zio_nowait(zio_ioctl(zio, spa, vd, DKIOCFLUSHWRITECACHE,
		    NULL, NULL, ZIO_PRIORITY_NOW,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));
		mutex_enter(&zilog->zl_lock);
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
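	 *
	 * Note that zl_lock is dropped for the duration of the wait below
	 * and re-acquired afterwards, presumably so that other ZIL activity
	 * is not held up behind the flush.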
	 */
	if (zio != NULL) {
		mutex_exit(&zilog->zl_lock);
		(void) zio_wait(zio);
		mutex_enter(&zilog->zl_lock);
	}
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *prev;
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	uint64_t max_seq;

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error) {
		zilog->zl_log_error = B_TRUE;
		mutex_exit(&zilog->zl_lock);
		cv_broadcast(&zilog->zl_cv_seq);
		return;
	}

	prev = list_prev(&zilog->zl_lwb_list, lwb);
	if (prev && prev->lwb_state != SEQ_COMPLETE) {
		/* There's an unwritten buffer in the chain before this one */
		lwb->lwb_state = SEQ_INCOMPLETE;
		mutex_exit(&zilog->zl_lock);
		return;
	}

	max_seq = lwb->lwb_seq;
	lwb->lwb_state = SEQ_COMPLETE;
	/*
	 * We must also follow up the chain for already written buffers
	 * to see if we can set zl_ss_seq even higher.
	 */
	while (lwb = list_next(&zilog->zl_lwb_list, lwb)) {
		if (lwb->lwb_state != SEQ_INCOMPLETE)
			break;
		lwb->lwb_state = SEQ_COMPLETE;
		/* lwb_seq will be zero if we've written an empty buffer */
		if (lwb->lwb_seq) {
			ASSERT3U(max_seq, <, lwb->lwb_seq);
			max_seq = lwb->lwb_seq;
		}
	}
	zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
	mutex_exit(&zilog->zl_lock);
	cv_broadcast(&zilog->zl_cv_seq);
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
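	 *
	 * (Illustrative: the next block is allocated in some txg N; by
	 * holding N open here and releasing it only from
	 * zil_lwb_write_done(), the allocation cannot become permanent
	 * before the block that points at it is safely on disk.)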
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_cur_used, zilog->zl_prev_used);
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP(zil_blksz, ZIL_MIN_BLKSZ);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	error = zio_alloc_blk(zilog->zl_spa, ZIO_CHECKSUM_ZILOG,
	    zil_blksz, &ztp->zit_next_blk, txg);
	if (error) {
		txg_rele_to_sync(&lwb->lwb_txgh);
		return (NULL);
	}

	ASSERT3U(ztp->zit_next_blk.blk_birth, ==, txg);
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	ztp->zit_next_blk.blk_cksum = lwb->lwb_blk.blk_cksum;
	ztp->zit_next_blk.blk_cksum.zc_word[3]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = ztp->zit_next_blk;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_seq = 0;
	nlwb->lwb_state = UNWRITTEN;

	/*
	 * Put new lwb at the end of the log chain,
	 * and record the vdev for later flushing
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))),
	    lwb->lwb_seq);
	mutex_exit(&zilog->zl_lock);

	/*
	 * write the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	zio_nowait(zio_rewrite(NULL, zilog->zl_spa, ZIO_CHECKSUM_ZILOG, 0,
	    &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz, zil_lwb_write_done, lwb,
	    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_MUSTSUCCEED));

	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	uint64_t seq = lrc->lrc_seq;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	int error;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
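	 *
	 * (zl_get_data is the callback registered at zil_open() time --
	 * for the ZPL this is typically zfs_get_data() -- and is only
	 * called when the caller did not already copy the data into the
	 * record, i.e. when itx_data_copied is not set.)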
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);

		if (!itx->itx_data_copied &&
		    (error = zilog->zl_get_data(itx->itx_private, lr)) != 0) {
			if (error != ENOENT && error != EALREADY) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				mutex_enter(&zilog->zl_lock);
				zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
				zil_add_vdev(zilog,
				    DVA_GET_VDEV(BP_IDENTITY(&(lr->lr_blkptr))),
				    seq);
				mutex_exit(&zilog->zl_lock);
				return (lwb);
			}
			mutex_enter(&zilog->zl_lock);
			zil_add_vdev(zilog,
			    DVA_GET_VDEV(BP_IDENTITY(&(lr->lr_blkptr))), seq);
			mutex_exit(&zilog->zl_lock);
			return (lwb);
		}
	}

	zilog->zl_cur_used += reclen;

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		if (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			mutex_enter(&zilog->zl_lock);
			zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
			mutex_exit(&zilog->zl_lock);
			return (lwb);
		}
	}

	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
	lwb->lwb_nused += reclen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_seq, <, seq);
	lwb->lwb_seq = seq;
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(int txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP(lrsize, sizeof (uint64_t));

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

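/*
 * Typical usage of the two functions above (an illustrative sketch only;
 * the real callers are the zfs_log_*() routines in the ZPL).  A caller
 * builds a record inside the same dmu_tx_t as the change it describes:
 *
 *	itx = zil_itx_create(TX_SETATTR, sizeof (lr_setattr_t));
 *	... fill in the lr_setattr_t fields past the common lr_t header ...
 *	seq = zil_itx_assign(zilog, itx, tx);
 *
 * and later passes the returned sequence number to zil_commit() when the
 * record must reach stable storage.
 */
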
/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	uint64_t max_seq = 0;
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
		ASSERT3U(max_seq, <, itx->itx_lr.lrc_seq);
		max_seq = itx->itx_lr.lrc_seq;
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	if (max_seq > zilog->zl_ss_seq) {
		zilog->zl_ss_seq = max_seq;
		cv_broadcast(&zilog->zl_cv_seq);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_clean(zilog_t *zilog)
{
	/*
	 * Check for any log blocks that can be freed.
	 * Log blocks are only freed when the log block allocation and
	 * log records contained within are both known to be committed.
	 */
	mutex_enter(&zilog->zl_lock);
	if (list_head(&zilog->zl_itx_list) != NULL)
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
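 *
 * Callers such as zfs_fsync() typically pass the sequence number that
 * zil_itx_assign() returned for the last record they care about; a seq
 * of UINT64_MAX pushes everything (it is capped below at the largest
 * assigned itx sequence number).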
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, int ioflag)
{
	uint64_t txg;
	uint64_t max_seq;
	uint64_t reclen;
	itx_t *itx;
	lwb_t *lwb;
	spa_t *spa;

	if (zilog == NULL || seq == 0 ||
	    ((ioflag & (FSYNC | FDSYNC | FRSYNC)) == 0 && !zil_always))
		return;

	spa = zilog->zl_spa;
	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

	for (;;) {
		if (zilog->zl_ss_seq >= seq) {	/* already on stable storage */
			cv_signal(&zilog->zl_cv_write);
			mutex_exit(&zilog->zl_lock);
			return;
		}

		if (zilog->zl_writer == B_FALSE)	/* no one writing, do it */
			break;

		cv_wait(&zilog->zl_cv_write, &zilog->zl_lock);
	}

	zilog->zl_writer = B_TRUE;
	max_seq = 0;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/*
	 * Loop through in-memory log transactions filling log blocks,
	 * until we reach the given sequence number and there's no more
	 * room in the write buffer.
	 */
	for (;;) {
		itx = list_head(&zilog->zl_itx_list);
		if (itx == NULL)
			break;

		reclen = itx->itx_lr.lrc_reclen;
		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused + reclen >
		    ZIL_BLK_DATA_SZ(lwb))))
			break;

		list_remove(&zilog->zl_itx_list, itx);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		mutex_exit(&zilog->zl_lock);
		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		else
			max_seq = itx->itx_lr.lrc_seq;
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_itx_list_sz -= reclen;
	}

	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_nused != 0)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	mutex_enter(&zilog->zl_lock);
	if (max_seq > zilog->zl_ss_seq) {
		zilog->zl_ss_seq = max_seq;
		cv_broadcast(&zilog->zl_cv_seq);
	}
	/*
	 * Wait if necessary for our seq to be committed.
	 */
	if (lwb) {
		while (zilog->zl_ss_seq < seq && zilog->zl_log_error == 0)
			cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
		zil_flush_vdevs(zilog, seq);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		max_seq = zilog->zl_itx_seq;
		mutex_exit(&zilog->zl_lock);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
		cv_broadcast(&zilog->zl_cv_seq);
	}
	/* wake up others waiting to start a write */
	zilog->zl_writer = B_FALSE;
	mutex_exit(&zilog->zl_lock);
	cv_signal(&zilog->zl_cv_write);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	ASSERT(zilog->zl_stop_sync == 0);

	zilog->zl_header->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		bzero(zilog->zl_header, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));
		zilog->zl_destroy_txg = 0;
	}

	mutex_enter(&zilog->zl_lock);
	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	zilog->zl_header->zh_log = lwb->lwb_blk;
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), NULL, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
	    offsetof(zil_vdev_t, vdev_seq_node));

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;
	zil_vdev_t *zv;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));
	}
	list_destroy(&zilog->zl_vdev_list);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	txg_wait_synced(zilog->zl_dmu_pool, 0);
	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);
	if (zilog->zl_header->zh_claim_txg != 0) {	/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	zilog->zl_suspend++;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, FSYNC);

	mutex_enter(&zilog->zl_lock);
	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		if (lwb->lwb_buf != NULL) {
			/*
			 * Wait for the buffer if it's in the process of
			 * being written.
			 */
			if ((lwb->lwb_seq != 0) &&
			    (lwb->lwb_state != SEQ_COMPLETE)) {
				cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
				continue;
			}
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		}
		list_remove(&zilog->zl_lwb_list, lwb);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	objset_t	*zr_os;
	zil_replay_func_t **zr_replay;
	void		*zr_arg;
	void		(*zr_rm_sync)(void *arg);
	uint64_t	*zr_txgp;
	boolean_t	zr_byteswap;
	char		*zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int pass, error;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error.  We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
	 */
	for (pass = 1; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign().  That's the only way
			 * to ensure that those code paths remain well tested.
			 */
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

	if (error) {
		char *name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		dmu_objset_name(zr->zr_os, name);
		cmn_err(CE_WARN, "ZFS replay transaction error %d, "
		    "dataset %s, seq 0x%llx, txtype %llu\n",
		    error, name,
		    (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
		zilog->zl_stop_replay = 1;
		kmem_free(name, MAXNAMELEN);
	}

	/*
	 * The DMU's dnode layer doesn't see removes until the txg commits,
	 * so a subsequent claim can spuriously fail with EEXIST.
	 * To prevent this, if we might have removed an object,
	 * wait for the delete thread to delete it, and then
	 * wait for the transaction group to sync.
	 */
	if (txtype == TX_REMOVE || txtype == TX_RMDIR || txtype == TX_RENAME) {
		if (zr->zr_rm_sync != NULL)
			zr->zr_rm_sync(zr->zr_arg);
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
	}
}

/*
 * If this dataset has an intent log, replay it and destroy it.
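 *
 * (Replay is normally driven when the dataset is mounted; zil_claim(),
 * which runs earlier at pool open, will already have claimed the log
 * blocks that replay depends on.)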
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE], void (*rm_sync)(void *arg))
{
	zilog_t *zilog = dmu_objset_zil(os);
	zil_replay_arg_t zr;

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_rm_sync = rm_sync;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zilog->zl_header->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	if (rm_sync != NULL)
		rm_sync(arg);
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	zil_parse(zilog, NULL, zil_replay_log_record, &zr,
	    zilog->zl_header->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog);
}