/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to an fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power failure, those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
 */
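/*
 * A rough sketch of the chain described above (illustrative only; the
 * "Figure X" referred to is not reproduced here):
 *
 *	ZIL header         ZIL block              ZIL block
 *	+---------+        +------------+         +------------+
 *	| zh_log -+------->| records... |    +--->| records... |
 *	+---------+        | trailer:   |    |    | trailer:   |
 *	                   |  nused     |    |    |  nused     |
 *	                   |  next bp --+----+    |  next bp --+--> ...
 *	                   +------------+         +------------+
 *
 * Each block ends with a zil_trailer_t giving how much of the block is in
 * use (zit_nused) and the block pointer of the next block (zit_next_blk).
 */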

/*
 * These global ZIL switches affect all pools
 */
int zil_disable = 0;	/* disable intent logging */
int zil_always = 0;	/* make every transaction synchronous */
int zil_purge = 0;	/* at pool open, just throw everything away */
int zil_noflush = 0;	/* don't flush write cache buffers on disks */

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}
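/*
 * Illustrative only: the AVL tree above is used while walking the log so
 * that a block referenced more than once (for example, by the chain and by
 * a TX_WRITE record) is claimed or freed just once.  The pattern is roughly:
 *
 *	zil_dva_tree_init(&zilog->zl_dva_tree);
 *	...
 *	if (zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0)
 *		claim or free the block;	(EEXIST means already handled)
 *	...
 *	zil_dva_tree_fini(&zilog->zl_dva_tree);
 *
 * See zil_parse(), zil_claim_log_block() and zil_free_log_record() below.
 */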

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, blkptr_t *bp, char *buf)
{
	uint64_t blksz = BP_GET_LSIZE(bp);
	zil_trailer_t *ztp = (zil_trailer_t *)(buf + blksz) - 1;
	zio_cksum_t cksum;
	zbookmark_t zb;
	int error;

	zb.zb_objset = bp->blk_cksum.zc_word[2];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = bp->blk_cksum.zc_word[3];

	error = zio_wait(zio_read(NULL, zilog->zl_spa, bp, buf, blksz,
	    NULL, NULL, ZIO_PRIORITY_SYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
	if (error) {
		dprintf_bp(bp, "zilog %p bp %p read failed, error %d: ",
		    zilog, bp, error);
		return (error);
	}

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(buf, blksz);

	/*
	 * Sequence numbers should be... sequential.  The checksum verifier for
	 * the next block should be: <logid[0], logid[1], objset id, seq + 1>.
	 */
	cksum = bp->blk_cksum;
	cksum.zc_word[3]++;
	if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)) != 0) {
		dprintf_bp(bp, "zilog %p bp %p stale pointer: ", zilog, bp);
		return (ESTALE);
	}

	if (BP_IS_HOLE(&ztp->zit_next_blk)) {
		dprintf_bp(bp, "zilog %p bp %p hole: ", zilog, bp);
		return (ENOENT);
	}

	if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t))) {
		dprintf("zilog %p bp %p nused exceeds blksz\n", zilog, bp);
		return (EOVERFLOW);
	}

	dprintf_bp(bp, "zilog %p bp %p good block: ", zilog, bp);

	return (0);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
void
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	blkptr_t blk;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	blk = zilog->zl_header->zh_log;
	if (BP_IS_HOLE(&blk))
		return;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	for (;;) {
		error = zil_read_log_block(zilog, &blk, lrbuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL)
			continue;

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
	}
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);
	zil_dva_tree_fini(&zilog->zl_dva_tree);
}
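/*
 * Illustrative only: zil_parse() drives a pair of caller-supplied callbacks,
 * one per chained block and one per log record.  A hypothetical walker that
 * simply counts what it visits would look roughly like:
 *
 *	static void
 *	count_blk(zilog_t *zl, blkptr_t *bp, void *arg, uint64_t txg)
 *	{
 *		((uint64_t *)arg)[0]++;
 *	}
 *
 *	static void
 *	count_lr(zilog_t *zl, lr_t *lrc, void *arg, uint64_t txg)
 *	{
 *		((uint64_t *)arg)[1]++;
 *	}
 *
 *	uint64_t counts[2] = { 0, 0 };
 *	zil_parse(zilog, count_blk, count_lr, counts, 0);
 *
 * The real callers (zil_claim(), zil_destroy() and zil_replay()) use the
 * same pattern with claim, free and replay callbacks respectively.
 */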
/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	dprintf_bp(bp, "first_txg %llu: ", first_txg);

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	lwb_t *lwb;
	uint64_t txg;
	dmu_tx_t *tx;
	blkptr_t blk;
	int error;
	int no_blk;

	ASSERT(zilog->zl_header->zh_claim_txg == 0);
	ASSERT(zilog->zl_header->zh_replay_seq == 0);

	/*
	 * Initialize the log header block.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	/*
	 * If we don't have a log block already then
	 * allocate the first log block and assign its checksum verifier.
	 */
	no_blk = BP_IS_HOLE(&zilog->zl_header->zh_log);
	if (no_blk) {
		error = zio_alloc_blk(zilog->zl_spa, ZIO_CHECKSUM_ZILOG,
		    ZIL_MIN_BLKSZ, &blk, txg);
	} else {
		blk = zilog->zl_header->zh_log;
		error = 0;
	}
	if (error == 0) {
		ZIO_SET_CHECKSUM(&blk.blk_cksum,
		    spa_get_random(-1ULL), spa_get_random(-1ULL),
		    dmu_objset_id(zilog->zl_os), 1ULL);

		/*
		 * Allocate a log write buffer (lwb) for the first log block.
		 */
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_seq = 0;
		lwb->lwb_state = UNWRITTEN;
		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	dmu_tx_commit(tx);
	if (no_blk)
		txg_wait_synced(zilog->zl_dmu_pool, txg);
}

/*
 * In one tx, free all log blocks and clear the log header.
 */
void
zil_destroy(zilog_t *zilog)
{
	dmu_tx_t *tx;
	uint64_t txg;

	mutex_enter(&zilog->zl_destroy_lock);

	if (BP_IS_HOLE(&zilog->zl_header->zh_log)) {
		mutex_exit(&zilog->zl_destroy_lock);
		return;
	}

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	zil_parse(zilog, zil_free_log_block, zil_free_log_record, tx,
	    zilog->zl_header->zh_claim_txg);
	/*
	 * zil_sync clears the zil header as soon as the zl_destroy_txg commits
	 */
	zilog->zl_destroy_txg = txg;

	dmu_tx_commit(tx);
	txg_wait_synced(zilog->zl_dmu_pool, txg);

	mutex_exit(&zilog->zl_destroy_lock);
}

void
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
	if (error) {
		cmn_err(CE_WARN, "can't process intent log for %s", osname);
		return;
	}

	zilog = dmu_objset_zil(os);
	zh = zilog->zl_header;

	/*
	 * Claim all log blocks if we haven't already done so.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zil_parse(zilog, zil_claim_log_block, zil_claim_log_record,
		    tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}
	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
}

void
zil_add_vdev(zilog_t *zilog, uint64_t vdev, uint64_t seq)
{
	zil_vdev_t *zv;

	if (zil_noflush)
		return;

	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	zv = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
	zv->vdev = vdev;
	zv->seq = seq;
	list_insert_tail(&zilog->zl_vdev_list, zv);
}

void
zil_flush_vdevs(zilog_t *zilog, uint64_t seq)
{
	vdev_t *vd;
	zil_vdev_t *zv, *zv2;
	zio_t *zio;
	spa_t *spa;
	uint64_t vdev;

	if (zil_noflush)
		return;

	ASSERT(MUTEX_HELD(&zilog->zl_lock));

	spa = zilog->zl_spa;
	zio = NULL;

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL &&
	    zv->seq <= seq) {
		vdev = zv->vdev;
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));

		/*
		 * remove all chained entries <= seq with same vdev
		 */
		zv = list_head(&zilog->zl_vdev_list);
		while (zv && zv->seq <= seq) {
			zv2 = list_next(&zilog->zl_vdev_list, zv);
			if (zv->vdev == vdev) {
				list_remove(&zilog->zl_vdev_list, zv);
				kmem_free(zv, sizeof (zil_vdev_t));
			}
			zv = zv2;
		}

		/* flush the write cache for this vdev */
		mutex_exit(&zilog->zl_lock);
		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		vd = vdev_lookup_top(spa, vdev);
		ASSERT(vd);
		(void) zio_nowait(zio_ioctl(zio, spa, vd, DKIOCFLUSHWRITECACHE,
		    NULL, NULL, ZIO_PRIORITY_NOW,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));
		mutex_enter(&zilog->zl_lock);
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	if (zio != NULL) {
		mutex_exit(&zilog->zl_lock);
		(void) zio_wait(zio);
		mutex_enter(&zilog->zl_lock);
	}
}
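/*
 * Illustrative only: log block writes can complete out of order.  Suppose
 * lwb A (lwb_seq 10) and lwb B (lwb_seq 20) are both in flight and B's write
 * finishes first.  zil_lwb_write_done() below marks B SEQ_INCOMPLETE because
 * its predecessor A is still unwritten.  When A completes, A becomes
 * SEQ_COMPLETE, the forward walk promotes B as well, and zl_ss_seq advances
 * straight to 20, waking the threads blocked in zil_commit().
 */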
/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *prev;
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	uint64_t max_seq;

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error) {
		zilog->zl_log_error = B_TRUE;
		mutex_exit(&zilog->zl_lock);
		cv_broadcast(&zilog->zl_cv_seq);
		return;
	}

	prev = list_prev(&zilog->zl_lwb_list, lwb);
	if (prev && prev->lwb_state != SEQ_COMPLETE) {
		/* There's an unwritten buffer in the chain before this one */
		lwb->lwb_state = SEQ_INCOMPLETE;
		mutex_exit(&zilog->zl_lock);
		return;
	}

	max_seq = lwb->lwb_seq;
	lwb->lwb_state = SEQ_COMPLETE;
	/*
	 * We must also follow up the chain for already written buffers
	 * to see if we can set zl_ss_seq even higher.
	 */
	while (lwb = list_next(&zilog->zl_lwb_list, lwb)) {
		if (lwb->lwb_state != SEQ_INCOMPLETE)
			break;
		lwb->lwb_state = SEQ_COMPLETE;
		/* lwb_seq will be zero if we've written an empty buffer */
		if (lwb->lwb_seq) {
			ASSERT3U(max_seq, <, lwb->lwb_seq);
			max_seq = lwb->lwb_seq;
		}
	}
	zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
	mutex_exit(&zilog->zl_lock);
	cv_broadcast(&zilog->zl_cv_seq);
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	uint64_t txg;
	uint64_t zil_blksz;
	zbookmark_t zb;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_cur_used, zilog->zl_prev_used);
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP(zil_blksz, ZIL_MIN_BLKSZ);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

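	/*
	 * Worked example (values assumed for illustration, not taken from
	 * zil_impl.h): if the previous block used 8K, 20K is already in the
	 * current block and 52K of itxs is queued, the MAX() terms above
	 * yield 52K plus the trailer, P2ROUNDUP() rounds that up to the next
	 * ZIL_MIN_BLKSZ multiple, and the cap keeps the request at or below
	 * ZIL_MAX_BLKSZ.  Sizing to recent demand avoids chaining many tiny
	 * blocks during a burst of synchronous activity.
	 */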
	error = zio_alloc_blk(zilog->zl_spa, ZIO_CHECKSUM_ZILOG,
	    zil_blksz, &ztp->zit_next_blk, txg);
	if (error) {
		/*
		 * Reinitialise the lwb.
		 * By returning NULL the caller will call txg_wait_synced()
		 */
		mutex_enter(&zilog->zl_lock);
		ASSERT(lwb->lwb_state == UNWRITTEN);
		lwb->lwb_nused = 0;
		lwb->lwb_seq = 0;
		mutex_exit(&zilog->zl_lock);
		txg_rele_to_sync(&lwb->lwb_txgh);
		return (NULL);
	}

	ASSERT3U(ztp->zit_next_blk.blk_birth, ==, txg);
	ztp->zit_pad = 0;
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	ztp->zit_next_blk.blk_cksum = lwb->lwb_blk.blk_cksum;
	ztp->zit_next_blk.blk_cksum.zc_word[3]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = ztp->zit_next_blk;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_seq = 0;
	nlwb->lwb_state = UNWRITTEN;

	/*
	 * Put new lwb at the end of the log chain,
	 * and record the vdev for later flushing
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))),
	    lwb->lwb_seq);
	mutex_exit(&zilog->zl_lock);

	/*
	 * write the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);

	zb.zb_objset = lwb->lwb_blk.blk_cksum.zc_word[2];
	zb.zb_object = 0;
	zb.zb_level = -1;
	zb.zb_blkid = lwb->lwb_blk.blk_cksum.zc_word[3];

	zio_nowait(zio_rewrite(NULL, zilog->zl_spa, ZIO_CHECKSUM_ZILOG, 0,
	    &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz, zil_lwb_write_done, lwb,
	    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb));

	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	uint64_t seq = lrc->lrc_seq;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	int error;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);

		if (!itx->itx_data_copied &&
		    (error = zilog->zl_get_data(itx->itx_private, lr)) != 0) {
			if (error != ENOENT && error != EALREADY) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				mutex_enter(&zilog->zl_lock);
				zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
				zil_add_vdev(zilog,
				    DVA_GET_VDEV(BP_IDENTITY(&(lr->lr_blkptr))),
				    seq);
				mutex_exit(&zilog->zl_lock);
				return (lwb);
			}
			mutex_enter(&zilog->zl_lock);
			zil_add_vdev(zilog,
			    DVA_GET_VDEV(BP_IDENTITY(&(lr->lr_blkptr))), seq);
			mutex_exit(&zilog->zl_lock);
			return (lwb);
		}
	}

	zilog->zl_cur_used += reclen;

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		ASSERT(lwb->lwb_nused == 0);
		if (reclen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			mutex_enter(&zilog->zl_lock);
			zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
			mutex_exit(&zilog->zl_lock);
			return (lwb);
		}
	}

	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
	lwb->lwb_nused += reclen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_seq, <, seq);
	lwb->lwb_seq = seq;
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(int txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP(lrsize, sizeof (uint64_t));

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}

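/*
 * Illustrative only (loosely modeled on what a zfs_log_*() routine does;
 * details are hypothetical): a caller building an in-memory log record
 * creates an itx, fills in the type-specific fields that follow the common
 * lr_t, and assigns it to its open transaction:
 *
 *	itx_t *itx = zil_itx_create(TX_CREATE, sizeof (lr_create_t));
 *	lr_create_t *lr = (lr_create_t *)&itx->itx_lr;
 *	... fill in the lr_create_t body ...
 *	seq = zil_itx_assign(zilog, itx, tx);
 *
 * The returned sequence number is what the caller later hands to
 * zil_commit() when the operation must reach stable storage.
 */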
/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	uint64_t max_seq = 0;
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
		ASSERT3U(max_seq, <, itx->itx_lr.lrc_seq);
		max_seq = itx->itx_lr.lrc_seq;
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	if (max_seq > zilog->zl_ss_seq) {
		zilog->zl_ss_seq = max_seq;
		cv_broadcast(&zilog->zl_cv_seq);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_clean(zilog_t *zilog)
{
	/*
	 * Check for any log blocks that can be freed.
	 * Log blocks are only freed when the log block allocation and
	 * log records contained within are both known to be committed.
	 */
	mutex_enter(&zilog->zl_lock);
	if (list_head(&zilog->zl_itx_list) != NULL)
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 */
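/*
 * Illustrative only: a synchronous caller (an fsync(2), or a write to a file
 * opened with O_DSYNC, for example) passes the sequence number returned by
 * zil_itx_assign() together with its open flags:
 *
 *	zil_commit(zilog, seq, ioflag);		(blocks until seq is stable)
 *
 * Passing UINT64_MAX commits everything assigned so far, as zil_suspend()
 * does; if none of FSYNC, FDSYNC or FRSYNC is set and zil_always is clear,
 * the call returns immediately.
 */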
void
zil_commit(zilog_t *zilog, uint64_t seq, int ioflag)
{
	uint64_t txg;
	uint64_t max_seq;
	uint64_t reclen;
	itx_t *itx;
	lwb_t *lwb;
	spa_t *spa;

	if (zilog == NULL || seq == 0 ||
	    ((ioflag & (FSYNC | FDSYNC | FRSYNC)) == 0 && !zil_always))
		return;

	spa = zilog->zl_spa;
	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

	for (;;) {
		if (zilog->zl_ss_seq >= seq) {	/* already on stable storage */
			mutex_exit(&zilog->zl_lock);
			return;
		}

		if (zilog->zl_writer == B_FALSE) /* no one writing, do it */
			break;

		cv_wait(&zilog->zl_cv_write, &zilog->zl_lock);
	}

	zilog->zl_writer = B_TRUE;
	max_seq = 0;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/*
	 * Loop through in-memory log transactions filling log blocks,
	 * until we reach the given sequence number and there's no more
	 * room in the write buffer.
	 */
	for (;;) {
		itx = list_head(&zilog->zl_itx_list);
		if (itx == NULL)
			break;

		reclen = itx->itx_lr.lrc_reclen;
		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused + reclen >
		    ZIL_BLK_DATA_SZ(lwb))))
			break;

		list_remove(&zilog->zl_itx_list, itx);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		mutex_exit(&zilog->zl_lock);
		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		else
			max_seq = itx->itx_lr.lrc_seq;
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_itx_list_sz -= reclen;
	}

	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_nused != 0)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	mutex_enter(&zilog->zl_lock);
	if (max_seq > zilog->zl_ss_seq) {
		zilog->zl_ss_seq = max_seq;
		cv_broadcast(&zilog->zl_cv_seq);
	}
	/*
	 * Wait if necessary for our seq to be committed.
	 */
	if (lwb) {
		while (zilog->zl_ss_seq < seq && zilog->zl_log_error == 0)
			cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
		zil_flush_vdevs(zilog, seq);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		max_seq = zilog->zl_itx_seq;
		mutex_exit(&zilog->zl_lock);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
		cv_broadcast(&zilog->zl_cv_seq);
	}
	/* wake up others waiting to start a write */
	zilog->zl_writer = B_FALSE;
	mutex_exit(&zilog->zl_lock);
	cv_broadcast(&zilog->zl_cv_write);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	ASSERT(zilog->zl_stop_sync == 0);

	zilog->zl_header->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		bzero(zilog->zl_header, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));
		zilog->zl_destroy_txg = 0;
	}

	mutex_enter(&zilog->zl_lock);
	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	zilog->zl_header->zh_log = lwb->lwb_blk;
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), NULL, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
	    offsetof(zil_vdev_t, vdev_seq_node));

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;
	zil_vdev_t *zv;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));
	}
	list_destroy(&zilog->zl_vdev_list);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * return true if the initial log block is not valid
 */
static int
zil_empty(zilog_t *zilog)
{
	blkptr_t blk;
	char *lrbuf;
	int error;

	blk = zilog->zl_header->zh_log;
	if (BP_IS_HOLE(&blk))
		return (1);

	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	error = zil_read_log_block(zilog, &blk, lrbuf);
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);
	return (error ? 1 : 0);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	if (!zil_is_committed(zilog))
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}

/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
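/*
 * Illustrative only: a caller that needs a quiescent, empty log (such as the
 * snapshot path mentioned above) brackets its work like this:
 *
 *	if (zil_suspend(zilog) != 0)
 *		return (EBUSY);		(log still has unreplayed records)
 *	... take the snapshot ...
 *	zil_resume(zilog);
 *
 * zil_suspend() commits everything outstanding and then destroys the
 * on-disk chain, so the snapshot is taken with an empty intent log.
 */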
int
zil_suspend(zilog_t *zilog)
{
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);
	if (zilog->zl_header->zh_claim_txg != 0) {	/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	zilog->zl_suspend++;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, FSYNC);

	mutex_enter(&zilog->zl_lock);
	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		if (lwb->lwb_buf != NULL) {
			/*
			 * Wait for the buffer if it's in the process of
			 * being written.
			 */
			if ((lwb->lwb_seq != 0) &&
			    (lwb->lwb_state != SEQ_COMPLETE)) {
				cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
				continue;
			}
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		}
		list_remove(&zilog->zl_lwb_list, lwb);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	objset_t	*zr_os;
	zil_replay_func_t **zr_replay;
	void		*zr_arg;
	void		(*zr_rm_sync)(void *arg);
	uint64_t	*zr_txgp;
	boolean_t	zr_byteswap;
	char		*zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int pass, error;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
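	/*
	 * Illustrative note: a TX_WRITE record can take two forms.  If the
	 * write data was copied into the itx, it follows the lr_write_t
	 * immediately and lrc_reclen is larger than sizeof (lr_write_t), so
	 * replay already has the bytes in zr_lrbuf.  If only a block pointer
	 * was recorded, lrc_reclen is exactly sizeof (lr_write_t) and the
	 * data must be read back through lr_blkptr; that is the case handled
	 * below.
	 */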
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error.  We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			zbookmark_t zb;

			zb.zb_objset = dmu_objset_id(zilog->zl_os);
			zb.zb_object = lrw->lr_foid;
			zb.zb_level = -1;
			zb.zb_blkid = lrw->lr_offset / BP_GET_LSIZE(wbp);

			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &zb));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
	 */
	for (pass = 1; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign().  That's the only way
			 * to ensure that those code paths remain well tested.
			 */
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

	if (error) {
		char *name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		dmu_objset_name(zr->zr_os, name);
		cmn_err(CE_WARN, "ZFS replay transaction error %d, "
		    "dataset %s, seq 0x%llx, txtype %llu\n",
		    error, name,
		    (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
		zilog->zl_stop_replay = 1;
		kmem_free(name, MAXNAMELEN);
	}

	/*
	 * The DMU's dnode layer doesn't see removes until the txg commits,
	 * so a subsequent claim can spuriously fail with EEXIST.
	 * To prevent this, if we might have removed an object,
	 * wait for the delete thread to delete it, and then
	 * wait for the transaction group to sync.
	 */
	if (txtype == TX_REMOVE || txtype == TX_RMDIR || txtype == TX_RENAME) {
		if (zr->zr_rm_sync != NULL)
			zr->zr_rm_sync(zr->zr_arg);
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
	}
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE], void (*rm_sync)(void *arg))
{
	zilog_t *zilog = dmu_objset_zil(os);
	zil_replay_arg_t zr;

	if (zil_empty(zilog)) {
		/*
		 * Initialise the log header but don't free the log block
		 * which will get reused.
		 */
		zilog->zl_header->zh_claim_txg = 0;
		zilog->zl_header->zh_replay_seq = 0;
		return;
	}

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_rm_sync = rm_sync;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zilog->zl_header->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	if (rm_sync != NULL)
		rm_sync(arg);
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	zil_parse(zilog, NULL, zil_replay_log_record, &zr,
	    zilog->zl_header->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog);
}

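/*
 * Illustrative only (the identifiers are hypothetical): a file system
 * replays its log when the objset is mounted, supplying a table of
 * per-txtype handlers and a callback that drains pending deletes, along
 * the lines of:
 *
 *	zil_replay(os, fs_data, &fs_assign_txg,
 *	    fs_replay_vector, fs_wait_for_deletes);
 *
 * where fs_replay_vector[TX_MAX_TYPE] maps TX_CREATE, TX_WRITE, TX_REMOVE,
 * etc. to functions that re-apply the recorded operation.
 */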
/*
 * Report whether all transactions are committed
 */
int
zil_is_committed(zilog_t *zilog)
{
	lwb_t *lwb;

	if (zilog == NULL || list_head(&zilog->zl_itx_list))
		return (B_FALSE);

	/*
	 * A log write buffer at the head of the list that is not UNWRITTEN
	 * means there's a lwb yet to be freed after a txg commit
	 */
	lwb = list_head(&zilog->zl_lwb_list);
	if (lwb && lwb->lwb_state != UNWRITTEN)
		return (B_FALSE);
	ASSERT(zil_empty(zilog));
	return (B_TRUE);
}