/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev.h>

/*
 * The zfs intent log (ZIL) saves transaction records of system calls
 * that change the file system in memory with enough information
 * to be able to replay them. These are stored in memory until
 * either the DMU transaction group (txg) commits them to the stable pool
 * and they can be discarded, or they are flushed to the stable log
 * (also in the pool) due to a fsync, O_DSYNC or other synchronous
 * requirement. In the event of a panic or power fail then those log
 * records (transactions) are replayed.
 *
 * There is one ZIL per file system. Its on-disk (pool) format consists
 * of 3 parts:
 *
 *	- ZIL header
 *	- ZIL blocks
 *	- ZIL records
 *
 * A log record holds a system call transaction. Log blocks can
 * hold many log records and the blocks are chained together.
 * Each ZIL block contains a block pointer (blkptr_t) to the next
 * ZIL block in the chain. The ZIL header points to the first
 * block in the chain. Note there is not a fixed place in the pool
 * to hold blocks. They are dynamically allocated and freed as
 * needed from the blocks available. Figure X shows the ZIL structure:
 */
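
/*
 * Illustrative sketch (added here; not the original Figure X) of the
 * on-disk chain described above.  The ZIL header holds a block pointer
 * to the first log block, and each log block ends in a zil_trailer_t
 * whose zit_next_blk points at the next block in the chain:
 *
 *	zil_header_t	    log block		    log block
 *	+----------+	+------------------+	+------------------+
 *	| zh_log --+--->| lr | lr |.|trailer|-->| lr | lr |.|trailer|--> ...
 *	+----------+	+------------------+	+------------------+
 */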

/*
 * These global ZIL switches affect all pools
 */
int zil_disable = 0;	/* disable intent logging */
int zil_always = 0;	/* make every transaction synchronous */
int zil_purge = 0;	/* at pool open, just throw everything away */
int zil_noflush = 0;	/* don't flush write cache buffers on disks */

static kmem_cache_t *zil_lwb_cache;

static int
zil_dva_compare(const void *x1, const void *x2)
{
	const dva_t *dva1 = x1;
	const dva_t *dva2 = x2;

	if (DVA_GET_VDEV(dva1) < DVA_GET_VDEV(dva2))
		return (-1);
	if (DVA_GET_VDEV(dva1) > DVA_GET_VDEV(dva2))
		return (1);

	if (DVA_GET_OFFSET(dva1) < DVA_GET_OFFSET(dva2))
		return (-1);
	if (DVA_GET_OFFSET(dva1) > DVA_GET_OFFSET(dva2))
		return (1);

	return (0);
}

static void
zil_dva_tree_init(avl_tree_t *t)
{
	avl_create(t, zil_dva_compare, sizeof (zil_dva_node_t),
	    offsetof(zil_dva_node_t, zn_node));
}

static void
zil_dva_tree_fini(avl_tree_t *t)
{
	zil_dva_node_t *zn;
	void *cookie = NULL;

	while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(zn, sizeof (zil_dva_node_t));

	avl_destroy(t);
}

static int
zil_dva_tree_add(avl_tree_t *t, dva_t *dva)
{
	zil_dva_node_t *zn;
	avl_index_t where;

	if (avl_find(t, dva, &where) != NULL)
		return (EEXIST);

	zn = kmem_alloc(sizeof (zil_dva_node_t), KM_SLEEP);
	zn->zn_dva = *dva;
	avl_insert(t, zn, where);

	return (0);
}

/*
 * Read a log block, make sure it's valid, and byteswap it if necessary.
 */
static int
zil_read_log_block(zilog_t *zilog, blkptr_t *bp, char *buf)
{
	uint64_t blksz = BP_GET_LSIZE(bp);
	zil_trailer_t *ztp = (zil_trailer_t *)(buf + blksz) - 1;
	zio_cksum_t cksum;
	int error;

	error = zio_wait(zio_read(NULL, zilog->zl_spa, bp, buf, blksz,
	    NULL, NULL, ZIO_PRIORITY_SYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE));
	if (error) {
		dprintf_bp(bp, "zilog %p bp %p read failed, error %d: ",
		    zilog, bp, error);
		return (error);
	}

	if (BP_SHOULD_BYTESWAP(bp))
		byteswap_uint64_array(buf, blksz);

	/*
	 * Sequence numbers should be... sequential.  The checksum verifier for
	 * the next block should be: <logid[0], logid[1], objset id, seq + 1>.
	 */
	cksum = bp->blk_cksum;
	cksum.zc_word[3]++;
	if (bcmp(&cksum, &ztp->zit_next_blk.blk_cksum, sizeof (cksum)) != 0) {
		dprintf_bp(bp, "zilog %p bp %p stale pointer: ", zilog, bp);
		return (ESTALE);
	}

	if (BP_IS_HOLE(&ztp->zit_next_blk)) {
		dprintf_bp(bp, "zilog %p bp %p hole: ", zilog, bp);
		return (ENOENT);
	}

	if (ztp->zit_nused > (blksz - sizeof (zil_trailer_t))) {
		dprintf("zilog %p bp %p nused exceeds blksz\n", zilog, bp);
		return (EOVERFLOW);
	}

	dprintf_bp(bp, "zilog %p bp %p good block: ", zilog, bp);

	return (0);
}

/*
 * Parse the intent log, and call parse_func for each valid record within.
 */
void
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
    zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg)
{
	blkptr_t blk;
	char *lrbuf, *lrp;
	zil_trailer_t *ztp;
	int reclen, error;

	blk = zilog->zl_header->zh_log;
	if (BP_IS_HOLE(&blk))
		return;

	/*
	 * Starting at the block pointed to by zh_log we read the log chain.
	 * For each block in the chain we strongly check that block to
	 * ensure its validity.  We stop when an invalid block is found.
	 * For each block pointer in the chain we call parse_blk_func().
	 * For each record in each valid block we call parse_lr_func().
	 */
	zil_dva_tree_init(&zilog->zl_dva_tree);
	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	for (;;) {
		error = zil_read_log_block(zilog, &blk, lrbuf);

		if (parse_blk_func != NULL)
			parse_blk_func(zilog, &blk, arg, txg);

		if (error)
			break;

		ztp = (zil_trailer_t *)(lrbuf + BP_GET_LSIZE(&blk)) - 1;
		blk = ztp->zit_next_blk;

		if (parse_lr_func == NULL)
			continue;

		for (lrp = lrbuf; lrp < lrbuf + ztp->zit_nused; lrp += reclen) {
			lr_t *lr = (lr_t *)lrp;
			reclen = lr->lrc_reclen;
			ASSERT3U(reclen, >=, sizeof (lr_t));
			parse_lr_func(zilog, lr, arg, txg);
		}
	}
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);
	zil_dva_tree_fini(&zilog->zl_dva_tree);
}

/* ARGSUSED */
static void
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
	spa_t *spa = zilog->zl_spa;
	int err;

	dprintf_bp(bp, "first_txg %llu: ", first_txg);

	/*
	 * Claim log block if not already committed and not already claimed.
	 */
	if (bp->blk_birth >= first_txg &&
	    zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp)) == 0) {
		err = zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
		ASSERT(err == 0);
	}
}

static void
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg);
	}
}

/* ARGSUSED */
static void
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
	zio_free_blk(zilog->zl_spa, bp, dmu_tx_get_txg(tx));
}

static void
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
	/*
	 * If we previously claimed it, we need to free it.
	 */
	if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		if (bp->blk_birth >= claim_txg &&
		    !zil_dva_tree_add(&zilog->zl_dva_tree, BP_IDENTITY(bp))) {
			(void) arc_free(NULL, zilog->zl_spa,
			    dmu_tx_get_txg(tx), bp, NULL, NULL, ARC_WAIT);
		}
	}
}

/*
 * Create an on-disk intent log.
 */
static void
zil_create(zilog_t *zilog)
{
	lwb_t *lwb;
	uint64_t txg;
	dmu_tx_t *tx;
	blkptr_t blk;
	int error;
	int no_blk;

	ASSERT(zilog->zl_header->zh_claim_txg == 0);
	ASSERT(zilog->zl_header->zh_replay_seq == 0);

	/*
	 * Initialize the log header block.
	 */
	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	/*
	 * If we don't have a log block already then
	 * allocate the first log block and assign its checksum verifier.
	 */
	no_blk = BP_IS_HOLE(&zilog->zl_header->zh_log);
	if (no_blk) {
		error = zio_alloc_blk(zilog->zl_spa, ZIO_CHECKSUM_ZILOG,
		    ZIL_MIN_BLKSZ, &blk, txg);
	} else {
		blk = zilog->zl_header->zh_log;
		error = 0;
	}
	if (error == 0) {
		ZIO_SET_CHECKSUM(&blk.blk_cksum,
		    spa_get_random(-1ULL), spa_get_random(-1ULL),
		    dmu_objset_id(zilog->zl_os), 1ULL);

		/*
		 * Allocate a log write buffer (lwb) for the first log block.
		 */
		lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
		lwb->lwb_zilog = zilog;
		lwb->lwb_blk = blk;
		lwb->lwb_nused = 0;
		lwb->lwb_sz = BP_GET_LSIZE(&lwb->lwb_blk);
		lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
		lwb->lwb_max_txg = txg;
		lwb->lwb_seq = 0;
		lwb->lwb_state = UNWRITTEN;
		mutex_enter(&zilog->zl_lock);
		list_insert_tail(&zilog->zl_lwb_list, lwb);
		mutex_exit(&zilog->zl_lock);
	}

	dmu_tx_commit(tx);
	if (no_blk)
		txg_wait_synced(zilog->zl_dmu_pool, txg);
}

/*
 * In one tx, free all log blocks and clear the log header.
 */
void
zil_destroy(zilog_t *zilog)
{
	dmu_tx_t *tx;
	uint64_t txg;

	mutex_enter(&zilog->zl_destroy_lock);

	if (BP_IS_HOLE(&zilog->zl_header->zh_log)) {
		mutex_exit(&zilog->zl_destroy_lock);
		return;
	}

	tx = dmu_tx_create(zilog->zl_os);
	(void) dmu_tx_assign(tx, TXG_WAIT);
	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
	txg = dmu_tx_get_txg(tx);

	zil_parse(zilog, zil_free_log_block, zil_free_log_record, tx,
	    zilog->zl_header->zh_claim_txg);
	/*
	 * zil_sync clears the zil header as soon as the zl_destroy_txg commits
	 */
	zilog->zl_destroy_txg = txg;

	dmu_tx_commit(tx);
	txg_wait_synced(zilog->zl_dmu_pool, txg);

	mutex_exit(&zilog->zl_destroy_lock);
}

void
zil_claim(char *osname, void *txarg)
{
	dmu_tx_t *tx = txarg;
	uint64_t first_txg = dmu_tx_get_txg(tx);
	zilog_t *zilog;
	zil_header_t *zh;
	objset_t *os;
	int error;

	error = dmu_objset_open(osname, DMU_OST_ANY, DS_MODE_STANDARD, &os);
	if (error) {
		cmn_err(CE_WARN, "can't process intent log for %s", osname);
		return;
	}

	zilog = dmu_objset_zil(os);
	zh = zilog->zl_header;

	/*
	 * Claim all log blocks if we haven't already done so.
	 */
	ASSERT3U(zh->zh_claim_txg, <=, first_txg);
	if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
		zh->zh_claim_txg = first_txg;
		zil_parse(zilog, zil_claim_log_block, zil_claim_log_record,
		    tx, first_txg);
		dsl_dataset_dirty(dmu_objset_ds(os), tx);
	}
	ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
	dmu_objset_close(os);
}

void
zil_add_vdev(zilog_t *zilog, uint64_t vdev, uint64_t seq)
{
	zil_vdev_t *zv;

	if (zil_noflush)
		return;

	ASSERT(MUTEX_HELD(&zilog->zl_lock));
	zv = kmem_alloc(sizeof (zil_vdev_t), KM_SLEEP);
	zv->vdev = vdev;
	zv->seq = seq;
	list_insert_tail(&zilog->zl_vdev_list, zv);
}

void
zil_flush_vdevs(zilog_t *zilog, uint64_t seq)
{
	vdev_t *vd;
	zil_vdev_t *zv, *zv2;
	zio_t *zio;
	spa_t *spa;
	uint64_t vdev;

	if (zil_noflush)
		return;

	ASSERT(MUTEX_HELD(&zilog->zl_lock));

	spa = zilog->zl_spa;
	zio = NULL;

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL &&
	    zv->seq <= seq) {
		vdev = zv->vdev;
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));

		/*
		 * remove all chained entries <= seq with same vdev
		 */
		zv = list_head(&zilog->zl_vdev_list);
		while (zv && zv->seq <= seq) {
			zv2 = list_next(&zilog->zl_vdev_list, zv);
			if (zv->vdev == vdev) {
				list_remove(&zilog->zl_vdev_list, zv);
				kmem_free(zv, sizeof (zil_vdev_t));
			}
			zv = zv2;
		}

		/* flush the write cache for this vdev */
		mutex_exit(&zilog->zl_lock);
		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		vd = vdev_lookup_top(spa, vdev);
		ASSERT(vd);
		(void) zio_nowait(zio_ioctl(zio, spa, vd, DKIOCFLUSHWRITECACHE,
		    NULL, NULL, ZIO_PRIORITY_NOW,
		    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY));
		mutex_enter(&zilog->zl_lock);
	}

	/*
	 * Wait for all the flushes to complete.  Not all devices actually
	 * support the DKIOCFLUSHWRITECACHE ioctl, so it's OK if it fails.
	 */
	if (zio != NULL) {
		mutex_exit(&zilog->zl_lock);
		(void) zio_wait(zio);
		mutex_enter(&zilog->zl_lock);
	}
}

/*
 * Function called when a log block write completes
 */
static void
zil_lwb_write_done(zio_t *zio)
{
	lwb_t *prev;
	lwb_t *lwb = zio->io_private;
	zilog_t *zilog = lwb->lwb_zilog;
	uint64_t max_seq;

	/*
	 * Now that we've written this log block, we have a stable pointer
	 * to the next block in the chain, so it's OK to let the txg in
	 * which we allocated the next block sync.
	 */
	txg_rele_to_sync(&lwb->lwb_txgh);

	zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
	mutex_enter(&zilog->zl_lock);
	lwb->lwb_buf = NULL;
	if (zio->io_error) {
		zilog->zl_log_error = B_TRUE;
		mutex_exit(&zilog->zl_lock);
		cv_broadcast(&zilog->zl_cv_seq);
		return;
	}

	prev = list_prev(&zilog->zl_lwb_list, lwb);
	if (prev && prev->lwb_state != SEQ_COMPLETE) {
		/* There's an unwritten buffer in the chain before this one */
		lwb->lwb_state = SEQ_INCOMPLETE;
		mutex_exit(&zilog->zl_lock);
		return;
	}

	max_seq = lwb->lwb_seq;
	lwb->lwb_state = SEQ_COMPLETE;
	/*
	 * We must also follow up the chain for already written buffers
	 * to see if we can set zl_ss_seq even higher.
	 */
	while (lwb = list_next(&zilog->zl_lwb_list, lwb)) {
		if (lwb->lwb_state != SEQ_INCOMPLETE)
			break;
		lwb->lwb_state = SEQ_COMPLETE;
		/* lwb_seq will be zero if we've written an empty buffer */
		if (lwb->lwb_seq) {
			ASSERT3U(max_seq, <, lwb->lwb_seq);
			max_seq = lwb->lwb_seq;
		}
	}
	zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
	mutex_exit(&zilog->zl_lock);
	cv_broadcast(&zilog->zl_cv_seq);
}

/*
 * Start a log block write and advance to the next log block.
 * Calls are serialized.
 */
static lwb_t *
zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb)
{
	lwb_t *nlwb;
	zil_trailer_t *ztp = (zil_trailer_t *)(lwb->lwb_buf + lwb->lwb_sz) - 1;
	uint64_t txg;
	uint64_t zil_blksz;
	int error;

	ASSERT(lwb->lwb_nused <= ZIL_BLK_DATA_SZ(lwb));

	/*
	 * Allocate the next block and save its address in this block
	 * before writing it in order to establish the log chain.
	 * Note that if the allocation of nlwb synced before we wrote
	 * the block that points at it (lwb), we'd leak it if we crashed.
	 * Therefore, we don't do txg_rele_to_sync() until zil_lwb_write_done().
	 */
	txg = txg_hold_open(zilog->zl_dmu_pool, &lwb->lwb_txgh);
	txg_rele_to_quiesce(&lwb->lwb_txgh);

	/*
	 * Pick a ZIL blocksize. We request a size that is the
	 * maximum of the previous used size, the current used size and
	 * the amount waiting in the queue.
	 */
	zil_blksz = MAX(zilog->zl_cur_used, zilog->zl_prev_used);
	zil_blksz = MAX(zil_blksz, zilog->zl_itx_list_sz + sizeof (*ztp));
	zil_blksz = P2ROUNDUP(zil_blksz, ZIL_MIN_BLKSZ);
	if (zil_blksz > ZIL_MAX_BLKSZ)
		zil_blksz = ZIL_MAX_BLKSZ;

	error = zio_alloc_blk(zilog->zl_spa, ZIO_CHECKSUM_ZILOG,
	    zil_blksz, &ztp->zit_next_blk, txg);
	if (error) {
		txg_rele_to_sync(&lwb->lwb_txgh);
		return (NULL);
	}

	ASSERT3U(ztp->zit_next_blk.blk_birth, ==, txg);
	ztp->zit_nused = lwb->lwb_nused;
	ztp->zit_bt.zbt_cksum = lwb->lwb_blk.blk_cksum;
	ztp->zit_next_blk.blk_cksum = lwb->lwb_blk.blk_cksum;
	ztp->zit_next_blk.blk_cksum.zc_word[3]++;

	/*
	 * Allocate a new log write buffer (lwb).
	 */
	nlwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);

	nlwb->lwb_zilog = zilog;
	nlwb->lwb_blk = ztp->zit_next_blk;
	nlwb->lwb_nused = 0;
	nlwb->lwb_sz = BP_GET_LSIZE(&nlwb->lwb_blk);
	nlwb->lwb_buf = zio_buf_alloc(nlwb->lwb_sz);
	nlwb->lwb_max_txg = txg;
	nlwb->lwb_seq = 0;
	nlwb->lwb_state = UNWRITTEN;

	/*
	 * Put new lwb at the end of the log chain,
	 * and record the vdev for later flushing
	 */
	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_lwb_list, nlwb);
	zil_add_vdev(zilog, DVA_GET_VDEV(BP_IDENTITY(&(lwb->lwb_blk))),
	    lwb->lwb_seq);
	mutex_exit(&zilog->zl_lock);

	/*
	 * write the old log block
	 */
	dprintf_bp(&lwb->lwb_blk, "lwb %p txg %llu: ", lwb, txg);
	zio_nowait(zio_rewrite(NULL, zilog->zl_spa, ZIO_CHECKSUM_ZILOG, 0,
	    &lwb->lwb_blk, lwb->lwb_buf, lwb->lwb_sz, zil_lwb_write_done, lwb,
	    ZIO_PRIORITY_LOG_WRITE, ZIO_FLAG_MUSTSUCCEED));

	return (nlwb);
}

static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
	lr_t *lrc = &itx->itx_lr; /* common log record */
	uint64_t seq = lrc->lrc_seq;
	uint64_t txg = lrc->lrc_txg;
	uint64_t reclen = lrc->lrc_reclen;
	int error;

	if (lwb == NULL)
		return (NULL);
	ASSERT(lwb->lwb_buf != NULL);

	/*
	 * If it's a write, fetch the data or get its blkptr as appropriate.
	 */
	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		if (txg > spa_freeze_txg(zilog->zl_spa))
			txg_wait_synced(zilog->zl_dmu_pool, txg);

		if (!itx->itx_data_copied &&
		    (error = zilog->zl_get_data(itx->itx_private, lr)) != 0) {
			if (error != ENOENT && error != EALREADY) {
				txg_wait_synced(zilog->zl_dmu_pool, txg);
				mutex_enter(&zilog->zl_lock);
				zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
				zil_add_vdev(zilog,
				    DVA_GET_VDEV(BP_IDENTITY(&(lr->lr_blkptr))),
				    seq);
				mutex_exit(&zilog->zl_lock);
				return (lwb);
			}
			mutex_enter(&zilog->zl_lock);
			zil_add_vdev(zilog,
			    DVA_GET_VDEV(BP_IDENTITY(&(lr->lr_blkptr))), seq);
			mutex_exit(&zilog->zl_lock);
			return (lwb);
		}
	}

	zilog->zl_cur_used += reclen;

	/*
	 * If this record won't fit in the current log block, start a new one.
	 */
	if (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb)) {
		lwb = zil_lwb_write_start(zilog, lwb);
		if (lwb == NULL)
			return (NULL);
		if (lwb->lwb_nused + reclen > ZIL_BLK_DATA_SZ(lwb)) {
			txg_wait_synced(zilog->zl_dmu_pool, txg);
			mutex_enter(&zilog->zl_lock);
			zilog->zl_ss_seq = MAX(seq, zilog->zl_ss_seq);
			mutex_exit(&zilog->zl_lock);
			return (lwb);
		}
	}

	bcopy(lrc, lwb->lwb_buf + lwb->lwb_nused, reclen);
	lwb->lwb_nused += reclen;
	lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
	ASSERT3U(lwb->lwb_seq, <, seq);
	lwb->lwb_seq = seq;
	ASSERT3U(lwb->lwb_nused, <=, ZIL_BLK_DATA_SZ(lwb));
	ASSERT3U(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)), ==, 0);

	return (lwb);
}

itx_t *
zil_itx_create(int txtype, size_t lrsize)
{
	itx_t *itx;

	lrsize = P2ROUNDUP(lrsize, sizeof (uint64_t));

	itx = kmem_alloc(offsetof(itx_t, itx_lr) + lrsize, KM_SLEEP);
	itx->itx_lr.lrc_txtype = txtype;
	itx->itx_lr.lrc_reclen = lrsize;
	itx->itx_lr.lrc_seq = 0;	/* defensive */

	return (itx);
}

uint64_t
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
	uint64_t seq;

	ASSERT(itx->itx_lr.lrc_seq == 0);

	mutex_enter(&zilog->zl_lock);
	list_insert_tail(&zilog->zl_itx_list, itx);
	zilog->zl_itx_list_sz += itx->itx_lr.lrc_reclen;
	itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
	itx->itx_lr.lrc_seq = seq = ++zilog->zl_itx_seq;
	mutex_exit(&zilog->zl_lock);

	return (seq);
}
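
/*
 * Illustrative sketch (not part of the original source) of how the itx
 * interfaces above are typically used together with zil_commit() below.
 * The TX_WRITE record type and the local variable names are examples only:
 *
 *	itx = zil_itx_create(TX_WRITE, sizeof (lr_write_t));
 *	(fill in the type-specific fields of itx->itx_lr)
 *	seq = zil_itx_assign(zilog, itx, tx);
 *	dmu_tx_commit(tx);
 *	zil_commit(zilog, seq, ioflag);
 *
 * zil_commit() only pushes the record to the stable log when ioflag
 * carries FSYNC/FDSYNC/FRSYNC (or zil_always is set); otherwise the
 * record simply waits for its txg to sync and is then discarded.
 */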

/*
 * Free up all in-memory intent log transactions that have now been synced.
 */
static void
zil_itx_clean(zilog_t *zilog)
{
	uint64_t synced_txg = spa_last_synced_txg(zilog->zl_spa);
	uint64_t freeze_txg = spa_freeze_txg(zilog->zl_spa);
	uint64_t max_seq = 0;
	itx_t *itx;

	mutex_enter(&zilog->zl_lock);
	while ((itx = list_head(&zilog->zl_itx_list)) != NULL &&
	    itx->itx_lr.lrc_txg <= MIN(synced_txg, freeze_txg)) {
		list_remove(&zilog->zl_itx_list, itx);
		zilog->zl_itx_list_sz -= itx->itx_lr.lrc_reclen;
		ASSERT3U(max_seq, <, itx->itx_lr.lrc_seq);
		max_seq = itx->itx_lr.lrc_seq;
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
	}
	if (max_seq > zilog->zl_ss_seq) {
		zilog->zl_ss_seq = max_seq;
		cv_broadcast(&zilog->zl_cv_seq);
	}
	mutex_exit(&zilog->zl_lock);
}

void
zil_clean(zilog_t *zilog)
{
	/*
	 * Check for any log blocks that can be freed.
	 * Log blocks are only freed when the log block allocation and
	 * log records contained within are both known to be committed.
	 */
	mutex_enter(&zilog->zl_lock);
	if (list_head(&zilog->zl_itx_list) != NULL)
		(void) taskq_dispatch(zilog->zl_clean_taskq,
		    (void (*)(void *))zil_itx_clean, zilog, TQ_NOSLEEP);
	mutex_exit(&zilog->zl_lock);
}

/*
 * Push zfs transactions to stable storage up to the supplied sequence number.
 */
void
zil_commit(zilog_t *zilog, uint64_t seq, int ioflag)
{
	uint64_t txg;
	uint64_t max_seq;
	uint64_t reclen;
	itx_t *itx;
	lwb_t *lwb;
	spa_t *spa;

	if (zilog == NULL || seq == 0 ||
	    ((ioflag & (FSYNC | FDSYNC | FRSYNC)) == 0 && !zil_always))
		return;

	spa = zilog->zl_spa;
	mutex_enter(&zilog->zl_lock);

	seq = MIN(seq, zilog->zl_itx_seq);	/* cap seq at largest itx seq */

	for (;;) {
		if (zilog->zl_ss_seq >= seq) {	/* already on stable storage */
			cv_signal(&zilog->zl_cv_write);
			mutex_exit(&zilog->zl_lock);
			return;
		}

		if (zilog->zl_writer == B_FALSE)	/* no one writing, do it */
			break;

		cv_wait(&zilog->zl_cv_write, &zilog->zl_lock);
	}

	zilog->zl_writer = B_TRUE;
	max_seq = 0;

	if (zilog->zl_suspend) {
		lwb = NULL;
	} else {
		lwb = list_tail(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			zil_create(zilog);
			mutex_enter(&zilog->zl_lock);
			lwb = list_tail(&zilog->zl_lwb_list);
		}
	}

	/*
	 * Loop through in-memory log transactions filling log blocks,
	 * until we reach the given sequence number and there's no more
	 * room in the write buffer.
	 */
	for (;;) {
		itx = list_head(&zilog->zl_itx_list);
		if (itx == NULL)
			break;

		reclen = itx->itx_lr.lrc_reclen;
		if ((itx->itx_lr.lrc_seq > seq) &&
		    ((lwb == NULL) || (lwb->lwb_nused + reclen >
		    ZIL_BLK_DATA_SZ(lwb))))
			break;

		list_remove(&zilog->zl_itx_list, itx);
		txg = itx->itx_lr.lrc_txg;
		ASSERT(txg);

		mutex_exit(&zilog->zl_lock);
		if (txg > spa_last_synced_txg(spa) ||
		    txg > spa_freeze_txg(spa))
			lwb = zil_lwb_commit(zilog, itx, lwb);
		else
			max_seq = itx->itx_lr.lrc_seq;
		kmem_free(itx, offsetof(itx_t, itx_lr)
		    + itx->itx_lr.lrc_reclen);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_itx_list_sz -= reclen;
	}

	mutex_exit(&zilog->zl_lock);

	/* write the last block out */
	if (lwb != NULL && lwb->lwb_nused != 0)
		lwb = zil_lwb_write_start(zilog, lwb);

	zilog->zl_prev_used = zilog->zl_cur_used;
	zilog->zl_cur_used = 0;

	mutex_enter(&zilog->zl_lock);
	if (max_seq > zilog->zl_ss_seq) {
		zilog->zl_ss_seq = max_seq;
		cv_broadcast(&zilog->zl_cv_seq);
	}
	/*
	 * Wait if necessary for our seq to be committed.
	 */
	if (lwb) {
		while (zilog->zl_ss_seq < seq && zilog->zl_log_error == 0)
			cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
		zil_flush_vdevs(zilog, seq);
	}

	if (zilog->zl_log_error || lwb == NULL) {
		zilog->zl_log_error = 0;
		max_seq = zilog->zl_itx_seq;
		mutex_exit(&zilog->zl_lock);
		txg_wait_synced(zilog->zl_dmu_pool, 0);
		mutex_enter(&zilog->zl_lock);
		zilog->zl_ss_seq = MAX(max_seq, zilog->zl_ss_seq);
		cv_broadcast(&zilog->zl_cv_seq);
	}
	/* wake up others waiting to start a write */
	zilog->zl_writer = B_FALSE;
	mutex_exit(&zilog->zl_lock);
	cv_signal(&zilog->zl_cv_write);
}

/*
 * Called in syncing context to free committed log blocks and update log header.
 */
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = zilog->zl_spa;
	lwb_t *lwb;

	ASSERT(zilog->zl_stop_sync == 0);

	zilog->zl_header->zh_replay_seq = zilog->zl_replay_seq[txg & TXG_MASK];

	if (zilog->zl_destroy_txg == txg) {
		bzero(zilog->zl_header, sizeof (zil_header_t));
		bzero(zilog->zl_replay_seq, sizeof (zilog->zl_replay_seq));
		zilog->zl_destroy_txg = 0;
	}

	mutex_enter(&zilog->zl_lock);
	for (;;) {
		lwb = list_head(&zilog->zl_lwb_list);
		if (lwb == NULL) {
			mutex_exit(&zilog->zl_lock);
			return;
		}
		if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
			break;
		list_remove(&zilog->zl_lwb_list, lwb);
		zio_free_blk(spa, &lwb->lwb_blk, txg);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	zilog->zl_header->zh_log = lwb->lwb_blk;
	mutex_exit(&zilog->zl_lock);
}

void
zil_init(void)
{
	zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
	    sizeof (struct lwb), NULL, NULL, NULL, NULL, NULL, NULL, 0);
}

void
zil_fini(void)
{
	kmem_cache_destroy(zil_lwb_cache);
}

zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
	zilog_t *zilog;

	zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);

	zilog->zl_header = zh_phys;
	zilog->zl_os = os;
	zilog->zl_spa = dmu_objset_spa(os);
	zilog->zl_dmu_pool = dmu_objset_pool(os);

	list_create(&zilog->zl_itx_list, sizeof (itx_t),
	    offsetof(itx_t, itx_node));

	list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
	    offsetof(lwb_t, lwb_node));

	list_create(&zilog->zl_vdev_list, sizeof (zil_vdev_t),
	    offsetof(zil_vdev_t, vdev_seq_node));

	return (zilog);
}

void
zil_free(zilog_t *zilog)
{
	lwb_t *lwb;
	zil_vdev_t *zv;

	zilog->zl_stop_sync = 1;

	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		list_remove(&zilog->zl_lwb_list, lwb);
		if (lwb->lwb_buf != NULL)
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	list_destroy(&zilog->zl_lwb_list);

	while ((zv = list_head(&zilog->zl_vdev_list)) != NULL) {
		list_remove(&zilog->zl_vdev_list, zv);
		kmem_free(zv, sizeof (zil_vdev_t));
	}
	list_destroy(&zilog->zl_vdev_list);

	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
	list_destroy(&zilog->zl_itx_list);

	kmem_free(zilog, sizeof (zilog_t));
}

/*
 * Return true if there is no valid initial zil log block,
 * i.e. the log is empty.
 */
static int
zil_empty(zilog_t *zilog)
{
	blkptr_t blk;
	char *lrbuf;
	int error;

	blk = zilog->zl_header->zh_log;
	if (BP_IS_HOLE(&blk))
		return (1);

	lrbuf = zio_buf_alloc(SPA_MAXBLOCKSIZE);
	error = zil_read_log_block(zilog, &blk, lrbuf);
	zio_buf_free(lrbuf, SPA_MAXBLOCKSIZE);
	return (error ? 1 : 0);
}

/*
 * Open an intent log.
 */
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
	zilog_t *zilog = dmu_objset_zil(os);

	zilog->zl_get_data = get_data;
	zilog->zl_clean_taskq = taskq_create("zil_clean", 1, minclsyspri,
	    2, 2, TASKQ_PREPOPULATE);

	return (zilog);
}

/*
 * Close an intent log.
 */
void
zil_close(zilog_t *zilog)
{
	if (!zil_empty(zilog))
		txg_wait_synced(zilog->zl_dmu_pool, 0);
	taskq_destroy(zilog->zl_clean_taskq);
	zilog->zl_clean_taskq = NULL;
	zilog->zl_get_data = NULL;

	zil_itx_clean(zilog);
	ASSERT(list_head(&zilog->zl_itx_list) == NULL);
}
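
/*
 * Illustrative sketch (not part of the original source) of how
 * zil_suspend()/zil_resume() below are expected to bracket an operation
 * such as taking a snapshot; error handling is elided:
 *
 *	error = zil_suspend(zilog);
 *	if (error == 0) {
 *		(take the snapshot; the intent log is now empty)
 *		zil_resume(zilog);
 *	}
 */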

/*
 * Suspend an intent log.  While in suspended mode, we still honor
 * synchronous semantics, but we rely on txg_wait_synced() to do it.
 * We suspend the log briefly when taking a snapshot so that the snapshot
 * contains all the data it's supposed to, and has an empty intent log.
 */
int
zil_suspend(zilog_t *zilog)
{
	lwb_t *lwb;

	mutex_enter(&zilog->zl_lock);
	if (zilog->zl_header->zh_claim_txg != 0) {	/* unplayed log */
		mutex_exit(&zilog->zl_lock);
		return (EBUSY);
	}
	zilog->zl_suspend++;
	mutex_exit(&zilog->zl_lock);

	zil_commit(zilog, UINT64_MAX, FSYNC);

	mutex_enter(&zilog->zl_lock);
	while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
		if (lwb->lwb_buf != NULL) {
			/*
			 * Wait for the buffer if it's in the process of
			 * being written.
			 */
			if ((lwb->lwb_seq != 0) &&
			    (lwb->lwb_state != SEQ_COMPLETE)) {
				cv_wait(&zilog->zl_cv_seq, &zilog->zl_lock);
				continue;
			}
			zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
		}
		list_remove(&zilog->zl_lwb_list, lwb);
		kmem_cache_free(zil_lwb_cache, lwb);
	}
	mutex_exit(&zilog->zl_lock);

	zil_destroy(zilog);

	return (0);
}

void
zil_resume(zilog_t *zilog)
{
	mutex_enter(&zilog->zl_lock);
	ASSERT(zilog->zl_suspend != 0);
	zilog->zl_suspend--;
	mutex_exit(&zilog->zl_lock);
}

typedef struct zil_replay_arg {
	objset_t	*zr_os;
	zil_replay_func_t **zr_replay;
	void		*zr_arg;
	void		(*zr_rm_sync)(void *arg);
	uint64_t	*zr_txgp;
	boolean_t	zr_byteswap;
	char		*zr_lrbuf;
} zil_replay_arg_t;

static void
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
	zil_replay_arg_t *zr = zra;
	zil_header_t *zh = zilog->zl_header;
	uint64_t reclen = lr->lrc_reclen;
	uint64_t txtype = lr->lrc_txtype;
	int pass, error;

	if (zilog->zl_stop_replay)
		return;

	if (lr->lrc_txg < claim_txg)		/* already committed */
		return;

	if (lr->lrc_seq <= zh->zh_replay_seq)	/* already replayed */
		return;

	/*
	 * Make a copy of the data so we can revise and extend it.
	 */
	bcopy(lr, zr->zr_lrbuf, reclen);

	/*
	 * The log block containing this lr may have been byteswapped
	 * so that we can easily examine common fields like lrc_txtype.
	 * However, the log is a mix of different data types, and only the
	 * replay vectors know how to byteswap their records.  Therefore, if
	 * the lr was byteswapped, undo it before invoking the replay vector.
	 */
	if (zr->zr_byteswap)
		byteswap_uint64_array(zr->zr_lrbuf, reclen);

	/*
	 * If this is a TX_WRITE with a blkptr, suck in the data.
	 */
	if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
		lr_write_t *lrw = (lr_write_t *)lr;
		blkptr_t *wbp = &lrw->lr_blkptr;
		uint64_t wlen = lrw->lr_length;
		char *wbuf = zr->zr_lrbuf + reclen;

		if (BP_IS_HOLE(wbp)) {	/* compressed to a hole */
			bzero(wbuf, wlen);
		} else {
			/*
			 * A subsequent write may have overwritten this block,
			 * in which case wbp may have been freed and
			 * reallocated, and our read of wbp may fail with a
			 * checksum error.  We can safely ignore this because
			 * the later write will provide the correct data.
			 */
			(void) zio_wait(zio_read(NULL, zilog->zl_spa,
			    wbp, wbuf, BP_GET_LSIZE(wbp), NULL, NULL,
			    ZIO_PRIORITY_SYNC_READ,
			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE));
			(void) memmove(wbuf, wbuf + lrw->lr_blkoff, wlen);
		}
	}

	/*
	 * We must now do two things atomically: replay this log record,
	 * and update the log header to reflect the fact that we did so.
	 * We use the DMU's ability to assign into a specific txg to do this.
	 */
	for (pass = 1; /* CONSTANTCONDITION */; pass++) {
		uint64_t replay_txg;
		dmu_tx_t *replay_tx;

		replay_tx = dmu_tx_create(zr->zr_os);
		error = dmu_tx_assign(replay_tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(replay_tx);
			break;
		}

		replay_txg = dmu_tx_get_txg(replay_tx);

		if (txtype == 0 || txtype >= TX_MAX_TYPE) {
			error = EINVAL;
		} else {
			/*
			 * On the first pass, arrange for the replay vector
			 * to fail its dmu_tx_assign().  That's the only way
			 * to ensure that those code paths remain well tested.
			 */
			*zr->zr_txgp = replay_txg - (pass == 1);
			error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lrbuf,
			    zr->zr_byteswap);
			*zr->zr_txgp = TXG_NOWAIT;
		}

		if (error == 0) {
			dsl_dataset_dirty(dmu_objset_ds(zr->zr_os), replay_tx);
			zilog->zl_replay_seq[replay_txg & TXG_MASK] =
			    lr->lrc_seq;
		}

		dmu_tx_commit(replay_tx);

		if (error != ERESTART)
			break;

		if (pass != 1)
			txg_wait_open(spa_get_dsl(zilog->zl_spa),
			    replay_txg + 1);

		dprintf("pass %d, retrying\n", pass);
	}

	if (error) {
		char *name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
		dmu_objset_name(zr->zr_os, name);
		cmn_err(CE_WARN, "ZFS replay transaction error %d, "
		    "dataset %s, seq 0x%llx, txtype %llu\n",
		    error, name,
		    (u_longlong_t)lr->lrc_seq, (u_longlong_t)txtype);
		zilog->zl_stop_replay = 1;
		kmem_free(name, MAXNAMELEN);
	}

	/*
	 * The DMU's dnode layer doesn't see removes until the txg commits,
	 * so a subsequent claim can spuriously fail with EEXIST.
	 * To prevent this, if we might have removed an object,
	 * wait for the delete thread to delete it, and then
	 * wait for the transaction group to sync.
	 */
	if (txtype == TX_REMOVE || txtype == TX_RMDIR || txtype == TX_RENAME) {
		if (zr->zr_rm_sync != NULL)
			zr->zr_rm_sync(zr->zr_arg);
		txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
	}
}

/*
 * If this dataset has a non-empty intent log, replay it and destroy it.
 */
void
zil_replay(objset_t *os, void *arg, uint64_t *txgp,
    zil_replay_func_t *replay_func[TX_MAX_TYPE], void (*rm_sync)(void *arg))
{
	zilog_t *zilog = dmu_objset_zil(os);
	zil_replay_arg_t zr;

	if (zil_empty(zilog)) {
		/*
		 * Initialise the log header but don't free the log block
		 * which will get reused.
		 */
		zilog->zl_header->zh_claim_txg = 0;
		zilog->zl_header->zh_replay_seq = 0;
		return;
	}

	zr.zr_os = os;
	zr.zr_replay = replay_func;
	zr.zr_arg = arg;
	zr.zr_rm_sync = rm_sync;
	zr.zr_txgp = txgp;
	zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zilog->zl_header->zh_log);
	zr.zr_lrbuf = kmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);

	/*
	 * Wait for in-progress removes to sync before starting replay.
	 */
	if (rm_sync != NULL)
		rm_sync(arg);
	txg_wait_synced(zilog->zl_dmu_pool, 0);

	zilog->zl_stop_replay = 0;
	zil_parse(zilog, NULL, zil_replay_log_record, &zr,
	    zilog->zl_header->zh_claim_txg);
	kmem_free(zr.zr_lrbuf, 2 * SPA_MAXBLOCKSIZE);

	zil_destroy(zilog);
}