1789Sahrens /*
2789Sahrens * CDDL HEADER START
3789Sahrens *
4789Sahrens * The contents of this file are subject to the terms of the
51484Sek110237 * Common Development and Distribution License (the "License").
61484Sek110237 * You may not use this file except in compliance with the License.
7789Sahrens *
8789Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens * or http://www.opensolaris.org/os/licensing.
10789Sahrens * See the License for the specific language governing permissions
11789Sahrens * and limitations under the License.
12789Sahrens *
13789Sahrens * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens *
19789Sahrens * CDDL HEADER END
20789Sahrens */
21789Sahrens /*
226018Sbrendan * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23789Sahrens * Use is subject to license terms.
24789Sahrens */
25789Sahrens
26789Sahrens #pragma ident "%Z%%M% %I% %E% SMI"
27789Sahrens
28789Sahrens /*
293403Sbmc * DVA-based Adjustable Replacement Cache
30789Sahrens *
311544Seschrock * While much of the theory of operation used here is
321544Seschrock * based on the self-tuning, low overhead replacement cache
33789Sahrens * presented by Megiddo and Modha at FAST 2003, there are some
34789Sahrens * significant differences:
35789Sahrens *
36789Sahrens * 1. The Megiddo and Modha model assumes any page is evictable.
37789Sahrens * Pages in its cache cannot be "locked" into memory. This makes
38789Sahrens * the eviction algorithm simple: evict the last page in the list.
39789Sahrens * This also makes the performance characteristics easy to reason
40789Sahrens * about. Our cache is not so simple. At any given moment, some
41789Sahrens * subset of the blocks in the cache are un-evictable because we
42789Sahrens * have handed out a reference to them. Blocks are only evictable
43789Sahrens * when there are no external references active. This makes
44789Sahrens * eviction far more problematic: we choose to evict the evictable
45789Sahrens * blocks that are the "lowest" in the list.
46789Sahrens *
47789Sahrens * There are times when it is not possible to evict the requested
48789Sahrens * space. In these circumstances we are unable to adjust the cache
49789Sahrens * size. To prevent the cache growing unbounded at these times we
505450Sbrendan * implement a "cache throttle" that slows the flow of new data
515450Sbrendan * into the cache until we can make space available.
52789Sahrens *
53789Sahrens * 2. The Megiddo and Modha model assumes a fixed cache size.
54789Sahrens * Pages are evicted when the cache is full and there is a cache
55789Sahrens * miss. Our model has a variable sized cache. It grows with
565450Sbrendan * high use, but also tries to react to memory pressure from the
57789Sahrens * operating system: decreasing its size when system memory is
58789Sahrens * tight.
59789Sahrens *
60789Sahrens * 3. The Megiddo and Modha model assumes a fixed page size. All
61789Sahrens * elements of the cache are therefore exactly the same size. So
62789Sahrens * when adjusting the cache size following a cache miss, it's simply
63789Sahrens * a matter of choosing a single page to evict. In our model, we
64789Sahrens * have variable sized cache blocks (ranging from 512 bytes to
65789Sahrens * 128K bytes). We therefore choose a set of blocks to evict to make
66789Sahrens * space for a cache miss that approximates as closely as possible
67789Sahrens * the space used by the new block.
68789Sahrens *
69789Sahrens * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70789Sahrens * by N. Megiddo & D. Modha, FAST 2003
71789Sahrens */
72789Sahrens
73789Sahrens /*
74789Sahrens * The locking model:
75789Sahrens *
76789Sahrens * A new reference to a cache buffer can be obtained in two
77789Sahrens * ways: 1) via a hash table lookup using the DVA as a key,
785450Sbrendan * or 2) via one of the ARC lists. The arc_read() interface
79789Sahrens * uses method 1, while the internal arc algorithms for
80789Sahrens * adjusting the cache use method 2. We therefore provide two
81789Sahrens * types of locks: 1) the hash table lock array, and 2) the
82789Sahrens * arc list locks.
83789Sahrens *
84789Sahrens * Buffers do not have their own mutexes, rather they rely on the
85789Sahrens * hash table mutexes for the bulk of their protection (i.e. most
86789Sahrens * fields in the arc_buf_hdr_t are protected by these mutexes).
87789Sahrens *
88789Sahrens * buf_hash_find() returns the appropriate mutex (held) when it
89789Sahrens * locates the requested buffer in the hash table. It returns
90789Sahrens * NULL for the mutex if the buffer was not in the table.
91789Sahrens *
92789Sahrens * buf_hash_remove() expects the appropriate hash mutex to be
93789Sahrens * already held before it is invoked.
94789Sahrens *
95789Sahrens * Each arc state also has a mutex which is used to protect the
96789Sahrens * buffer list associated with the state. When attempting to
97789Sahrens * obtain a hash table lock while holding an arc list lock you
98789Sahrens * must use mutex_tryenter() to avoid deadlock. Also note that
992688Smaybee * the active state mutex must be held before the ghost state mutex.
100789Sahrens *
1011544Seschrock * Arc buffers may have an associated eviction callback function.
1021544Seschrock * This function will be invoked prior to removing the buffer (e.g.
1031544Seschrock * in arc_do_user_evicts()). Note however that the data associated
1041544Seschrock * with the buffer may be evicted prior to the callback. The callback
1051544Seschrock * must be made with *no locks held* (to prevent deadlock). Additionally,
1061544Seschrock * the users of callbacks must ensure that their private data is
1071544Seschrock * protected from simultaneous callbacks from arc_buf_evict()
1081544Seschrock * and arc_do_user_evicts().
1091544Seschrock *
110789Sahrens * Note that the majority of the performance stats are manipulated
111789Sahrens * with atomic operations.
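 *
 * (Editor's addition, not in the original source: an illustrative sketch of
 * the mutex_tryenter() rule described above. The names here are hypothetical,
 * but the shape mirrors what arc_evict() does further down in this file:
 * while an arc list lock is held, a hash table lock may only be tried,
 * never waited for.)
 *
 *	mutex_enter(&state->arcs_mtx);
 *	for (ab = list_tail(list); ab != NULL; ab = list_prev(list, ab)) {
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;	(skip it; blocking here could deadlock)
 *		... operate on ab with both locks held ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);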
1125450Sbrendan *
1135450Sbrendan * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
1145450Sbrendan *
1155450Sbrendan * - L2ARC buflist creation
1165450Sbrendan * - L2ARC buflist eviction
1175450Sbrendan * - L2ARC write completion, which walks L2ARC buflists
1185450Sbrendan * - ARC header destruction, as it removes from L2ARC buflists
1195450Sbrendan * - ARC header release, as it removes from L2ARC buflists
120789Sahrens */
121789Sahrens
122789Sahrens #include <sys/spa.h>
123789Sahrens #include <sys/zio.h>
1243093Sahrens #include <sys/zio_checksum.h>
125789Sahrens #include <sys/zfs_context.h>
126789Sahrens #include <sys/arc.h>
127789Sahrens #include <sys/refcount.h>
128*6643Seschrock #include <sys/vdev.h>
129789Sahrens #ifdef _KERNEL
130789Sahrens #include <sys/vmsystm.h>
131789Sahrens #include <vm/anon.h>
132789Sahrens #include <sys/fs/swapnode.h>
1331484Sek110237 #include <sys/dnlc.h>
134789Sahrens #endif
135789Sahrens #include <sys/callb.h>
1363403Sbmc #include <sys/kstat.h>
137789Sahrens
138789Sahrens static kmutex_t arc_reclaim_thr_lock;
139789Sahrens static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
140789Sahrens static uint8_t arc_thread_exit;
141789Sahrens
1426245Smaybee extern int zfs_write_limit_shift;
1436245Smaybee extern uint64_t zfs_write_limit_max;
1446245Smaybee extern uint64_t zfs_write_limit_inflated;
1456245Smaybee
1461484Sek110237 #define ARC_REDUCE_DNLC_PERCENT 3
1471484Sek110237 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
1481484Sek110237
149789Sahrens typedef enum arc_reclaim_strategy {
150789Sahrens ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
151789Sahrens ARC_RECLAIM_CONS /* Conservative reclaim strategy */
152789Sahrens } arc_reclaim_strategy_t;
153789Sahrens
154789Sahrens /* number of seconds before growing cache again */
155789Sahrens static int arc_grow_retry = 60;
156789Sahrens
1572391Smaybee /*
1582638Sperrin * minimum lifespan of a prefetch block in clock ticks
1592638Sperrin * (initialized in arc_init())
1602391Smaybee */
1612638Sperrin static int arc_min_prefetch_lifespan;
1622391Smaybee
163789Sahrens static int arc_dead;
164789Sahrens
165789Sahrens /*
1662885Sahrens * These tunables are for performance analysis.
1672885Sahrens */
1682885Sahrens uint64_t zfs_arc_max;
1692885Sahrens uint64_t zfs_arc_min;
1704645Sek110237 uint64_t zfs_arc_meta_limit = 0;
1712885Sahrens
1722885Sahrens /*
1735450Sbrendan * Note that buffers can be in one of 6 states:
174789Sahrens * ARC_anon - anonymous (discussed below)
1751544Seschrock * ARC_mru - recently used, currently cached
1761544Seschrock * ARC_mru_ghost - recently used, no longer in cache
1771544Seschrock * ARC_mfu - frequently used, currently cached
1781544Seschrock * ARC_mfu_ghost - frequently used, no longer in cache
1795450Sbrendan * ARC_l2c_only - exists in L2ARC but not other states
1804309Smaybee * When there are no active references to a buffer, it is
1814309Smaybee * linked onto a list in one of these arc states. These are
1824309Smaybee * the only buffers that can be evicted or deleted. Within each
1834309Smaybee * state there are multiple lists, one for meta-data and one for
1844309Smaybee * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
1854309Smaybee * etc.) is tracked separately so that it can be managed more
1865450Sbrendan * explicitly: favored over data, limited explicitly.
187789Sahrens *
188789Sahrens * Anonymous buffers are buffers that are not associated with
189789Sahrens * a DVA.
These are buffers that hold dirty block copies 190789Sahrens * before they are written to stable storage. By definition, 1911544Seschrock * they are "ref'd" and are considered part of arc_mru 192789Sahrens * that cannot be freed. Generally, they will aquire a DVA 1931544Seschrock * as they are written and migrate onto the arc_mru list. 1945450Sbrendan * 1955450Sbrendan * The ARC_l2c_only state is for buffers that are in the second 1965450Sbrendan * level ARC but no longer in any of the ARC_m* lists. The second 1975450Sbrendan * level ARC itself may also contain buffers that are in any of 1985450Sbrendan * the ARC_m* states - meaning that a buffer can exist in two 1995450Sbrendan * places. The reason for the ARC_l2c_only state is to keep the 2005450Sbrendan * buffer header in the hash table, so that reads that hit the 2015450Sbrendan * second level ARC benefit from these fast lookups. 202789Sahrens */ 203789Sahrens 204789Sahrens typedef struct arc_state { 2054309Smaybee list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */ 2064309Smaybee uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ 2074309Smaybee uint64_t arcs_size; /* total amount of data in this state */ 2083403Sbmc kmutex_t arcs_mtx; 209789Sahrens } arc_state_t; 210789Sahrens 2115450Sbrendan /* The 6 states: */ 212789Sahrens static arc_state_t ARC_anon; 2131544Seschrock static arc_state_t ARC_mru; 2141544Seschrock static arc_state_t ARC_mru_ghost; 2151544Seschrock static arc_state_t ARC_mfu; 2161544Seschrock static arc_state_t ARC_mfu_ghost; 2175450Sbrendan static arc_state_t ARC_l2c_only; 218789Sahrens 2193403Sbmc typedef struct arc_stats { 2203403Sbmc kstat_named_t arcstat_hits; 2213403Sbmc kstat_named_t arcstat_misses; 2223403Sbmc kstat_named_t arcstat_demand_data_hits; 2233403Sbmc kstat_named_t arcstat_demand_data_misses; 2243403Sbmc kstat_named_t arcstat_demand_metadata_hits; 2253403Sbmc kstat_named_t arcstat_demand_metadata_misses; 2263403Sbmc kstat_named_t arcstat_prefetch_data_hits; 2273403Sbmc kstat_named_t arcstat_prefetch_data_misses; 2283403Sbmc kstat_named_t arcstat_prefetch_metadata_hits; 2293403Sbmc kstat_named_t arcstat_prefetch_metadata_misses; 2303403Sbmc kstat_named_t arcstat_mru_hits; 2313403Sbmc kstat_named_t arcstat_mru_ghost_hits; 2323403Sbmc kstat_named_t arcstat_mfu_hits; 2333403Sbmc kstat_named_t arcstat_mfu_ghost_hits; 2343403Sbmc kstat_named_t arcstat_deleted; 2353403Sbmc kstat_named_t arcstat_recycle_miss; 2363403Sbmc kstat_named_t arcstat_mutex_miss; 2373403Sbmc kstat_named_t arcstat_evict_skip; 2383403Sbmc kstat_named_t arcstat_hash_elements; 2393403Sbmc kstat_named_t arcstat_hash_elements_max; 2403403Sbmc kstat_named_t arcstat_hash_collisions; 2413403Sbmc kstat_named_t arcstat_hash_chains; 2423403Sbmc kstat_named_t arcstat_hash_chain_max; 2433403Sbmc kstat_named_t arcstat_p; 2443403Sbmc kstat_named_t arcstat_c; 2453403Sbmc kstat_named_t arcstat_c_min; 2463403Sbmc kstat_named_t arcstat_c_max; 2473403Sbmc kstat_named_t arcstat_size; 2485450Sbrendan kstat_named_t arcstat_hdr_size; 2495450Sbrendan kstat_named_t arcstat_l2_hits; 2505450Sbrendan kstat_named_t arcstat_l2_misses; 2515450Sbrendan kstat_named_t arcstat_l2_feeds; 2525450Sbrendan kstat_named_t arcstat_l2_rw_clash; 2535450Sbrendan kstat_named_t arcstat_l2_writes_sent; 2545450Sbrendan kstat_named_t arcstat_l2_writes_done; 2555450Sbrendan kstat_named_t arcstat_l2_writes_error; 2565450Sbrendan kstat_named_t arcstat_l2_writes_hdr_miss; 2575450Sbrendan kstat_named_t arcstat_l2_evict_lock_retry; 2585450Sbrendan 
kstat_named_t arcstat_l2_evict_reading; 2595450Sbrendan kstat_named_t arcstat_l2_free_on_write; 2605450Sbrendan kstat_named_t arcstat_l2_abort_lowmem; 2615450Sbrendan kstat_named_t arcstat_l2_cksum_bad; 2625450Sbrendan kstat_named_t arcstat_l2_io_error; 2635450Sbrendan kstat_named_t arcstat_l2_size; 2645450Sbrendan kstat_named_t arcstat_l2_hdr_size; 2656245Smaybee kstat_named_t arcstat_memory_throttle_count; 2663403Sbmc } arc_stats_t; 2673403Sbmc 2683403Sbmc static arc_stats_t arc_stats = { 2693403Sbmc { "hits", KSTAT_DATA_UINT64 }, 2703403Sbmc { "misses", KSTAT_DATA_UINT64 }, 2713403Sbmc { "demand_data_hits", KSTAT_DATA_UINT64 }, 2723403Sbmc { "demand_data_misses", KSTAT_DATA_UINT64 }, 2733403Sbmc { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 2743403Sbmc { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 2753403Sbmc { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 2763403Sbmc { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 2773403Sbmc { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 2783403Sbmc { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 2793403Sbmc { "mru_hits", KSTAT_DATA_UINT64 }, 2803403Sbmc { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 2813403Sbmc { "mfu_hits", KSTAT_DATA_UINT64 }, 2823403Sbmc { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 2833403Sbmc { "deleted", KSTAT_DATA_UINT64 }, 2843403Sbmc { "recycle_miss", KSTAT_DATA_UINT64 }, 2853403Sbmc { "mutex_miss", KSTAT_DATA_UINT64 }, 2863403Sbmc { "evict_skip", KSTAT_DATA_UINT64 }, 2873403Sbmc { "hash_elements", KSTAT_DATA_UINT64 }, 2883403Sbmc { "hash_elements_max", KSTAT_DATA_UINT64 }, 2893403Sbmc { "hash_collisions", KSTAT_DATA_UINT64 }, 2903403Sbmc { "hash_chains", KSTAT_DATA_UINT64 }, 2913403Sbmc { "hash_chain_max", KSTAT_DATA_UINT64 }, 2923403Sbmc { "p", KSTAT_DATA_UINT64 }, 2933403Sbmc { "c", KSTAT_DATA_UINT64 }, 2943403Sbmc { "c_min", KSTAT_DATA_UINT64 }, 2953403Sbmc { "c_max", KSTAT_DATA_UINT64 }, 2965450Sbrendan { "size", KSTAT_DATA_UINT64 }, 2975450Sbrendan { "hdr_size", KSTAT_DATA_UINT64 }, 2985450Sbrendan { "l2_hits", KSTAT_DATA_UINT64 }, 2995450Sbrendan { "l2_misses", KSTAT_DATA_UINT64 }, 3005450Sbrendan { "l2_feeds", KSTAT_DATA_UINT64 }, 3015450Sbrendan { "l2_rw_clash", KSTAT_DATA_UINT64 }, 3025450Sbrendan { "l2_writes_sent", KSTAT_DATA_UINT64 }, 3035450Sbrendan { "l2_writes_done", KSTAT_DATA_UINT64 }, 3045450Sbrendan { "l2_writes_error", KSTAT_DATA_UINT64 }, 3055450Sbrendan { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 3065450Sbrendan { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 3075450Sbrendan { "l2_evict_reading", KSTAT_DATA_UINT64 }, 3085450Sbrendan { "l2_free_on_write", KSTAT_DATA_UINT64 }, 3095450Sbrendan { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 3105450Sbrendan { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 3115450Sbrendan { "l2_io_error", KSTAT_DATA_UINT64 }, 3125450Sbrendan { "l2_size", KSTAT_DATA_UINT64 }, 3136245Smaybee { "l2_hdr_size", KSTAT_DATA_UINT64 }, 3146245Smaybee { "memory_throttle_count", KSTAT_DATA_UINT64 } 3153403Sbmc }; 316789Sahrens 3173403Sbmc #define ARCSTAT(stat) (arc_stats.stat.value.ui64) 3183403Sbmc 3193403Sbmc #define ARCSTAT_INCR(stat, val) \ 3203403Sbmc atomic_add_64(&arc_stats.stat.value.ui64, (val)); 3213403Sbmc 3223403Sbmc #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 3233403Sbmc #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 3243403Sbmc 3253403Sbmc #define ARCSTAT_MAX(stat, val) { \ 3263403Sbmc uint64_t m; \ 3273403Sbmc while ((val) > (m = arc_stats.stat.value.ui64) && \ 3283403Sbmc (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 3293403Sbmc continue; \ 3303403Sbmc } 
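/*
 * (Editor's addition, not in the original source.) ARCSTAT_MAX() above is a
 * lock-free "running maximum": re-read the current value, and only install
 * the new one if no other thread raced in with something larger. A
 * stand-alone equivalent, assuming only <atomic.h>, might look like:
 *
 *	static void
 *	stat_max_update(volatile uint64_t *maxp, uint64_t val)
 *	{
 *		uint64_t m;
 *
 *		while (val > (m = *maxp) &&
 *		    m != atomic_cas_64(maxp, m, val))
 *			continue;
 *	}
 *
 * atomic_cas_64() returns the prior contents of *maxp; if that differs from
 * m, the swap did not happen and the loop retries against the newer maximum.
 */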
3313403Sbmc 3323403Sbmc #define ARCSTAT_MAXSTAT(stat) \ 3333403Sbmc ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 334789Sahrens 3353403Sbmc /* 3363403Sbmc * We define a macro to allow ARC hits/misses to be easily broken down by 3373403Sbmc * two separate conditions, giving a total of four different subtypes for 3383403Sbmc * each of hits and misses (so eight statistics total). 3393403Sbmc */ 3403403Sbmc #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 3413403Sbmc if (cond1) { \ 3423403Sbmc if (cond2) { \ 3433403Sbmc ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 3443403Sbmc } else { \ 3453403Sbmc ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 3463403Sbmc } \ 3473403Sbmc } else { \ 3483403Sbmc if (cond2) { \ 3493403Sbmc ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 3503403Sbmc } else { \ 3513403Sbmc ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 3523403Sbmc } \ 3533403Sbmc } 354789Sahrens 3553403Sbmc kstat_t *arc_ksp; 3563403Sbmc static arc_state_t *arc_anon; 3573403Sbmc static arc_state_t *arc_mru; 3583403Sbmc static arc_state_t *arc_mru_ghost; 3593403Sbmc static arc_state_t *arc_mfu; 3603403Sbmc static arc_state_t *arc_mfu_ghost; 3615450Sbrendan static arc_state_t *arc_l2c_only; 3623403Sbmc 3633403Sbmc /* 3643403Sbmc * There are several ARC variables that are critical to export as kstats -- 3653403Sbmc * but we don't want to have to grovel around in the kstat whenever we wish to 3663403Sbmc * manipulate them. For these variables, we therefore define them to be in 3673403Sbmc * terms of the statistic variable. This assures that we are not introducing 3683403Sbmc * the possibility of inconsistency by having shadow copies of the variables, 3693403Sbmc * while still allowing the code to be readable. 
3703403Sbmc */ 3713403Sbmc #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 3723403Sbmc #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 3733403Sbmc #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 3743403Sbmc #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 3753403Sbmc #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 3763403Sbmc 3773403Sbmc static int arc_no_grow; /* Don't try to grow cache size */ 3783403Sbmc static uint64_t arc_tempreserve; 3794309Smaybee static uint64_t arc_meta_used; 3804309Smaybee static uint64_t arc_meta_limit; 3814309Smaybee static uint64_t arc_meta_max = 0; 382789Sahrens 3835450Sbrendan typedef struct l2arc_buf_hdr l2arc_buf_hdr_t; 3845450Sbrendan 385789Sahrens typedef struct arc_callback arc_callback_t; 386789Sahrens 387789Sahrens struct arc_callback { 3883547Smaybee void *acb_private; 389789Sahrens arc_done_func_t *acb_done; 390789Sahrens arc_byteswap_func_t *acb_byteswap; 391789Sahrens arc_buf_t *acb_buf; 392789Sahrens zio_t *acb_zio_dummy; 393789Sahrens arc_callback_t *acb_next; 394789Sahrens }; 395789Sahrens 3963547Smaybee typedef struct arc_write_callback arc_write_callback_t; 3973547Smaybee 3983547Smaybee struct arc_write_callback { 3993547Smaybee void *awcb_private; 4003547Smaybee arc_done_func_t *awcb_ready; 4013547Smaybee arc_done_func_t *awcb_done; 4023547Smaybee arc_buf_t *awcb_buf; 4033547Smaybee }; 4043547Smaybee 405789Sahrens struct arc_buf_hdr { 406789Sahrens /* protected by hash lock */ 407789Sahrens dva_t b_dva; 408789Sahrens uint64_t b_birth; 409789Sahrens uint64_t b_cksum0; 410789Sahrens 4113093Sahrens kmutex_t b_freeze_lock; 4123093Sahrens zio_cksum_t *b_freeze_cksum; 4133093Sahrens 414789Sahrens arc_buf_hdr_t *b_hash_next; 415789Sahrens arc_buf_t *b_buf; 416789Sahrens uint32_t b_flags; 4171544Seschrock uint32_t b_datacnt; 418789Sahrens 4193290Sjohansen arc_callback_t *b_acb; 420789Sahrens kcondvar_t b_cv; 4213290Sjohansen 4223290Sjohansen /* immutable */ 4233290Sjohansen arc_buf_contents_t b_type; 4243290Sjohansen uint64_t b_size; 4253290Sjohansen spa_t *b_spa; 426789Sahrens 427789Sahrens /* protected by arc state mutex */ 428789Sahrens arc_state_t *b_state; 429789Sahrens list_node_t b_arc_node; 430789Sahrens 431789Sahrens /* updated atomically */ 432789Sahrens clock_t b_arc_access; 433789Sahrens 434789Sahrens /* self protecting */ 435789Sahrens refcount_t b_refcnt; 4365450Sbrendan 4375450Sbrendan l2arc_buf_hdr_t *b_l2hdr; 4385450Sbrendan list_node_t b_l2node; 439789Sahrens }; 440789Sahrens 4411544Seschrock static arc_buf_t *arc_eviction_list; 4421544Seschrock static kmutex_t arc_eviction_mtx; 4432887Smaybee static arc_buf_hdr_t arc_eviction_hdr; 4442688Smaybee static void arc_get_data_buf(arc_buf_t *buf); 4452688Smaybee static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); 4464309Smaybee static int arc_evict_needed(arc_buf_contents_t type); 4475642Smaybee static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes); 4481544Seschrock 4491544Seschrock #define GHOST_STATE(state) \ 4505450Sbrendan ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 4515450Sbrendan (state) == arc_l2c_only) 4521544Seschrock 453789Sahrens /* 454789Sahrens * Private ARC flags. These flags are private ARC only flags that will show up 455789Sahrens * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 456789Sahrens * be passed in as arc_flags in things like arc_read. 
However, these flags 457789Sahrens * should never be passed and should only be set by ARC code. When adding new 458789Sahrens * public flags, make sure not to smash the private ones. 459789Sahrens */ 460789Sahrens 4611544Seschrock #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 462789Sahrens #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 463789Sahrens #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 464789Sahrens #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 4651544Seschrock #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 4662391Smaybee #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 4675450Sbrendan #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 4685450Sbrendan #define ARC_DONT_L2CACHE (1 << 16) /* originated by prefetch */ 4695450Sbrendan #define ARC_L2_READING (1 << 17) /* L2ARC read in progress */ 4705450Sbrendan #define ARC_L2_WRITING (1 << 18) /* L2ARC write in progress */ 4715450Sbrendan #define ARC_L2_EVICTED (1 << 19) /* evicted during I/O */ 4725450Sbrendan #define ARC_L2_WRITE_HEAD (1 << 20) /* head of write list */ 473789Sahrens 4741544Seschrock #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 475789Sahrens #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 476789Sahrens #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 477789Sahrens #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 4781544Seschrock #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 4795450Sbrendan #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 4805450Sbrendan #define HDR_DONT_L2CACHE(hdr) ((hdr)->b_flags & ARC_DONT_L2CACHE) 4815450Sbrendan #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_L2_READING) 4825450Sbrendan #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 4835450Sbrendan #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 4845450Sbrendan #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 485789Sahrens 486789Sahrens /* 4876018Sbrendan * Other sizes 4886018Sbrendan */ 4896018Sbrendan 4906018Sbrendan #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 4916018Sbrendan #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 4926018Sbrendan 4936018Sbrendan /* 494789Sahrens * Hash table routines 495789Sahrens */ 496789Sahrens 497789Sahrens #define HT_LOCK_PAD 64 498789Sahrens 499789Sahrens struct ht_lock { 500789Sahrens kmutex_t ht_lock; 501789Sahrens #ifdef _KERNEL 502789Sahrens unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 503789Sahrens #endif 504789Sahrens }; 505789Sahrens 506789Sahrens #define BUF_LOCKS 256 507789Sahrens typedef struct buf_hash_table { 508789Sahrens uint64_t ht_mask; 509789Sahrens arc_buf_hdr_t **ht_table; 510789Sahrens struct ht_lock ht_locks[BUF_LOCKS]; 511789Sahrens } buf_hash_table_t; 512789Sahrens 513789Sahrens static buf_hash_table_t buf_hash_table; 514789Sahrens 515789Sahrens #define BUF_HASH_INDEX(spa, dva, birth) \ 516789Sahrens (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 517789Sahrens #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 518789Sahrens #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 519789Sahrens #define HDR_LOCK(buf) \ 520789Sahrens (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 521789Sahrens 522789Sahrens uint64_t zfs_crc64_table[256]; 523789Sahrens 5245450Sbrendan /* 5255450Sbrendan * Level 2 ARC 5265450Sbrendan */ 
5275450Sbrendan 5285450Sbrendan #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 5295450Sbrendan #define L2ARC_HEADROOM 4 /* num of writes */ 5305450Sbrendan #define L2ARC_FEED_DELAY 180 /* starting grace */ 5315450Sbrendan #define L2ARC_FEED_SECS 1 /* caching interval */ 5325450Sbrendan 5335450Sbrendan #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 5345450Sbrendan #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 5355450Sbrendan 5365450Sbrendan /* 5375450Sbrendan * L2ARC Performance Tunables 5385450Sbrendan */ 5395450Sbrendan uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 5405450Sbrendan uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 5415450Sbrendan uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 5425450Sbrendan boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 5435450Sbrendan 5445450Sbrendan /* 5455450Sbrendan * L2ARC Internals 5465450Sbrendan */ 5475450Sbrendan typedef struct l2arc_dev { 5485450Sbrendan vdev_t *l2ad_vdev; /* vdev */ 5495450Sbrendan spa_t *l2ad_spa; /* spa */ 5505450Sbrendan uint64_t l2ad_hand; /* next write location */ 5515450Sbrendan uint64_t l2ad_write; /* desired write size, bytes */ 5525450Sbrendan uint64_t l2ad_start; /* first addr on device */ 5535450Sbrendan uint64_t l2ad_end; /* last addr on device */ 5545450Sbrendan uint64_t l2ad_evict; /* last addr eviction reached */ 5555450Sbrendan boolean_t l2ad_first; /* first sweep through */ 5565450Sbrendan list_t *l2ad_buflist; /* buffer list */ 5575450Sbrendan list_node_t l2ad_node; /* device list node */ 5585450Sbrendan } l2arc_dev_t; 5595450Sbrendan 5605450Sbrendan static list_t L2ARC_dev_list; /* device list */ 5615450Sbrendan static list_t *l2arc_dev_list; /* device list pointer */ 5625450Sbrendan static kmutex_t l2arc_dev_mtx; /* device list mutex */ 5635450Sbrendan static l2arc_dev_t *l2arc_dev_last; /* last device used */ 5645450Sbrendan static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 5655450Sbrendan static list_t L2ARC_free_on_write; /* free after write buf list */ 5665450Sbrendan static list_t *l2arc_free_on_write; /* free after write list ptr */ 5675450Sbrendan static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 5685450Sbrendan static uint64_t l2arc_ndev; /* number of devices */ 5695450Sbrendan 5705450Sbrendan typedef struct l2arc_read_callback { 5715450Sbrendan arc_buf_t *l2rcb_buf; /* read buffer */ 5725450Sbrendan spa_t *l2rcb_spa; /* spa */ 5735450Sbrendan blkptr_t l2rcb_bp; /* original blkptr */ 5745450Sbrendan zbookmark_t l2rcb_zb; /* original bookmark */ 5755450Sbrendan int l2rcb_flags; /* original flags */ 5765450Sbrendan } l2arc_read_callback_t; 5775450Sbrendan 5785450Sbrendan typedef struct l2arc_write_callback { 5795450Sbrendan l2arc_dev_t *l2wcb_dev; /* device info */ 5805450Sbrendan arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 5815450Sbrendan } l2arc_write_callback_t; 5825450Sbrendan 5835450Sbrendan struct l2arc_buf_hdr { 5845450Sbrendan /* protected by arc_buf_hdr mutex */ 5855450Sbrendan l2arc_dev_t *b_dev; /* L2ARC device */ 5865450Sbrendan daddr_t b_daddr; /* disk address, offset byte */ 5875450Sbrendan }; 5885450Sbrendan 5895450Sbrendan typedef struct l2arc_data_free { 5905450Sbrendan /* protected by l2arc_free_on_write_mtx */ 5915450Sbrendan void *l2df_data; 5925450Sbrendan size_t l2df_size; 5935450Sbrendan void (*l2df_func)(void *, size_t); 5945450Sbrendan list_node_t l2df_list_node; 5955450Sbrendan } l2arc_data_free_t; 
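/*
 * (Editor's addition, not in the original source.) The l2arc_dev_t fields
 * above describe a simple log-structured layout: l2ad_hand advances through
 * [l2ad_start, l2ad_end) as buffers are written out, and wraps back to
 * l2ad_start once it would run past the end of the device, at which point
 * l2ad_first is cleared to record that at least one full sweep has completed.
 * A simplified, hypothetical helper (not a function in this file) showing
 * how those fields relate:
 *
 *	static uint64_t
 *	l2arc_advance_hand(l2arc_dev_t *dev, uint64_t size)
 *	{
 *		uint64_t offset;
 *
 *		if (dev->l2ad_hand + size > dev->l2ad_end) {
 *			dev->l2ad_hand = dev->l2ad_start;
 *			dev->l2ad_first = B_FALSE;
 *		}
 *		offset = dev->l2ad_hand;
 *		dev->l2ad_hand += size;
 *		return (offset);
 *	}
 *
 * The real feed/evict code also honors l2ad_evict and the l2arc_headroom
 * tunable, so treat this only as a sketch of the address arithmetic.
 */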
5965450Sbrendan 5975450Sbrendan static kmutex_t l2arc_feed_thr_lock; 5985450Sbrendan static kcondvar_t l2arc_feed_thr_cv; 5995450Sbrendan static uint8_t l2arc_thread_exit; 6005450Sbrendan 6015450Sbrendan static void l2arc_read_done(zio_t *zio); 6025450Sbrendan static void l2arc_hdr_stat_add(void); 6035450Sbrendan static void l2arc_hdr_stat_remove(void); 6045450Sbrendan 605789Sahrens static uint64_t 606789Sahrens buf_hash(spa_t *spa, dva_t *dva, uint64_t birth) 607789Sahrens { 608789Sahrens uintptr_t spav = (uintptr_t)spa; 609789Sahrens uint8_t *vdva = (uint8_t *)dva; 610789Sahrens uint64_t crc = -1ULL; 611789Sahrens int i; 612789Sahrens 613789Sahrens ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 614789Sahrens 615789Sahrens for (i = 0; i < sizeof (dva_t); i++) 616789Sahrens crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 617789Sahrens 618789Sahrens crc ^= (spav>>8) ^ birth; 619789Sahrens 620789Sahrens return (crc); 621789Sahrens } 622789Sahrens 623789Sahrens #define BUF_EMPTY(buf) \ 624789Sahrens ((buf)->b_dva.dva_word[0] == 0 && \ 625789Sahrens (buf)->b_dva.dva_word[1] == 0 && \ 626789Sahrens (buf)->b_birth == 0) 627789Sahrens 628789Sahrens #define BUF_EQUAL(spa, dva, birth, buf) \ 629789Sahrens ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 630789Sahrens ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 631789Sahrens ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 632789Sahrens 633789Sahrens static arc_buf_hdr_t * 634789Sahrens buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp) 635789Sahrens { 636789Sahrens uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 637789Sahrens kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 638789Sahrens arc_buf_hdr_t *buf; 639789Sahrens 640789Sahrens mutex_enter(hash_lock); 641789Sahrens for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 642789Sahrens buf = buf->b_hash_next) { 643789Sahrens if (BUF_EQUAL(spa, dva, birth, buf)) { 644789Sahrens *lockp = hash_lock; 645789Sahrens return (buf); 646789Sahrens } 647789Sahrens } 648789Sahrens mutex_exit(hash_lock); 649789Sahrens *lockp = NULL; 650789Sahrens return (NULL); 651789Sahrens } 652789Sahrens 653789Sahrens /* 654789Sahrens * Insert an entry into the hash table. If there is already an element 655789Sahrens * equal to elem in the hash table, then the already existing element 656789Sahrens * will be returned and the new element will not be inserted. 657789Sahrens * Otherwise returns NULL. 
658789Sahrens */ 659789Sahrens static arc_buf_hdr_t * 660789Sahrens buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 661789Sahrens { 662789Sahrens uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 663789Sahrens kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 664789Sahrens arc_buf_hdr_t *fbuf; 6653403Sbmc uint32_t i; 666789Sahrens 6671544Seschrock ASSERT(!HDR_IN_HASH_TABLE(buf)); 668789Sahrens *lockp = hash_lock; 669789Sahrens mutex_enter(hash_lock); 670789Sahrens for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 671789Sahrens fbuf = fbuf->b_hash_next, i++) { 672789Sahrens if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 673789Sahrens return (fbuf); 674789Sahrens } 675789Sahrens 676789Sahrens buf->b_hash_next = buf_hash_table.ht_table[idx]; 677789Sahrens buf_hash_table.ht_table[idx] = buf; 6781544Seschrock buf->b_flags |= ARC_IN_HASH_TABLE; 679789Sahrens 680789Sahrens /* collect some hash table performance data */ 681789Sahrens if (i > 0) { 6823403Sbmc ARCSTAT_BUMP(arcstat_hash_collisions); 683789Sahrens if (i == 1) 6843403Sbmc ARCSTAT_BUMP(arcstat_hash_chains); 6853403Sbmc 6863403Sbmc ARCSTAT_MAX(arcstat_hash_chain_max, i); 687789Sahrens } 6883403Sbmc 6893403Sbmc ARCSTAT_BUMP(arcstat_hash_elements); 6903403Sbmc ARCSTAT_MAXSTAT(arcstat_hash_elements); 691789Sahrens 692789Sahrens return (NULL); 693789Sahrens } 694789Sahrens 695789Sahrens static void 696789Sahrens buf_hash_remove(arc_buf_hdr_t *buf) 697789Sahrens { 698789Sahrens arc_buf_hdr_t *fbuf, **bufp; 699789Sahrens uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 700789Sahrens 701789Sahrens ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 7021544Seschrock ASSERT(HDR_IN_HASH_TABLE(buf)); 703789Sahrens 704789Sahrens bufp = &buf_hash_table.ht_table[idx]; 705789Sahrens while ((fbuf = *bufp) != buf) { 706789Sahrens ASSERT(fbuf != NULL); 707789Sahrens bufp = &fbuf->b_hash_next; 708789Sahrens } 709789Sahrens *bufp = buf->b_hash_next; 710789Sahrens buf->b_hash_next = NULL; 7111544Seschrock buf->b_flags &= ~ARC_IN_HASH_TABLE; 712789Sahrens 713789Sahrens /* collect some hash table performance data */ 7143403Sbmc ARCSTAT_BUMPDOWN(arcstat_hash_elements); 7153403Sbmc 716789Sahrens if (buf_hash_table.ht_table[idx] && 717789Sahrens buf_hash_table.ht_table[idx]->b_hash_next == NULL) 7183403Sbmc ARCSTAT_BUMPDOWN(arcstat_hash_chains); 719789Sahrens } 720789Sahrens 721789Sahrens /* 722789Sahrens * Global data structures and functions for the buf kmem cache. 723789Sahrens */ 724789Sahrens static kmem_cache_t *hdr_cache; 725789Sahrens static kmem_cache_t *buf_cache; 726789Sahrens 727789Sahrens static void 728789Sahrens buf_fini(void) 729789Sahrens { 730789Sahrens int i; 731789Sahrens 732789Sahrens kmem_free(buf_hash_table.ht_table, 733789Sahrens (buf_hash_table.ht_mask + 1) * sizeof (void *)); 734789Sahrens for (i = 0; i < BUF_LOCKS; i++) 735789Sahrens mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 736789Sahrens kmem_cache_destroy(hdr_cache); 737789Sahrens kmem_cache_destroy(buf_cache); 738789Sahrens } 739789Sahrens 740789Sahrens /* 741789Sahrens * Constructor callback - called when the cache is empty 742789Sahrens * and a new buf is requested. 
743789Sahrens */ 744789Sahrens /* ARGSUSED */ 745789Sahrens static int 746789Sahrens hdr_cons(void *vbuf, void *unused, int kmflag) 747789Sahrens { 748789Sahrens arc_buf_hdr_t *buf = vbuf; 749789Sahrens 750789Sahrens bzero(buf, sizeof (arc_buf_hdr_t)); 751789Sahrens refcount_create(&buf->b_refcnt); 752789Sahrens cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 7534831Sgw25295 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 7545450Sbrendan 7556018Sbrendan ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 756789Sahrens return (0); 757789Sahrens } 758789Sahrens 759789Sahrens /* 760789Sahrens * Destructor callback - called when a cached buf is 761789Sahrens * no longer required. 762789Sahrens */ 763789Sahrens /* ARGSUSED */ 764789Sahrens static void 765789Sahrens hdr_dest(void *vbuf, void *unused) 766789Sahrens { 767789Sahrens arc_buf_hdr_t *buf = vbuf; 768789Sahrens 769789Sahrens refcount_destroy(&buf->b_refcnt); 770789Sahrens cv_destroy(&buf->b_cv); 7714831Sgw25295 mutex_destroy(&buf->b_freeze_lock); 7725450Sbrendan 7736018Sbrendan ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 774789Sahrens } 775789Sahrens 776789Sahrens /* 777789Sahrens * Reclaim callback -- invoked when memory is low. 778789Sahrens */ 779789Sahrens /* ARGSUSED */ 780789Sahrens static void 781789Sahrens hdr_recl(void *unused) 782789Sahrens { 783789Sahrens dprintf("hdr_recl called\n"); 7843158Smaybee /* 7853158Smaybee * umem calls the reclaim func when we destroy the buf cache, 7863158Smaybee * which is after we do arc_fini(). 7873158Smaybee */ 7883158Smaybee if (!arc_dead) 7893158Smaybee cv_signal(&arc_reclaim_thr_cv); 790789Sahrens } 791789Sahrens 792789Sahrens static void 793789Sahrens buf_init(void) 794789Sahrens { 795789Sahrens uint64_t *ct; 7961544Seschrock uint64_t hsize = 1ULL << 12; 797789Sahrens int i, j; 798789Sahrens 799789Sahrens /* 800789Sahrens * The hash table is big enough to fill all of physical memory 8011544Seschrock * with an average 64K block size. The table will take up 8021544Seschrock * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 
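 *
 * (Editor's addition, not in the original source: a worked example of the
 * sizing loop below.) With 8-byte pointers and 4GB of physical memory,
 * physmem * PAGESIZE is 2^32, so hsize doubles from 2^12 until
 * hsize * 65536 reaches 2^32, i.e. hsize = 65536 buckets, making ht_table a
 * 65536 * 8 = 512KB kmem_zalloc() allocation. If that allocation fails, the
 * retry path halves hsize until it succeeds.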
803789Sahrens */ 8041544Seschrock while (hsize * 65536 < physmem * PAGESIZE) 805789Sahrens hsize <<= 1; 8061544Seschrock retry: 807789Sahrens buf_hash_table.ht_mask = hsize - 1; 8081544Seschrock buf_hash_table.ht_table = 8091544Seschrock kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 8101544Seschrock if (buf_hash_table.ht_table == NULL) { 8111544Seschrock ASSERT(hsize > (1ULL << 8)); 8121544Seschrock hsize >>= 1; 8131544Seschrock goto retry; 8141544Seschrock } 815789Sahrens 816789Sahrens hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 817789Sahrens 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 818789Sahrens buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 819789Sahrens 0, NULL, NULL, NULL, NULL, NULL, 0); 820789Sahrens 821789Sahrens for (i = 0; i < 256; i++) 822789Sahrens for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 823789Sahrens *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 824789Sahrens 825789Sahrens for (i = 0; i < BUF_LOCKS; i++) { 826789Sahrens mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 827789Sahrens NULL, MUTEX_DEFAULT, NULL); 828789Sahrens } 829789Sahrens } 830789Sahrens 831789Sahrens #define ARC_MINTIME (hz>>4) /* 62 ms */ 832789Sahrens 833789Sahrens static void 8343093Sahrens arc_cksum_verify(arc_buf_t *buf) 8353093Sahrens { 8363093Sahrens zio_cksum_t zc; 8373093Sahrens 8383312Sahrens if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 8393093Sahrens return; 8403093Sahrens 8413093Sahrens mutex_enter(&buf->b_hdr->b_freeze_lock); 8423265Sahrens if (buf->b_hdr->b_freeze_cksum == NULL || 8433265Sahrens (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 8443093Sahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 8453093Sahrens return; 8463093Sahrens } 8473093Sahrens fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 8483093Sahrens if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 8493093Sahrens panic("buffer modified while frozen!"); 8503093Sahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 8513093Sahrens } 8523093Sahrens 8535450Sbrendan static int 8545450Sbrendan arc_cksum_equal(arc_buf_t *buf) 8555450Sbrendan { 8565450Sbrendan zio_cksum_t zc; 8575450Sbrendan int equal; 8585450Sbrendan 8595450Sbrendan mutex_enter(&buf->b_hdr->b_freeze_lock); 8605450Sbrendan fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 8615450Sbrendan equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 8625450Sbrendan mutex_exit(&buf->b_hdr->b_freeze_lock); 8635450Sbrendan 8645450Sbrendan return (equal); 8655450Sbrendan } 8665450Sbrendan 8673093Sahrens static void 8685450Sbrendan arc_cksum_compute(arc_buf_t *buf, boolean_t force) 8693093Sahrens { 8705450Sbrendan if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 8713093Sahrens return; 8723093Sahrens 8733093Sahrens mutex_enter(&buf->b_hdr->b_freeze_lock); 8743093Sahrens if (buf->b_hdr->b_freeze_cksum != NULL) { 8753093Sahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 8763093Sahrens return; 8773093Sahrens } 8783093Sahrens buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 8793093Sahrens fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 8803093Sahrens buf->b_hdr->b_freeze_cksum); 8813093Sahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 8823093Sahrens } 8833093Sahrens 8843093Sahrens void 8853093Sahrens arc_buf_thaw(arc_buf_t *buf) 8863093Sahrens { 8875450Sbrendan if (zfs_flags & ZFS_DEBUG_MODIFY) { 8885450Sbrendan if (buf->b_hdr->b_state != arc_anon) 8895450Sbrendan panic("modifying non-anon buffer!"); 8905450Sbrendan if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 8915450Sbrendan 
panic("modifying buffer while i/o in progress!"); 8925450Sbrendan arc_cksum_verify(buf); 8935450Sbrendan } 8945450Sbrendan 8953093Sahrens mutex_enter(&buf->b_hdr->b_freeze_lock); 8963093Sahrens if (buf->b_hdr->b_freeze_cksum != NULL) { 8973093Sahrens kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 8983093Sahrens buf->b_hdr->b_freeze_cksum = NULL; 8993093Sahrens } 9003093Sahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 9013093Sahrens } 9023093Sahrens 9033093Sahrens void 9043093Sahrens arc_buf_freeze(arc_buf_t *buf) 9053093Sahrens { 9063312Sahrens if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 9073312Sahrens return; 9083312Sahrens 9093093Sahrens ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 9103403Sbmc buf->b_hdr->b_state == arc_anon); 9115450Sbrendan arc_cksum_compute(buf, B_FALSE); 9123093Sahrens } 9133093Sahrens 9143093Sahrens static void 915789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 916789Sahrens { 917789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 918789Sahrens 919789Sahrens if ((refcount_add(&ab->b_refcnt, tag) == 1) && 9203403Sbmc (ab->b_state != arc_anon)) { 9213700Sek110237 uint64_t delta = ab->b_size * ab->b_datacnt; 9224309Smaybee list_t *list = &ab->b_state->arcs_list[ab->b_type]; 9234309Smaybee uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 924789Sahrens 9253403Sbmc ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 9263403Sbmc mutex_enter(&ab->b_state->arcs_mtx); 927789Sahrens ASSERT(list_link_active(&ab->b_arc_node)); 9284309Smaybee list_remove(list, ab); 9291544Seschrock if (GHOST_STATE(ab->b_state)) { 9301544Seschrock ASSERT3U(ab->b_datacnt, ==, 0); 9311544Seschrock ASSERT3P(ab->b_buf, ==, NULL); 9321544Seschrock delta = ab->b_size; 9331544Seschrock } 9341544Seschrock ASSERT(delta > 0); 9354309Smaybee ASSERT3U(*size, >=, delta); 9364309Smaybee atomic_add_64(size, -delta); 9373403Sbmc mutex_exit(&ab->b_state->arcs_mtx); 9382391Smaybee /* remove the prefetch flag is we get a reference */ 9392391Smaybee if (ab->b_flags & ARC_PREFETCH) 9402391Smaybee ab->b_flags &= ~ARC_PREFETCH; 941789Sahrens } 942789Sahrens } 943789Sahrens 944789Sahrens static int 945789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 946789Sahrens { 947789Sahrens int cnt; 9483403Sbmc arc_state_t *state = ab->b_state; 949789Sahrens 9503403Sbmc ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 9513403Sbmc ASSERT(!GHOST_STATE(state)); 952789Sahrens 953789Sahrens if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 9543403Sbmc (state != arc_anon)) { 9554309Smaybee uint64_t *size = &state->arcs_lsize[ab->b_type]; 9564309Smaybee 9573403Sbmc ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 9583403Sbmc mutex_enter(&state->arcs_mtx); 959789Sahrens ASSERT(!list_link_active(&ab->b_arc_node)); 9604309Smaybee list_insert_head(&state->arcs_list[ab->b_type], ab); 9611544Seschrock ASSERT(ab->b_datacnt > 0); 9624309Smaybee atomic_add_64(size, ab->b_size * ab->b_datacnt); 9633403Sbmc mutex_exit(&state->arcs_mtx); 964789Sahrens } 965789Sahrens return (cnt); 966789Sahrens } 967789Sahrens 968789Sahrens /* 969789Sahrens * Move the supplied buffer to the indicated state. The mutex 970789Sahrens * for the buffer must be held by the caller. 
971789Sahrens */ 972789Sahrens static void 9731544Seschrock arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 974789Sahrens { 9751544Seschrock arc_state_t *old_state = ab->b_state; 9763700Sek110237 int64_t refcnt = refcount_count(&ab->b_refcnt); 9773700Sek110237 uint64_t from_delta, to_delta; 978789Sahrens 979789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 9801544Seschrock ASSERT(new_state != old_state); 9811544Seschrock ASSERT(refcnt == 0 || ab->b_datacnt > 0); 9821544Seschrock ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 9831544Seschrock 9841544Seschrock from_delta = to_delta = ab->b_datacnt * ab->b_size; 985789Sahrens 986789Sahrens /* 987789Sahrens * If this buffer is evictable, transfer it from the 988789Sahrens * old state list to the new state list. 989789Sahrens */ 9901544Seschrock if (refcnt == 0) { 9913403Sbmc if (old_state != arc_anon) { 9923403Sbmc int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 9934309Smaybee uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 9941544Seschrock 9951544Seschrock if (use_mutex) 9963403Sbmc mutex_enter(&old_state->arcs_mtx); 9971544Seschrock 9981544Seschrock ASSERT(list_link_active(&ab->b_arc_node)); 9994309Smaybee list_remove(&old_state->arcs_list[ab->b_type], ab); 1000789Sahrens 10012391Smaybee /* 10022391Smaybee * If prefetching out of the ghost cache, 10032391Smaybee * we will have a non-null datacnt. 10042391Smaybee */ 10052391Smaybee if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 10062391Smaybee /* ghost elements have a ghost size */ 10071544Seschrock ASSERT(ab->b_buf == NULL); 10081544Seschrock from_delta = ab->b_size; 1009789Sahrens } 10104309Smaybee ASSERT3U(*size, >=, from_delta); 10114309Smaybee atomic_add_64(size, -from_delta); 10121544Seschrock 10131544Seschrock if (use_mutex) 10143403Sbmc mutex_exit(&old_state->arcs_mtx); 1015789Sahrens } 10163403Sbmc if (new_state != arc_anon) { 10173403Sbmc int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 10184309Smaybee uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1019789Sahrens 10201544Seschrock if (use_mutex) 10213403Sbmc mutex_enter(&new_state->arcs_mtx); 10221544Seschrock 10234309Smaybee list_insert_head(&new_state->arcs_list[ab->b_type], ab); 10241544Seschrock 10251544Seschrock /* ghost elements have a ghost size */ 10261544Seschrock if (GHOST_STATE(new_state)) { 10271544Seschrock ASSERT(ab->b_datacnt == 0); 10281544Seschrock ASSERT(ab->b_buf == NULL); 10291544Seschrock to_delta = ab->b_size; 10301544Seschrock } 10314309Smaybee atomic_add_64(size, to_delta); 10321544Seschrock 10331544Seschrock if (use_mutex) 10343403Sbmc mutex_exit(&new_state->arcs_mtx); 1035789Sahrens } 1036789Sahrens } 1037789Sahrens 1038789Sahrens ASSERT(!BUF_EMPTY(ab)); 10395450Sbrendan if (new_state == arc_anon) { 1040789Sahrens buf_hash_remove(ab); 1041789Sahrens } 1042789Sahrens 10431544Seschrock /* adjust state sizes */ 10441544Seschrock if (to_delta) 10453403Sbmc atomic_add_64(&new_state->arcs_size, to_delta); 10461544Seschrock if (from_delta) { 10473403Sbmc ASSERT3U(old_state->arcs_size, >=, from_delta); 10483403Sbmc atomic_add_64(&old_state->arcs_size, -from_delta); 1049789Sahrens } 1050789Sahrens ab->b_state = new_state; 10515450Sbrendan 10525450Sbrendan /* adjust l2arc hdr stats */ 10535450Sbrendan if (new_state == arc_l2c_only) 10545450Sbrendan l2arc_hdr_stat_add(); 10555450Sbrendan else if (old_state == arc_l2c_only) 10565450Sbrendan l2arc_hdr_stat_remove(); 1057789Sahrens } 1058789Sahrens 10594309Smaybee void 10604309Smaybee arc_space_consume(uint64_t space) 
10614309Smaybee { 10624309Smaybee atomic_add_64(&arc_meta_used, space); 10634309Smaybee atomic_add_64(&arc_size, space); 10644309Smaybee } 10654309Smaybee 10664309Smaybee void 10674309Smaybee arc_space_return(uint64_t space) 10684309Smaybee { 10694309Smaybee ASSERT(arc_meta_used >= space); 10704309Smaybee if (arc_meta_max < arc_meta_used) 10714309Smaybee arc_meta_max = arc_meta_used; 10724309Smaybee atomic_add_64(&arc_meta_used, -space); 10734309Smaybee ASSERT(arc_size >= space); 10744309Smaybee atomic_add_64(&arc_size, -space); 10754309Smaybee } 10764309Smaybee 10774309Smaybee void * 10784309Smaybee arc_data_buf_alloc(uint64_t size) 10794309Smaybee { 10804309Smaybee if (arc_evict_needed(ARC_BUFC_DATA)) 10814309Smaybee cv_signal(&arc_reclaim_thr_cv); 10824309Smaybee atomic_add_64(&arc_size, size); 10834309Smaybee return (zio_data_buf_alloc(size)); 10844309Smaybee } 10854309Smaybee 10864309Smaybee void 10874309Smaybee arc_data_buf_free(void *buf, uint64_t size) 10884309Smaybee { 10894309Smaybee zio_data_buf_free(buf, size); 10904309Smaybee ASSERT(arc_size >= size); 10914309Smaybee atomic_add_64(&arc_size, -size); 10924309Smaybee } 10934309Smaybee 1094789Sahrens arc_buf_t * 10953290Sjohansen arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1096789Sahrens { 1097789Sahrens arc_buf_hdr_t *hdr; 1098789Sahrens arc_buf_t *buf; 1099789Sahrens 1100789Sahrens ASSERT3U(size, >, 0); 11016245Smaybee hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1102789Sahrens ASSERT(BUF_EMPTY(hdr)); 1103789Sahrens hdr->b_size = size; 11043290Sjohansen hdr->b_type = type; 1105789Sahrens hdr->b_spa = spa; 11063403Sbmc hdr->b_state = arc_anon; 1107789Sahrens hdr->b_arc_access = 0; 11086245Smaybee buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1109789Sahrens buf->b_hdr = hdr; 11102688Smaybee buf->b_data = NULL; 11111544Seschrock buf->b_efunc = NULL; 11121544Seschrock buf->b_private = NULL; 1113789Sahrens buf->b_next = NULL; 1114789Sahrens hdr->b_buf = buf; 11152688Smaybee arc_get_data_buf(buf); 11161544Seschrock hdr->b_datacnt = 1; 1117789Sahrens hdr->b_flags = 0; 1118789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1119789Sahrens (void) refcount_add(&hdr->b_refcnt, tag); 1120789Sahrens 1121789Sahrens return (buf); 1122789Sahrens } 1123789Sahrens 11242688Smaybee static arc_buf_t * 11252688Smaybee arc_buf_clone(arc_buf_t *from) 11261544Seschrock { 11272688Smaybee arc_buf_t *buf; 11282688Smaybee arc_buf_hdr_t *hdr = from->b_hdr; 11292688Smaybee uint64_t size = hdr->b_size; 11301544Seschrock 11316245Smaybee buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 11322688Smaybee buf->b_hdr = hdr; 11332688Smaybee buf->b_data = NULL; 11342688Smaybee buf->b_efunc = NULL; 11352688Smaybee buf->b_private = NULL; 11362688Smaybee buf->b_next = hdr->b_buf; 11372688Smaybee hdr->b_buf = buf; 11382688Smaybee arc_get_data_buf(buf); 11392688Smaybee bcopy(from->b_data, buf->b_data, size); 11402688Smaybee hdr->b_datacnt += 1; 11412688Smaybee return (buf); 11421544Seschrock } 11431544Seschrock 11441544Seschrock void 11451544Seschrock arc_buf_add_ref(arc_buf_t *buf, void* tag) 11461544Seschrock { 11472887Smaybee arc_buf_hdr_t *hdr; 11481544Seschrock kmutex_t *hash_lock; 11491544Seschrock 11502724Smaybee /* 11512724Smaybee * Check to see if this buffer is currently being evicted via 11522887Smaybee * arc_do_user_evicts(). 
11532724Smaybee */ 11542887Smaybee mutex_enter(&arc_eviction_mtx); 11552887Smaybee hdr = buf->b_hdr; 11562887Smaybee if (hdr == NULL) { 11572887Smaybee mutex_exit(&arc_eviction_mtx); 11582724Smaybee return; 11592887Smaybee } 11602887Smaybee hash_lock = HDR_LOCK(hdr); 11612887Smaybee mutex_exit(&arc_eviction_mtx); 11622724Smaybee 11632724Smaybee mutex_enter(hash_lock); 11641544Seschrock if (buf->b_data == NULL) { 11651544Seschrock /* 11661544Seschrock * This buffer is evicted. 11671544Seschrock */ 11682724Smaybee mutex_exit(hash_lock); 11691544Seschrock return; 11701544Seschrock } 11711544Seschrock 11722724Smaybee ASSERT(buf->b_hdr == hdr); 11733403Sbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 11741544Seschrock add_reference(hdr, hash_lock, tag); 11752688Smaybee arc_access(hdr, hash_lock); 11762688Smaybee mutex_exit(hash_lock); 11773403Sbmc ARCSTAT_BUMP(arcstat_hits); 11783403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 11793403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 11803403Sbmc data, metadata, hits); 11811544Seschrock } 11821544Seschrock 11835450Sbrendan /* 11845450Sbrendan * Free the arc data buffer. If it is an l2arc write in progress, 11855450Sbrendan * the buffer is placed on l2arc_free_on_write to be freed later. 11865450Sbrendan */ 11875450Sbrendan static void 11885450Sbrendan arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), 11895450Sbrendan void *data, size_t size) 11905450Sbrendan { 11915450Sbrendan if (HDR_L2_WRITING(hdr)) { 11925450Sbrendan l2arc_data_free_t *df; 11935450Sbrendan df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 11945450Sbrendan df->l2df_data = data; 11955450Sbrendan df->l2df_size = size; 11965450Sbrendan df->l2df_func = free_func; 11975450Sbrendan mutex_enter(&l2arc_free_on_write_mtx); 11985450Sbrendan list_insert_head(l2arc_free_on_write, df); 11995450Sbrendan mutex_exit(&l2arc_free_on_write_mtx); 12005450Sbrendan ARCSTAT_BUMP(arcstat_l2_free_on_write); 12015450Sbrendan } else { 12025450Sbrendan free_func(data, size); 12035450Sbrendan } 12045450Sbrendan } 12055450Sbrendan 1206789Sahrens static void 12072688Smaybee arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 12081544Seschrock { 12091544Seschrock arc_buf_t **bufp; 12101544Seschrock 12111544Seschrock /* free up data associated with the buf */ 12121544Seschrock if (buf->b_data) { 12131544Seschrock arc_state_t *state = buf->b_hdr->b_state; 12141544Seschrock uint64_t size = buf->b_hdr->b_size; 12153290Sjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 12161544Seschrock 12173093Sahrens arc_cksum_verify(buf); 12182688Smaybee if (!recycle) { 12193290Sjohansen if (type == ARC_BUFC_METADATA) { 12205450Sbrendan arc_buf_data_free(buf->b_hdr, zio_buf_free, 12215450Sbrendan buf->b_data, size); 12224309Smaybee arc_space_return(size); 12233290Sjohansen } else { 12243290Sjohansen ASSERT(type == ARC_BUFC_DATA); 12255450Sbrendan arc_buf_data_free(buf->b_hdr, 12265450Sbrendan zio_data_buf_free, buf->b_data, size); 12274309Smaybee atomic_add_64(&arc_size, -size); 12283290Sjohansen } 12292688Smaybee } 12301544Seschrock if (list_link_active(&buf->b_hdr->b_arc_node)) { 12314309Smaybee uint64_t *cnt = &state->arcs_lsize[type]; 12324309Smaybee 12331544Seschrock ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 12343403Sbmc ASSERT(state != arc_anon); 12354309Smaybee 12364309Smaybee ASSERT3U(*cnt, >=, size); 12374309Smaybee atomic_add_64(cnt, -size); 12381544Seschrock } 12393403Sbmc ASSERT3U(state->arcs_size, >=, size); 12403403Sbmc 
atomic_add_64(&state->arcs_size, -size); 12411544Seschrock buf->b_data = NULL; 12421544Seschrock ASSERT(buf->b_hdr->b_datacnt > 0); 12431544Seschrock buf->b_hdr->b_datacnt -= 1; 12441544Seschrock } 12451544Seschrock 12461544Seschrock /* only remove the buf if requested */ 12471544Seschrock if (!all) 12481544Seschrock return; 12491544Seschrock 12501544Seschrock /* remove the buf from the hdr list */ 12511544Seschrock for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 12521544Seschrock continue; 12531544Seschrock *bufp = buf->b_next; 12541544Seschrock 12551544Seschrock ASSERT(buf->b_efunc == NULL); 12561544Seschrock 12571544Seschrock /* clean up the buf */ 12581544Seschrock buf->b_hdr = NULL; 12591544Seschrock kmem_cache_free(buf_cache, buf); 12601544Seschrock } 12611544Seschrock 12621544Seschrock static void 12631544Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr) 1264789Sahrens { 1265789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 12663403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 12671544Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1268789Sahrens 12695450Sbrendan if (hdr->b_l2hdr != NULL) { 12705450Sbrendan if (!MUTEX_HELD(&l2arc_buflist_mtx)) { 12715450Sbrendan /* 12725450Sbrendan * To prevent arc_free() and l2arc_evict() from 12735450Sbrendan * attempting to free the same buffer at the same time, 12745450Sbrendan * a FREE_IN_PROGRESS flag is given to arc_free() to 12755450Sbrendan * give it priority. l2arc_evict() can't destroy this 12765450Sbrendan * header while we are waiting on l2arc_buflist_mtx. 12775450Sbrendan */ 12785450Sbrendan mutex_enter(&l2arc_buflist_mtx); 12795450Sbrendan ASSERT(hdr->b_l2hdr != NULL); 12805450Sbrendan 12815450Sbrendan list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 12825450Sbrendan mutex_exit(&l2arc_buflist_mtx); 12835450Sbrendan } else { 12845450Sbrendan list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 12855450Sbrendan } 12865450Sbrendan ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 12875450Sbrendan kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); 12885450Sbrendan if (hdr->b_state == arc_l2c_only) 12895450Sbrendan l2arc_hdr_stat_remove(); 12905450Sbrendan hdr->b_l2hdr = NULL; 12915450Sbrendan } 12925450Sbrendan 1293789Sahrens if (!BUF_EMPTY(hdr)) { 12941544Seschrock ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1295789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 1296789Sahrens hdr->b_birth = 0; 1297789Sahrens hdr->b_cksum0 = 0; 1298789Sahrens } 12991544Seschrock while (hdr->b_buf) { 1300789Sahrens arc_buf_t *buf = hdr->b_buf; 1301789Sahrens 13021544Seschrock if (buf->b_efunc) { 13031544Seschrock mutex_enter(&arc_eviction_mtx); 13041544Seschrock ASSERT(buf->b_hdr != NULL); 13052688Smaybee arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 13061544Seschrock hdr->b_buf = buf->b_next; 13072887Smaybee buf->b_hdr = &arc_eviction_hdr; 13081544Seschrock buf->b_next = arc_eviction_list; 13091544Seschrock arc_eviction_list = buf; 13101544Seschrock mutex_exit(&arc_eviction_mtx); 13111544Seschrock } else { 13122688Smaybee arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 13131544Seschrock } 1314789Sahrens } 13153093Sahrens if (hdr->b_freeze_cksum != NULL) { 13163093Sahrens kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 13173093Sahrens hdr->b_freeze_cksum = NULL; 13183093Sahrens } 13191544Seschrock 1320789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 1321789Sahrens ASSERT3P(hdr->b_hash_next, ==, NULL); 1322789Sahrens ASSERT3P(hdr->b_acb, ==, NULL); 1323789Sahrens kmem_cache_free(hdr_cache, hdr); 1324789Sahrens } 1325789Sahrens 1326789Sahrens void 
1327789Sahrens arc_buf_free(arc_buf_t *buf, void *tag) 1328789Sahrens { 1329789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 13303403Sbmc int hashed = hdr->b_state != arc_anon; 13311544Seschrock 13321544Seschrock ASSERT(buf->b_efunc == NULL); 13331544Seschrock ASSERT(buf->b_data != NULL); 13341544Seschrock 13351544Seschrock if (hashed) { 13361544Seschrock kmutex_t *hash_lock = HDR_LOCK(hdr); 13371544Seschrock 13381544Seschrock mutex_enter(hash_lock); 13391544Seschrock (void) remove_reference(hdr, hash_lock, tag); 13401544Seschrock if (hdr->b_datacnt > 1) 13412688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 13421544Seschrock else 13431544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 13441544Seschrock mutex_exit(hash_lock); 13451544Seschrock } else if (HDR_IO_IN_PROGRESS(hdr)) { 13461544Seschrock int destroy_hdr; 13471544Seschrock /* 13481544Seschrock * We are in the middle of an async write. Don't destroy 13491544Seschrock * this buffer unless the write completes before we finish 13501544Seschrock * decrementing the reference count. 13511544Seschrock */ 13521544Seschrock mutex_enter(&arc_eviction_mtx); 13531544Seschrock (void) remove_reference(hdr, NULL, tag); 13541544Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt)); 13551544Seschrock destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 13561544Seschrock mutex_exit(&arc_eviction_mtx); 13571544Seschrock if (destroy_hdr) 13581544Seschrock arc_hdr_destroy(hdr); 13591544Seschrock } else { 13601544Seschrock if (remove_reference(hdr, NULL, tag) > 0) { 13611544Seschrock ASSERT(HDR_IO_ERROR(hdr)); 13622688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 13631544Seschrock } else { 13641544Seschrock arc_hdr_destroy(hdr); 13651544Seschrock } 13661544Seschrock } 13671544Seschrock } 13681544Seschrock 13691544Seschrock int 13701544Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag) 13711544Seschrock { 13721544Seschrock arc_buf_hdr_t *hdr = buf->b_hdr; 1373789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr); 13741544Seschrock int no_callback = (buf->b_efunc == NULL); 13751544Seschrock 13763403Sbmc if (hdr->b_state == arc_anon) { 13771544Seschrock arc_buf_free(buf, tag); 13781544Seschrock return (no_callback); 13791544Seschrock } 1380789Sahrens 1381789Sahrens mutex_enter(hash_lock); 13823403Sbmc ASSERT(hdr->b_state != arc_anon); 13831544Seschrock ASSERT(buf->b_data != NULL); 1384789Sahrens 13851544Seschrock (void) remove_reference(hdr, hash_lock, tag); 13861544Seschrock if (hdr->b_datacnt > 1) { 13871544Seschrock if (no_callback) 13882688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 13891544Seschrock } else if (no_callback) { 13901544Seschrock ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 13911544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 1392789Sahrens } 13931544Seschrock ASSERT(no_callback || hdr->b_datacnt > 1 || 13941544Seschrock refcount_is_zero(&hdr->b_refcnt)); 1395789Sahrens mutex_exit(hash_lock); 13961544Seschrock return (no_callback); 1397789Sahrens } 1398789Sahrens 1399789Sahrens int 1400789Sahrens arc_buf_size(arc_buf_t *buf) 1401789Sahrens { 1402789Sahrens return (buf->b_hdr->b_size); 1403789Sahrens } 1404789Sahrens 1405789Sahrens /* 1406789Sahrens * Evict buffers from list until we've removed the specified number of 1407789Sahrens * bytes. Move the removed buffers to the appropriate evict state. 14082688Smaybee * If the recycle flag is set, then attempt to "recycle" a buffer: 14092688Smaybee * - look for a buffer to evict that is `bytes' long. 14102688Smaybee * - return the data block from this buffer rather than freeing it. 
14112688Smaybee * This flag is used by callers that are trying to make space for a 14122688Smaybee * new buffer in a full arc cache. 14135642Smaybee * 14145642Smaybee * This function makes a "best effort". It skips over any buffers 14155642Smaybee * it can't get a hash_lock on, and so may not catch all candidates. 14165642Smaybee * It may also return without evicting as much space as requested. 1417789Sahrens */ 14182688Smaybee static void * 14195642Smaybee arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, 14203290Sjohansen arc_buf_contents_t type) 1421789Sahrens { 1422789Sahrens arc_state_t *evicted_state; 14232688Smaybee uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 14242918Smaybee arc_buf_hdr_t *ab, *ab_prev = NULL; 14254309Smaybee list_t *list = &state->arcs_list[type]; 1426789Sahrens kmutex_t *hash_lock; 14272688Smaybee boolean_t have_lock; 14282918Smaybee void *stolen = NULL; 1429789Sahrens 14303403Sbmc ASSERT(state == arc_mru || state == arc_mfu); 1431789Sahrens 14323403Sbmc evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 1433789Sahrens 14343403Sbmc mutex_enter(&state->arcs_mtx); 14353403Sbmc mutex_enter(&evicted_state->arcs_mtx); 1436789Sahrens 14374309Smaybee for (ab = list_tail(list); ab; ab = ab_prev) { 14384309Smaybee ab_prev = list_prev(list, ab); 14392391Smaybee /* prefetch buffers have a minimum lifespan */ 14402688Smaybee if (HDR_IO_IN_PROGRESS(ab) || 14415642Smaybee (spa && ab->b_spa != spa) || 14422688Smaybee (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 14432688Smaybee lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 14442391Smaybee skipped++; 14452391Smaybee continue; 14462391Smaybee } 14472918Smaybee /* "lookahead" for better eviction candidate */ 14482918Smaybee if (recycle && ab->b_size != bytes && 14492918Smaybee ab_prev && ab_prev->b_size == bytes) 14502688Smaybee continue; 1451789Sahrens hash_lock = HDR_LOCK(ab); 14522688Smaybee have_lock = MUTEX_HELD(hash_lock); 14532688Smaybee if (have_lock || mutex_tryenter(hash_lock)) { 1454789Sahrens ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 14551544Seschrock ASSERT(ab->b_datacnt > 0); 14561544Seschrock while (ab->b_buf) { 14571544Seschrock arc_buf_t *buf = ab->b_buf; 14582688Smaybee if (buf->b_data) { 14591544Seschrock bytes_evicted += ab->b_size; 14603290Sjohansen if (recycle && ab->b_type == type && 14615450Sbrendan ab->b_size == bytes && 14625450Sbrendan !HDR_L2_WRITING(ab)) { 14632918Smaybee stolen = buf->b_data; 14642918Smaybee recycle = FALSE; 14652918Smaybee } 14662688Smaybee } 14671544Seschrock if (buf->b_efunc) { 14681544Seschrock mutex_enter(&arc_eviction_mtx); 14692918Smaybee arc_buf_destroy(buf, 14702918Smaybee buf->b_data == stolen, FALSE); 14711544Seschrock ab->b_buf = buf->b_next; 14722887Smaybee buf->b_hdr = &arc_eviction_hdr; 14731544Seschrock buf->b_next = arc_eviction_list; 14741544Seschrock arc_eviction_list = buf; 14751544Seschrock mutex_exit(&arc_eviction_mtx); 14761544Seschrock } else { 14772918Smaybee arc_buf_destroy(buf, 14782918Smaybee buf->b_data == stolen, TRUE); 14791544Seschrock } 14801544Seschrock } 14811544Seschrock ASSERT(ab->b_datacnt == 0); 1482789Sahrens arc_change_state(evicted_state, ab, hash_lock); 14831544Seschrock ASSERT(HDR_IN_HASH_TABLE(ab)); 14845450Sbrendan ab->b_flags |= ARC_IN_HASH_TABLE; 14855450Sbrendan ab->b_flags &= ~ARC_BUF_AVAILABLE; 1486789Sahrens DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 14872688Smaybee if (!have_lock) 14882688Smaybee mutex_exit(hash_lock); 14891544Seschrock if (bytes >= 0 && 
bytes_evicted >= bytes) 1490789Sahrens break; 1491789Sahrens } else { 14922688Smaybee missed += 1; 1493789Sahrens } 1494789Sahrens } 14953403Sbmc 14963403Sbmc mutex_exit(&evicted_state->arcs_mtx); 14973403Sbmc mutex_exit(&state->arcs_mtx); 1498789Sahrens 1499789Sahrens if (bytes_evicted < bytes) 1500789Sahrens dprintf("only evicted %lld bytes from %x", 1501789Sahrens (longlong_t)bytes_evicted, state); 1502789Sahrens 15032688Smaybee if (skipped) 15043403Sbmc ARCSTAT_INCR(arcstat_evict_skip, skipped); 15053403Sbmc 15062688Smaybee if (missed) 15073403Sbmc ARCSTAT_INCR(arcstat_mutex_miss, missed); 15083403Sbmc 15094709Smaybee /* 15104709Smaybee * We have just evicted some data into the ghost state, make 15114709Smaybee * sure we also adjust the ghost state size if necessary. 15124709Smaybee */ 15134709Smaybee if (arc_no_grow && 15144709Smaybee arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 15154709Smaybee int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 15164709Smaybee arc_mru_ghost->arcs_size - arc_c; 15174709Smaybee 15184709Smaybee if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 15194709Smaybee int64_t todelete = 15204709Smaybee MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 15215642Smaybee arc_evict_ghost(arc_mru_ghost, NULL, todelete); 15224709Smaybee } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 15234709Smaybee int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 15244709Smaybee arc_mru_ghost->arcs_size + 15254709Smaybee arc_mfu_ghost->arcs_size - arc_c); 15265642Smaybee arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 15274709Smaybee } 15284709Smaybee } 15294709Smaybee 15302918Smaybee return (stolen); 1531789Sahrens } 1532789Sahrens 1533789Sahrens /* 1534789Sahrens * Remove buffers from list until we've removed the specified number of 1535789Sahrens * bytes. Destroy the buffers that are removed. 1536789Sahrens */ 1537789Sahrens static void 15385642Smaybee arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) 1539789Sahrens { 1540789Sahrens arc_buf_hdr_t *ab, *ab_prev; 15414309Smaybee list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1542789Sahrens kmutex_t *hash_lock; 15431544Seschrock uint64_t bytes_deleted = 0; 15443700Sek110237 uint64_t bufs_skipped = 0; 1545789Sahrens 15461544Seschrock ASSERT(GHOST_STATE(state)); 1547789Sahrens top: 15483403Sbmc mutex_enter(&state->arcs_mtx); 15494309Smaybee for (ab = list_tail(list); ab; ab = ab_prev) { 15504309Smaybee ab_prev = list_prev(list, ab); 15515642Smaybee if (spa && ab->b_spa != spa) 15525642Smaybee continue; 1553789Sahrens hash_lock = HDR_LOCK(ab); 1554789Sahrens if (mutex_tryenter(hash_lock)) { 15552391Smaybee ASSERT(!HDR_IO_IN_PROGRESS(ab)); 15561544Seschrock ASSERT(ab->b_buf == NULL); 15573403Sbmc ARCSTAT_BUMP(arcstat_deleted); 15581544Seschrock bytes_deleted += ab->b_size; 15595450Sbrendan 15605450Sbrendan if (ab->b_l2hdr != NULL) { 15615450Sbrendan /* 15625450Sbrendan * This buffer is cached on the 2nd Level ARC; 15635450Sbrendan * don't destroy the header. 
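 * Instead the header is moved to the arc_l2c_only state, so the
 * block remains findable in the hash table and readable from the
 * L2ARC device.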
15645450Sbrendan */ 15655450Sbrendan arc_change_state(arc_l2c_only, ab, hash_lock); 15665450Sbrendan mutex_exit(hash_lock); 15675450Sbrendan } else { 15685450Sbrendan arc_change_state(arc_anon, ab, hash_lock); 15695450Sbrendan mutex_exit(hash_lock); 15705450Sbrendan arc_hdr_destroy(ab); 15715450Sbrendan } 15725450Sbrendan 1573789Sahrens DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1574789Sahrens if (bytes >= 0 && bytes_deleted >= bytes) 1575789Sahrens break; 1576789Sahrens } else { 1577789Sahrens if (bytes < 0) { 15783403Sbmc mutex_exit(&state->arcs_mtx); 1579789Sahrens mutex_enter(hash_lock); 1580789Sahrens mutex_exit(hash_lock); 1581789Sahrens goto top; 1582789Sahrens } 1583789Sahrens bufs_skipped += 1; 1584789Sahrens } 1585789Sahrens } 15863403Sbmc mutex_exit(&state->arcs_mtx); 1587789Sahrens 15884309Smaybee if (list == &state->arcs_list[ARC_BUFC_DATA] && 15894309Smaybee (bytes < 0 || bytes_deleted < bytes)) { 15904309Smaybee list = &state->arcs_list[ARC_BUFC_METADATA]; 15914309Smaybee goto top; 15924309Smaybee } 15934309Smaybee 1594789Sahrens if (bufs_skipped) { 15953403Sbmc ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1596789Sahrens ASSERT(bytes >= 0); 1597789Sahrens } 1598789Sahrens 1599789Sahrens if (bytes_deleted < bytes) 1600789Sahrens dprintf("only deleted %lld bytes from %p", 1601789Sahrens (longlong_t)bytes_deleted, state); 1602789Sahrens } 1603789Sahrens 1604789Sahrens static void 1605789Sahrens arc_adjust(void) 1606789Sahrens { 16073403Sbmc int64_t top_sz, mru_over, arc_over, todelete; 1608789Sahrens 16095642Smaybee top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; 1610789Sahrens 16114309Smaybee if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 16124309Smaybee int64_t toevict = 16134309Smaybee MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); 16145642Smaybee (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); 16154309Smaybee top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 16164309Smaybee } 16174309Smaybee 16184309Smaybee if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 16194309Smaybee int64_t toevict = 16204309Smaybee MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); 16215642Smaybee (void) arc_evict(arc_mru, NULL, toevict, FALSE, 16225642Smaybee ARC_BUFC_METADATA); 16233403Sbmc top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1624789Sahrens } 1625789Sahrens 16263403Sbmc mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1627789Sahrens 1628789Sahrens if (mru_over > 0) { 16294309Smaybee if (arc_mru_ghost->arcs_size > 0) { 16304309Smaybee todelete = MIN(arc_mru_ghost->arcs_size, mru_over); 16315642Smaybee arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1632789Sahrens } 1633789Sahrens } 1634789Sahrens 16353403Sbmc if ((arc_over = arc_size - arc_c) > 0) { 16361544Seschrock int64_t tbl_over; 1637789Sahrens 16384309Smaybee if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 16394309Smaybee int64_t toevict = 16404309Smaybee MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); 16415642Smaybee (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 16424309Smaybee ARC_BUFC_DATA); 16434309Smaybee arc_over = arc_size - arc_c; 1644789Sahrens } 1645789Sahrens 16464309Smaybee if (arc_over > 0 && 16474309Smaybee arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 16484309Smaybee int64_t toevict = 16494309Smaybee MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], 16504309Smaybee arc_over); 16515642Smaybee (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 16524309Smaybee ARC_BUFC_METADATA); 16534309Smaybee } 16544309Smaybee 
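	/*
	 * The check below also bounds the combined footprint of resident
	 * data plus both ghost lists at 2 * arc_c; any excess is trimmed
	 * from the MFU ghost list.
	 */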
16554309Smaybee tbl_over = arc_size + arc_mru_ghost->arcs_size + 16564309Smaybee arc_mfu_ghost->arcs_size - arc_c * 2; 16574309Smaybee 16584309Smaybee if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) { 16594309Smaybee todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); 16605642Smaybee arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1661789Sahrens } 1662789Sahrens } 1663789Sahrens } 1664789Sahrens 16651544Seschrock static void 16661544Seschrock arc_do_user_evicts(void) 16671544Seschrock { 16681544Seschrock mutex_enter(&arc_eviction_mtx); 16691544Seschrock while (arc_eviction_list != NULL) { 16701544Seschrock arc_buf_t *buf = arc_eviction_list; 16711544Seschrock arc_eviction_list = buf->b_next; 16721544Seschrock buf->b_hdr = NULL; 16731544Seschrock mutex_exit(&arc_eviction_mtx); 16741544Seschrock 16751819Smaybee if (buf->b_efunc != NULL) 16761819Smaybee VERIFY(buf->b_efunc(buf) == 0); 16771544Seschrock 16781544Seschrock buf->b_efunc = NULL; 16791544Seschrock buf->b_private = NULL; 16801544Seschrock kmem_cache_free(buf_cache, buf); 16811544Seschrock mutex_enter(&arc_eviction_mtx); 16821544Seschrock } 16831544Seschrock mutex_exit(&arc_eviction_mtx); 16841544Seschrock } 16851544Seschrock 1686789Sahrens /* 16875642Smaybee * Flush all *evictable* data from the cache for the given spa. 1688789Sahrens * NOTE: this will not touch "active" (i.e. referenced) data. 1689789Sahrens */ 1690789Sahrens void 16915642Smaybee arc_flush(spa_t *spa) 1692789Sahrens { 16935642Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 16945642Smaybee (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); 16955642Smaybee if (spa) 16965642Smaybee break; 16975642Smaybee } 16985642Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 16995642Smaybee (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); 17005642Smaybee if (spa) 17015642Smaybee break; 17025642Smaybee } 17035642Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 17045642Smaybee (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); 17055642Smaybee if (spa) 17065642Smaybee break; 17075642Smaybee } 17085642Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 17095642Smaybee (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); 17105642Smaybee if (spa) 17115642Smaybee break; 17125642Smaybee } 17135642Smaybee 17145642Smaybee arc_evict_ghost(arc_mru_ghost, spa, -1); 17155642Smaybee arc_evict_ghost(arc_mfu_ghost, spa, -1); 17161544Seschrock 17171544Seschrock mutex_enter(&arc_reclaim_thr_lock); 17181544Seschrock arc_do_user_evicts(); 17191544Seschrock mutex_exit(&arc_reclaim_thr_lock); 17205642Smaybee ASSERT(spa || arc_eviction_list == NULL); 1721789Sahrens } 1722789Sahrens 17233158Smaybee int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 17242391Smaybee 1725789Sahrens void 17263158Smaybee arc_shrink(void) 1727789Sahrens { 17283403Sbmc if (arc_c > arc_c_min) { 17293158Smaybee uint64_t to_free; 1730789Sahrens 17312048Sstans #ifdef _KERNEL 17323403Sbmc to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 17332048Sstans #else 17343403Sbmc to_free = arc_c >> arc_shrink_shift; 17352048Sstans #endif 17363403Sbmc if (arc_c > arc_c_min + to_free) 17373403Sbmc atomic_add_64(&arc_c, -to_free); 17383158Smaybee else 17393403Sbmc arc_c = arc_c_min; 17402048Sstans 17413403Sbmc atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 17423403Sbmc if (arc_c > arc_size) 17433403Sbmc arc_c = MAX(arc_size, arc_c_min); 17443403Sbmc if (arc_p > arc_c) 17453403Sbmc arc_p = (arc_c >> 1); 17463403Sbmc 
ASSERT(arc_c >= arc_c_min); 17473403Sbmc ASSERT((int64_t)arc_p >= 0); 17483158Smaybee } 1749789Sahrens 17503403Sbmc if (arc_size > arc_c) 17513158Smaybee arc_adjust(); 1752789Sahrens } 1753789Sahrens 1754789Sahrens static int 1755789Sahrens arc_reclaim_needed(void) 1756789Sahrens { 1757789Sahrens uint64_t extra; 1758789Sahrens 1759789Sahrens #ifdef _KERNEL 17602048Sstans 17612048Sstans if (needfree) 17622048Sstans return (1); 17632048Sstans 1764789Sahrens /* 1765789Sahrens * take 'desfree' extra pages, so we reclaim sooner, rather than later 1766789Sahrens */ 1767789Sahrens extra = desfree; 1768789Sahrens 1769789Sahrens /* 1770789Sahrens * check that we're out of range of the pageout scanner. It starts to 1771789Sahrens * schedule paging if freemem is less than lotsfree and needfree. 1772789Sahrens * lotsfree is the high-water mark for pageout, and needfree is the 1773789Sahrens * number of needed free pages. We add extra pages here to make sure 1774789Sahrens * the scanner doesn't start up while we're freeing memory. 1775789Sahrens */ 1776789Sahrens if (freemem < lotsfree + needfree + extra) 1777789Sahrens return (1); 1778789Sahrens 1779789Sahrens /* 1780789Sahrens * check to make sure that swapfs has enough space so that anon 17815450Sbrendan * reservations can still succeed. anon_resvmem() checks that the 1782789Sahrens * availrmem is greater than swapfs_minfree, and the number of reserved 1783789Sahrens * swap pages. We also add a bit of extra here just to prevent 1784789Sahrens * circumstances from getting really dire. 1785789Sahrens */ 1786789Sahrens if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1787789Sahrens return (1); 1788789Sahrens 17891936Smaybee #if defined(__i386) 1790789Sahrens /* 1791789Sahrens * If we're on an i386 platform, it's possible that we'll exhaust the 1792789Sahrens * kernel heap space before we ever run out of available physical 1793789Sahrens * memory. Most checks of the size of the heap_area compare against 1794789Sahrens * tune.t_minarmem, which is the minimum available real memory that we 1795789Sahrens * can have in the system. However, this is generally fixed at 25 pages 1796789Sahrens * which is so low that it's useless. In this comparison, we seek to 1797789Sahrens * calculate the total heap-size, and reclaim if more than 3/4ths of the 17985450Sbrendan * heap is allocated. (Or, in the calculation, if less than 1/4th is 1799789Sahrens * free) 1800789Sahrens */ 1801789Sahrens if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1802789Sahrens (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1803789Sahrens return (1); 1804789Sahrens #endif 1805789Sahrens 1806789Sahrens #else 1807789Sahrens if (spa_get_random(100) == 0) 1808789Sahrens return (1); 1809789Sahrens #endif 1810789Sahrens return (0); 1811789Sahrens } 1812789Sahrens 1813789Sahrens static void 1814789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1815789Sahrens { 1816789Sahrens size_t i; 1817789Sahrens kmem_cache_t *prev_cache = NULL; 18183290Sjohansen kmem_cache_t *prev_data_cache = NULL; 1819789Sahrens extern kmem_cache_t *zio_buf_cache[]; 18203290Sjohansen extern kmem_cache_t *zio_data_buf_cache[]; 1821789Sahrens 18221484Sek110237 #ifdef _KERNEL 18234309Smaybee if (arc_meta_used >= arc_meta_limit) { 18244309Smaybee /* 18254309Smaybee * We are exceeding our meta-data cache limit. 18264309Smaybee * Purge some DNLC entries to release holds on meta-data. 
18274309Smaybee */ 18284309Smaybee dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 18294309Smaybee } 18301936Smaybee #if defined(__i386) 18311936Smaybee /* 18321936Smaybee * Reclaim unused memory from all kmem caches. 18331936Smaybee */ 18341936Smaybee kmem_reap(); 18351936Smaybee #endif 18361484Sek110237 #endif 18371484Sek110237 1838789Sahrens /* 18395450Sbrendan * An aggressive reclamation will shrink the cache size as well as 18401544Seschrock * reap free buffers from the arc kmem caches. 1841789Sahrens */ 1842789Sahrens if (strat == ARC_RECLAIM_AGGR) 18433158Smaybee arc_shrink(); 1844789Sahrens 1845789Sahrens for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1846789Sahrens if (zio_buf_cache[i] != prev_cache) { 1847789Sahrens prev_cache = zio_buf_cache[i]; 1848789Sahrens kmem_cache_reap_now(zio_buf_cache[i]); 1849789Sahrens } 18503290Sjohansen if (zio_data_buf_cache[i] != prev_data_cache) { 18513290Sjohansen prev_data_cache = zio_data_buf_cache[i]; 18523290Sjohansen kmem_cache_reap_now(zio_data_buf_cache[i]); 18533290Sjohansen } 1854789Sahrens } 18551544Seschrock kmem_cache_reap_now(buf_cache); 18561544Seschrock kmem_cache_reap_now(hdr_cache); 1857789Sahrens } 1858789Sahrens 1859789Sahrens static void 1860789Sahrens arc_reclaim_thread(void) 1861789Sahrens { 1862789Sahrens clock_t growtime = 0; 1863789Sahrens arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1864789Sahrens callb_cpr_t cpr; 1865789Sahrens 1866789Sahrens CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1867789Sahrens 1868789Sahrens mutex_enter(&arc_reclaim_thr_lock); 1869789Sahrens while (arc_thread_exit == 0) { 1870789Sahrens if (arc_reclaim_needed()) { 1871789Sahrens 18723403Sbmc if (arc_no_grow) { 1873789Sahrens if (last_reclaim == ARC_RECLAIM_CONS) { 1874789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1875789Sahrens } else { 1876789Sahrens last_reclaim = ARC_RECLAIM_CONS; 1877789Sahrens } 1878789Sahrens } else { 18793403Sbmc arc_no_grow = TRUE; 1880789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1881789Sahrens membar_producer(); 1882789Sahrens } 1883789Sahrens 1884789Sahrens /* reset the growth delay for every reclaim */ 1885789Sahrens growtime = lbolt + (arc_grow_retry * hz); 1886789Sahrens 1887789Sahrens arc_kmem_reap_now(last_reclaim); 1888789Sahrens 18894309Smaybee } else if (arc_no_grow && lbolt >= growtime) { 18903403Sbmc arc_no_grow = FALSE; 1891789Sahrens } 1892789Sahrens 18933403Sbmc if (2 * arc_c < arc_size + 18943403Sbmc arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) 18953298Smaybee arc_adjust(); 18963298Smaybee 18971544Seschrock if (arc_eviction_list != NULL) 18981544Seschrock arc_do_user_evicts(); 18991544Seschrock 1900789Sahrens /* block until needed, or one second, whichever is shorter */ 1901789Sahrens CALLB_CPR_SAFE_BEGIN(&cpr); 1902789Sahrens (void) cv_timedwait(&arc_reclaim_thr_cv, 1903789Sahrens &arc_reclaim_thr_lock, (lbolt + hz)); 1904789Sahrens CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1905789Sahrens } 1906789Sahrens 1907789Sahrens arc_thread_exit = 0; 1908789Sahrens cv_broadcast(&arc_reclaim_thr_cv); 1909789Sahrens CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1910789Sahrens thread_exit(); 1911789Sahrens } 1912789Sahrens 19131544Seschrock /* 19141544Seschrock * Adapt arc info given the number of bytes we are trying to add and 19151544Seschrock * the state that we are coming from. This function is only called 19161544Seschrock * when we are adding new content to the cache. 
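 *
 * A hit in the MRU ghost list grows the MRU target (arc_p), while a
 * hit in the MFU ghost list shrinks it; each adjustment is scaled by
 * the ratio of the two ghost list sizes, and arc_p is kept within
 * [0, arc_c].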
19171544Seschrock */ 1918789Sahrens static void 19191544Seschrock arc_adapt(int bytes, arc_state_t *state) 1920789Sahrens { 19211544Seschrock int mult; 19221544Seschrock 19235450Sbrendan if (state == arc_l2c_only) 19245450Sbrendan return; 19255450Sbrendan 19261544Seschrock ASSERT(bytes > 0); 1927789Sahrens /* 19281544Seschrock * Adapt the target size of the MRU list: 19291544Seschrock * - if we just hit in the MRU ghost list, then increase 19301544Seschrock * the target size of the MRU list. 19311544Seschrock * - if we just hit in the MFU ghost list, then increase 19321544Seschrock * the target size of the MFU list by decreasing the 19331544Seschrock * target size of the MRU list. 1934789Sahrens */ 19353403Sbmc if (state == arc_mru_ghost) { 19363403Sbmc mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 19373403Sbmc 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 19381544Seschrock 19393403Sbmc arc_p = MIN(arc_c, arc_p + bytes * mult); 19403403Sbmc } else if (state == arc_mfu_ghost) { 19413403Sbmc mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 19423403Sbmc 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 19431544Seschrock 19443403Sbmc arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 19451544Seschrock } 19463403Sbmc ASSERT((int64_t)arc_p >= 0); 1947789Sahrens 1948789Sahrens if (arc_reclaim_needed()) { 1949789Sahrens cv_signal(&arc_reclaim_thr_cv); 1950789Sahrens return; 1951789Sahrens } 1952789Sahrens 19533403Sbmc if (arc_no_grow) 1954789Sahrens return; 1955789Sahrens 19563403Sbmc if (arc_c >= arc_c_max) 19571544Seschrock return; 19581544Seschrock 1959789Sahrens /* 19601544Seschrock * If we're within (2 * maxblocksize) bytes of the target 19611544Seschrock * cache size, increment the target cache size 1962789Sahrens */ 19633403Sbmc if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 19643403Sbmc atomic_add_64(&arc_c, (int64_t)bytes); 19653403Sbmc if (arc_c > arc_c_max) 19663403Sbmc arc_c = arc_c_max; 19673403Sbmc else if (state == arc_anon) 19683403Sbmc atomic_add_64(&arc_p, (int64_t)bytes); 19693403Sbmc if (arc_p > arc_c) 19703403Sbmc arc_p = arc_c; 1971789Sahrens } 19723403Sbmc ASSERT((int64_t)arc_p >= 0); 1973789Sahrens } 1974789Sahrens 1975789Sahrens /* 19761544Seschrock * Check if the cache has reached its limits and eviction is required 19771544Seschrock * prior to insert. 1978789Sahrens */ 1979789Sahrens static int 19804309Smaybee arc_evict_needed(arc_buf_contents_t type) 1981789Sahrens { 19824309Smaybee if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 19834309Smaybee return (1); 19844309Smaybee 19854309Smaybee #ifdef _KERNEL 19864309Smaybee /* 19874309Smaybee * If zio data pages are being allocated out of a separate heap segment, 19884309Smaybee * then enforce that the size of available vmem for this area remains 19894309Smaybee * above about 1/32nd free. 19904309Smaybee */ 19914309Smaybee if (type == ARC_BUFC_DATA && zio_arena != NULL && 19924309Smaybee vmem_size(zio_arena, VMEM_FREE) < 19934309Smaybee (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 19944309Smaybee return (1); 19954309Smaybee #endif 19964309Smaybee 1997789Sahrens if (arc_reclaim_needed()) 1998789Sahrens return (1); 1999789Sahrens 20003403Sbmc return (arc_size > arc_c); 2001789Sahrens } 2002789Sahrens 2003789Sahrens /* 20042688Smaybee * The buffer, supplied as the first argument, needs a data block. 20052688Smaybee * So, if we are at cache max, determine which cache should be victimized. 
20062688Smaybee * We have the following cases: 2007789Sahrens * 20083403Sbmc * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2009789Sahrens * In this situation if we're out of space, but the resident size of the MFU is 2010789Sahrens * under the limit, victimize the MFU cache to satisfy this insertion request. 2011789Sahrens * 20123403Sbmc * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2013789Sahrens * Here, we've used up all of the available space for the MRU, so we need to 2014789Sahrens * evict from our own cache instead. Evict from the set of resident MRU 2015789Sahrens * entries. 2016789Sahrens * 20173403Sbmc * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2018789Sahrens * c minus p represents the MFU space in the cache, since p is the size of the 2019789Sahrens * cache that is dedicated to the MRU. In this situation there's still space on 2020789Sahrens * the MFU side, so the MRU side needs to be victimized. 2021789Sahrens * 20223403Sbmc * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2023789Sahrens * MFU's resident set is consuming more space than it has been allotted. In 2024789Sahrens * this situation, we must victimize our own cache, the MFU, for this insertion. 2025789Sahrens */ 2026789Sahrens static void 20272688Smaybee arc_get_data_buf(arc_buf_t *buf) 2028789Sahrens { 20293290Sjohansen arc_state_t *state = buf->b_hdr->b_state; 20303290Sjohansen uint64_t size = buf->b_hdr->b_size; 20313290Sjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 20322688Smaybee 20332688Smaybee arc_adapt(size, state); 2034789Sahrens 20352688Smaybee /* 20362688Smaybee * We have not yet reached cache maximum size, 20372688Smaybee * just allocate a new buffer. 20382688Smaybee */ 20394309Smaybee if (!arc_evict_needed(type)) { 20403290Sjohansen if (type == ARC_BUFC_METADATA) { 20413290Sjohansen buf->b_data = zio_buf_alloc(size); 20424309Smaybee arc_space_consume(size); 20433290Sjohansen } else { 20443290Sjohansen ASSERT(type == ARC_BUFC_DATA); 20453290Sjohansen buf->b_data = zio_data_buf_alloc(size); 20464309Smaybee atomic_add_64(&arc_size, size); 20473290Sjohansen } 20482688Smaybee goto out; 20492688Smaybee } 20502688Smaybee 20512688Smaybee /* 20522688Smaybee * If we are prefetching from the mfu ghost list, this buffer 20532688Smaybee * will end up on the mru list; so steal space from there. 20542688Smaybee */ 20553403Sbmc if (state == arc_mfu_ghost) 20563403Sbmc state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 20573403Sbmc else if (state == arc_mru_ghost) 20583403Sbmc state = arc_mru; 2059789Sahrens 20603403Sbmc if (state == arc_mru || state == arc_anon) { 20613403Sbmc uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 20624309Smaybee state = (arc_mfu->arcs_lsize[type] > 0 && 20634309Smaybee arc_p > mru_used) ? arc_mfu : arc_mru; 2064789Sahrens } else { 20652688Smaybee /* MFU cases */ 20663403Sbmc uint64_t mfu_space = arc_c - arc_p; 20674309Smaybee state = (arc_mru->arcs_lsize[type] > 0 && 20684309Smaybee mfu_space > arc_mfu->arcs_size) ? 
arc_mru : arc_mfu; 20692688Smaybee } 20705642Smaybee if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 20713290Sjohansen if (type == ARC_BUFC_METADATA) { 20723290Sjohansen buf->b_data = zio_buf_alloc(size); 20734309Smaybee arc_space_consume(size); 20743290Sjohansen } else { 20753290Sjohansen ASSERT(type == ARC_BUFC_DATA); 20763290Sjohansen buf->b_data = zio_data_buf_alloc(size); 20774309Smaybee atomic_add_64(&arc_size, size); 20783290Sjohansen } 20793403Sbmc ARCSTAT_BUMP(arcstat_recycle_miss); 20802688Smaybee } 20812688Smaybee ASSERT(buf->b_data != NULL); 20822688Smaybee out: 20832688Smaybee /* 20842688Smaybee * Update the state size. Note that ghost states have a 20852688Smaybee * "ghost size" and so don't need to be updated. 20862688Smaybee */ 20872688Smaybee if (!GHOST_STATE(buf->b_hdr->b_state)) { 20882688Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 20892688Smaybee 20903403Sbmc atomic_add_64(&hdr->b_state->arcs_size, size); 20912688Smaybee if (list_link_active(&hdr->b_arc_node)) { 20922688Smaybee ASSERT(refcount_is_zero(&hdr->b_refcnt)); 20934309Smaybee atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2094789Sahrens } 20953298Smaybee /* 20963298Smaybee * If we are growing the cache, and we are adding anonymous 20973403Sbmc * data, and we have outgrown arc_p, update arc_p 20983298Smaybee */ 20993403Sbmc if (arc_size < arc_c && hdr->b_state == arc_anon && 21003403Sbmc arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 21013403Sbmc arc_p = MIN(arc_c, arc_p + size); 2102789Sahrens } 2103789Sahrens } 2104789Sahrens 2105789Sahrens /* 2106789Sahrens * This routine is called whenever a buffer is accessed. 21071544Seschrock * NOTE: the hash lock is dropped in this function. 2108789Sahrens */ 2109789Sahrens static void 21102688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2111789Sahrens { 2112789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 2113789Sahrens 21143403Sbmc if (buf->b_state == arc_anon) { 2115789Sahrens /* 2116789Sahrens * This buffer is not in the cache, and does not 2117789Sahrens * appear in our "ghost" list. Add the new buffer 2118789Sahrens * to the MRU state. 2119789Sahrens */ 2120789Sahrens 2121789Sahrens ASSERT(buf->b_arc_access == 0); 2122789Sahrens buf->b_arc_access = lbolt; 21231544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 21243403Sbmc arc_change_state(arc_mru, buf, hash_lock); 2125789Sahrens 21263403Sbmc } else if (buf->b_state == arc_mru) { 2127789Sahrens /* 21282391Smaybee * If this buffer is here because of a prefetch, then either: 21292391Smaybee * - clear the flag if this is a "referencing" read 21302391Smaybee * (any subsequent access will bump this into the MFU state). 21312391Smaybee * or 21322391Smaybee * - move the buffer to the head of the list if this is 21332391Smaybee * another prefetch (to make it less likely to be evicted). 2134789Sahrens */ 2135789Sahrens if ((buf->b_flags & ARC_PREFETCH) != 0) { 21362391Smaybee if (refcount_count(&buf->b_refcnt) == 0) { 21372391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 21382391Smaybee } else { 21392391Smaybee buf->b_flags &= ~ARC_PREFETCH; 21403403Sbmc ARCSTAT_BUMP(arcstat_mru_hits); 21412391Smaybee } 21422391Smaybee buf->b_arc_access = lbolt; 2143789Sahrens return; 2144789Sahrens } 2145789Sahrens 2146789Sahrens /* 2147789Sahrens * This buffer has been "accessed" only once so far, 2148789Sahrens * but it is still in the cache. Move it to the MFU 2149789Sahrens * state. 
2150789Sahrens */ 2151789Sahrens if (lbolt > buf->b_arc_access + ARC_MINTIME) { 2152789Sahrens /* 2153789Sahrens * More than 125ms have passed since we 2154789Sahrens * instantiated this buffer. Move it to the 2155789Sahrens * most frequently used state. 2156789Sahrens */ 2157789Sahrens buf->b_arc_access = lbolt; 21581544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 21593403Sbmc arc_change_state(arc_mfu, buf, hash_lock); 2160789Sahrens } 21613403Sbmc ARCSTAT_BUMP(arcstat_mru_hits); 21623403Sbmc } else if (buf->b_state == arc_mru_ghost) { 2163789Sahrens arc_state_t *new_state; 2164789Sahrens /* 2165789Sahrens * This buffer has been "accessed" recently, but 2166789Sahrens * was evicted from the cache. Move it to the 2167789Sahrens * MFU state. 2168789Sahrens */ 2169789Sahrens 2170789Sahrens if (buf->b_flags & ARC_PREFETCH) { 21713403Sbmc new_state = arc_mru; 21722391Smaybee if (refcount_count(&buf->b_refcnt) > 0) 21732391Smaybee buf->b_flags &= ~ARC_PREFETCH; 21741544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2175789Sahrens } else { 21763403Sbmc new_state = arc_mfu; 21771544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2178789Sahrens } 2179789Sahrens 2180789Sahrens buf->b_arc_access = lbolt; 2181789Sahrens arc_change_state(new_state, buf, hash_lock); 2182789Sahrens 21833403Sbmc ARCSTAT_BUMP(arcstat_mru_ghost_hits); 21843403Sbmc } else if (buf->b_state == arc_mfu) { 2185789Sahrens /* 2186789Sahrens * This buffer has been accessed more than once and is 2187789Sahrens * still in the cache. Keep it in the MFU state. 2188789Sahrens * 21892391Smaybee * NOTE: an add_reference() that occurred when we did 21902391Smaybee * the arc_read() will have kicked this off the list. 21912391Smaybee * If it was a prefetch, we will explicitly move it to 21922391Smaybee * the head of the list now. 2193789Sahrens */ 21942391Smaybee if ((buf->b_flags & ARC_PREFETCH) != 0) { 21952391Smaybee ASSERT(refcount_count(&buf->b_refcnt) == 0); 21962391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 21972391Smaybee } 21983403Sbmc ARCSTAT_BUMP(arcstat_mfu_hits); 21992391Smaybee buf->b_arc_access = lbolt; 22003403Sbmc } else if (buf->b_state == arc_mfu_ghost) { 22013403Sbmc arc_state_t *new_state = arc_mfu; 2202789Sahrens /* 2203789Sahrens * This buffer has been accessed more than once but has 2204789Sahrens * been evicted from the cache. Move it back to the 2205789Sahrens * MFU state. 2206789Sahrens */ 2207789Sahrens 22082391Smaybee if (buf->b_flags & ARC_PREFETCH) { 22092391Smaybee /* 22102391Smaybee * This is a prefetch access... 22112391Smaybee * move this block back to the MRU state. 22122391Smaybee */ 22132391Smaybee ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 22143403Sbmc new_state = arc_mru; 22152391Smaybee } 22162391Smaybee 2217789Sahrens buf->b_arc_access = lbolt; 22181544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 22192391Smaybee arc_change_state(new_state, buf, hash_lock); 2220789Sahrens 22213403Sbmc ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 22225450Sbrendan } else if (buf->b_state == arc_l2c_only) { 22235450Sbrendan /* 22245450Sbrendan * This buffer is on the 2nd Level ARC. 
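 * Accessing it again moves the header to the MFU state, so the
 * block rejoins the primary cache.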
22255450Sbrendan */ 22265450Sbrendan 22275450Sbrendan buf->b_arc_access = lbolt; 22285450Sbrendan DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 22295450Sbrendan arc_change_state(arc_mfu, buf, hash_lock); 2230789Sahrens } else { 2231789Sahrens ASSERT(!"invalid arc state"); 2232789Sahrens } 2233789Sahrens } 2234789Sahrens 2235789Sahrens /* a generic arc_done_func_t which you can use */ 2236789Sahrens /* ARGSUSED */ 2237789Sahrens void 2238789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2239789Sahrens { 2240789Sahrens bcopy(buf->b_data, arg, buf->b_hdr->b_size); 22411544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2242789Sahrens } 2243789Sahrens 22444309Smaybee /* a generic arc_done_func_t */ 2245789Sahrens void 2246789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2247789Sahrens { 2248789Sahrens arc_buf_t **bufp = arg; 2249789Sahrens if (zio && zio->io_error) { 22501544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2251789Sahrens *bufp = NULL; 2252789Sahrens } else { 2253789Sahrens *bufp = buf; 2254789Sahrens } 2255789Sahrens } 2256789Sahrens 2257789Sahrens static void 2258789Sahrens arc_read_done(zio_t *zio) 2259789Sahrens { 22601589Smaybee arc_buf_hdr_t *hdr, *found; 2261789Sahrens arc_buf_t *buf; 2262789Sahrens arc_buf_t *abuf; /* buffer we're assigning to callback */ 2263789Sahrens kmutex_t *hash_lock; 2264789Sahrens arc_callback_t *callback_list, *acb; 2265789Sahrens int freeable = FALSE; 2266789Sahrens 2267789Sahrens buf = zio->io_private; 2268789Sahrens hdr = buf->b_hdr; 2269789Sahrens 22701589Smaybee /* 22711589Smaybee * The hdr was inserted into hash-table and removed from lists 22721589Smaybee * prior to starting I/O. We should find this header, since 22731589Smaybee * it's in the hash table, and it should be legit since it's 22741589Smaybee * not possible to evict it during the I/O. The only possible 22751589Smaybee * reason for it not to be found is if we were freed during the 22761589Smaybee * read. 
22771589Smaybee */ 22781589Smaybee found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 22793093Sahrens &hash_lock); 2280789Sahrens 22811589Smaybee ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 22825450Sbrendan (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 22835450Sbrendan (found == hdr && HDR_L2_READING(hdr))); 22845450Sbrendan 22855450Sbrendan hdr->b_flags &= ~(ARC_L2_READING|ARC_L2_EVICTED); 22865450Sbrendan if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 22875450Sbrendan hdr->b_flags |= ARC_DONT_L2CACHE; 2288789Sahrens 2289789Sahrens /* byteswap if necessary */ 2290789Sahrens callback_list = hdr->b_acb; 2291789Sahrens ASSERT(callback_list != NULL); 2292789Sahrens if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 2293789Sahrens callback_list->acb_byteswap(buf->b_data, hdr->b_size); 2294789Sahrens 22955450Sbrendan arc_cksum_compute(buf, B_FALSE); 22963093Sahrens 2297789Sahrens /* create copies of the data buffer for the callers */ 2298789Sahrens abuf = buf; 2299789Sahrens for (acb = callback_list; acb; acb = acb->acb_next) { 2300789Sahrens if (acb->acb_done) { 23012688Smaybee if (abuf == NULL) 23022688Smaybee abuf = arc_buf_clone(buf); 2303789Sahrens acb->acb_buf = abuf; 2304789Sahrens abuf = NULL; 2305789Sahrens } 2306789Sahrens } 2307789Sahrens hdr->b_acb = NULL; 2308789Sahrens hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 23091544Seschrock ASSERT(!HDR_BUF_AVAILABLE(hdr)); 23101544Seschrock if (abuf == buf) 23111544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 2312789Sahrens 2313789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2314789Sahrens 2315789Sahrens if (zio->io_error != 0) { 2316789Sahrens hdr->b_flags |= ARC_IO_ERROR; 23173403Sbmc if (hdr->b_state != arc_anon) 23183403Sbmc arc_change_state(arc_anon, hdr, hash_lock); 23191544Seschrock if (HDR_IN_HASH_TABLE(hdr)) 23201544Seschrock buf_hash_remove(hdr); 2321789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 23222391Smaybee /* convert checksum errors into IO errors */ 23231544Seschrock if (zio->io_error == ECKSUM) 23241544Seschrock zio->io_error = EIO; 2325789Sahrens } 2326789Sahrens 23271544Seschrock /* 23282391Smaybee * Broadcast before we drop the hash_lock to avoid the possibility 23292391Smaybee * that the hdr (and hence the cv) might be freed before we get to 23302391Smaybee * the cv_broadcast(). 23311544Seschrock */ 23321544Seschrock cv_broadcast(&hdr->b_cv); 23331544Seschrock 23341589Smaybee if (hash_lock) { 2335789Sahrens /* 2336789Sahrens * Only call arc_access on anonymous buffers. This is because 2337789Sahrens * if we've issued an I/O for an evicted buffer, we've already 2338789Sahrens * called arc_access (to prevent any simultaneous readers from 2339789Sahrens * getting confused). 2340789Sahrens */ 23413403Sbmc if (zio->io_error == 0 && hdr->b_state == arc_anon) 23422688Smaybee arc_access(hdr, hash_lock); 23432688Smaybee mutex_exit(hash_lock); 2344789Sahrens } else { 2345789Sahrens /* 2346789Sahrens * This block was freed while we waited for the read to 2347789Sahrens * complete. It has been removed from the hash table and 2348789Sahrens * moved to the anonymous state (so that it won't show up 2349789Sahrens * in the cache). 
2350789Sahrens */ 23513403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2352789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 2353789Sahrens } 2354789Sahrens 2355789Sahrens /* execute each callback and free its structure */ 2356789Sahrens while ((acb = callback_list) != NULL) { 2357789Sahrens if (acb->acb_done) 2358789Sahrens acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2359789Sahrens 2360789Sahrens if (acb->acb_zio_dummy != NULL) { 2361789Sahrens acb->acb_zio_dummy->io_error = zio->io_error; 2362789Sahrens zio_nowait(acb->acb_zio_dummy); 2363789Sahrens } 2364789Sahrens 2365789Sahrens callback_list = acb->acb_next; 2366789Sahrens kmem_free(acb, sizeof (arc_callback_t)); 2367789Sahrens } 2368789Sahrens 2369789Sahrens if (freeable) 23701544Seschrock arc_hdr_destroy(hdr); 2371789Sahrens } 2372789Sahrens 2373789Sahrens /* 2374789Sahrens * "Read" the block at the specified DVA (in bp) via the 2375789Sahrens * cache. If the block is found in the cache, invoke the provided 2376789Sahrens * callback immediately and return. Note that the `zio' parameter 2377789Sahrens * in the callback will be NULL in this case, since no IO was 2378789Sahrens * required. If the block is not in the cache, pass the read request 2379789Sahrens * on to the spa with a substitute callback function, so that the 2380789Sahrens * requested block will be added to the cache. 2381789Sahrens * 2382789Sahrens * If a read request arrives for a block that has a read in-progress, 2383789Sahrens * either wait for the in-progress read to complete (and return the 2384789Sahrens * results); or, if this is a read with a "done" func, add a record 2385789Sahrens * to the read to invoke the "done" func when the read completes, 2386789Sahrens * and return; or just return. 2387789Sahrens * 2388789Sahrens * arc_read_done() will invoke all the requested "done" functions 2389789Sahrens * for readers of this block. 
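 *
 * As an illustration only (the byteswap, callback, and private-data
 * names below are hypothetical, not defined in this file), a typical
 * asynchronous cached read might be issued as:
 *
 *	uint32_t aflags = ARC_NOWAIT;
 *	(void) arc_read(NULL, spa, bp, byteswap_func, my_done_func,
 *	    my_private, ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
 *	    &aflags, &zb);
 *
 * where my_done_func() is later handed the arc_buf_t, with a NULL
 * zio argument if the block was already cached.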
2390789Sahrens */ 2391789Sahrens int 2392789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2393789Sahrens arc_done_func_t *done, void *private, int priority, int flags, 23942391Smaybee uint32_t *arc_flags, zbookmark_t *zb) 2395789Sahrens { 2396789Sahrens arc_buf_hdr_t *hdr; 2397789Sahrens arc_buf_t *buf; 2398789Sahrens kmutex_t *hash_lock; 23995450Sbrendan zio_t *rzio; 2400789Sahrens 2401789Sahrens top: 2402789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 24031544Seschrock if (hdr && hdr->b_datacnt > 0) { 2404789Sahrens 24052391Smaybee *arc_flags |= ARC_CACHED; 24062391Smaybee 2407789Sahrens if (HDR_IO_IN_PROGRESS(hdr)) { 24082391Smaybee 24092391Smaybee if (*arc_flags & ARC_WAIT) { 24102391Smaybee cv_wait(&hdr->b_cv, hash_lock); 24112391Smaybee mutex_exit(hash_lock); 24122391Smaybee goto top; 24132391Smaybee } 24142391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 24152391Smaybee 24162391Smaybee if (done) { 2417789Sahrens arc_callback_t *acb = NULL; 2418789Sahrens 2419789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), 2420789Sahrens KM_SLEEP); 2421789Sahrens acb->acb_done = done; 2422789Sahrens acb->acb_private = private; 2423789Sahrens acb->acb_byteswap = swap; 2424789Sahrens if (pio != NULL) 2425789Sahrens acb->acb_zio_dummy = zio_null(pio, 2426789Sahrens spa, NULL, NULL, flags); 2427789Sahrens 2428789Sahrens ASSERT(acb->acb_done != NULL); 2429789Sahrens acb->acb_next = hdr->b_acb; 2430789Sahrens hdr->b_acb = acb; 2431789Sahrens add_reference(hdr, hash_lock, private); 2432789Sahrens mutex_exit(hash_lock); 2433789Sahrens return (0); 2434789Sahrens } 2435789Sahrens mutex_exit(hash_lock); 2436789Sahrens return (0); 2437789Sahrens } 2438789Sahrens 24393403Sbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2440789Sahrens 24411544Seschrock if (done) { 24422688Smaybee add_reference(hdr, hash_lock, private); 24431544Seschrock /* 24441544Seschrock * If this block is already in use, create a new 24451544Seschrock * copy of the data so that we will be guaranteed 24461544Seschrock * that arc_release() will always succeed. 
24471544Seschrock */ 24481544Seschrock buf = hdr->b_buf; 24491544Seschrock ASSERT(buf); 24501544Seschrock ASSERT(buf->b_data); 24512688Smaybee if (HDR_BUF_AVAILABLE(hdr)) { 24521544Seschrock ASSERT(buf->b_efunc == NULL); 24531544Seschrock hdr->b_flags &= ~ARC_BUF_AVAILABLE; 24542688Smaybee } else { 24552688Smaybee buf = arc_buf_clone(buf); 24561544Seschrock } 24572391Smaybee } else if (*arc_flags & ARC_PREFETCH && 24582391Smaybee refcount_count(&hdr->b_refcnt) == 0) { 24592391Smaybee hdr->b_flags |= ARC_PREFETCH; 2460789Sahrens } 2461789Sahrens DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 24622688Smaybee arc_access(hdr, hash_lock); 24632688Smaybee mutex_exit(hash_lock); 24643403Sbmc ARCSTAT_BUMP(arcstat_hits); 24653403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 24663403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 24673403Sbmc data, metadata, hits); 24683403Sbmc 2469789Sahrens if (done) 2470789Sahrens done(NULL, buf, private); 2471789Sahrens } else { 2472789Sahrens uint64_t size = BP_GET_LSIZE(bp); 2473789Sahrens arc_callback_t *acb; 2474789Sahrens 2475789Sahrens if (hdr == NULL) { 2476789Sahrens /* this block is not in the cache */ 2477789Sahrens arc_buf_hdr_t *exists; 24783290Sjohansen arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 24793290Sjohansen buf = arc_buf_alloc(spa, size, private, type); 2480789Sahrens hdr = buf->b_hdr; 2481789Sahrens hdr->b_dva = *BP_IDENTITY(bp); 2482789Sahrens hdr->b_birth = bp->blk_birth; 2483789Sahrens hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2484789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2485789Sahrens if (exists) { 2486789Sahrens /* somebody beat us to the hash insert */ 2487789Sahrens mutex_exit(hash_lock); 2488789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2489789Sahrens hdr->b_birth = 0; 2490789Sahrens hdr->b_cksum0 = 0; 24911544Seschrock (void) arc_buf_remove_ref(buf, private); 2492789Sahrens goto top; /* restart the IO request */ 2493789Sahrens } 24942391Smaybee /* if this is a prefetch, we don't have a reference */ 24952391Smaybee if (*arc_flags & ARC_PREFETCH) { 24962391Smaybee (void) remove_reference(hdr, hash_lock, 24972391Smaybee private); 24982391Smaybee hdr->b_flags |= ARC_PREFETCH; 24992391Smaybee } 25002391Smaybee if (BP_GET_LEVEL(bp) > 0) 25012391Smaybee hdr->b_flags |= ARC_INDIRECT; 2502789Sahrens } else { 2503789Sahrens /* this block is in the ghost cache */ 25041544Seschrock ASSERT(GHOST_STATE(hdr->b_state)); 25051544Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 25062391Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 25072391Smaybee ASSERT(hdr->b_buf == NULL); 2508789Sahrens 25092391Smaybee /* if this is a prefetch, we don't have a reference */ 25102391Smaybee if (*arc_flags & ARC_PREFETCH) 25112391Smaybee hdr->b_flags |= ARC_PREFETCH; 25122391Smaybee else 25132391Smaybee add_reference(hdr, hash_lock, private); 25146245Smaybee buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 25151544Seschrock buf->b_hdr = hdr; 25162688Smaybee buf->b_data = NULL; 25171544Seschrock buf->b_efunc = NULL; 25181544Seschrock buf->b_private = NULL; 25191544Seschrock buf->b_next = NULL; 25201544Seschrock hdr->b_buf = buf; 25212688Smaybee arc_get_data_buf(buf); 25221544Seschrock ASSERT(hdr->b_datacnt == 0); 25231544Seschrock hdr->b_datacnt = 1; 25242391Smaybee 2525789Sahrens } 2526789Sahrens 2527789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2528789Sahrens acb->acb_done = done; 2529789Sahrens acb->acb_private = private; 2530789Sahrens acb->acb_byteswap = swap; 2531789Sahrens 2532789Sahrens 
ASSERT(hdr->b_acb == NULL); 2533789Sahrens hdr->b_acb = acb; 2534789Sahrens hdr->b_flags |= ARC_IO_IN_PROGRESS; 2535789Sahrens 2536789Sahrens /* 2537789Sahrens * If the buffer has been evicted, migrate it to a present state 2538789Sahrens * before issuing the I/O. Once we drop the hash-table lock, 2539789Sahrens * the header will be marked as I/O in progress and have an 2540789Sahrens * attached buffer. At this point, anybody who finds this 2541789Sahrens * buffer ought to notice that it's legit but has a pending I/O. 2542789Sahrens */ 2543789Sahrens 25441544Seschrock if (GHOST_STATE(hdr->b_state)) 25452688Smaybee arc_access(hdr, hash_lock); 2546789Sahrens 2547789Sahrens ASSERT3U(hdr->b_size, ==, size); 25481596Sahrens DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 25491596Sahrens zbookmark_t *, zb); 25503403Sbmc ARCSTAT_BUMP(arcstat_misses); 25513403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 25523403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 25533403Sbmc data, metadata, misses); 25541544Seschrock 25555450Sbrendan if (l2arc_ndev != 0) { 25565450Sbrendan /* 25575450Sbrendan * Read from the L2ARC if the following are true: 25585450Sbrendan * 1. This buffer has L2ARC metadata. 25595450Sbrendan * 2. This buffer isn't currently writing to the L2ARC. 25605450Sbrendan */ 25615450Sbrendan if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr)) { 25625450Sbrendan vdev_t *vd = hdr->b_l2hdr->b_dev->l2ad_vdev; 25635450Sbrendan daddr_t addr = hdr->b_l2hdr->b_daddr; 25645450Sbrendan l2arc_read_callback_t *cb; 25655450Sbrendan 2566*6643Seschrock if (vdev_is_dead(vd)) 2567*6643Seschrock goto skip_l2arc; 25685450Sbrendan 25695450Sbrendan hdr->b_flags |= ARC_L2_READING; 25705450Sbrendan mutex_exit(hash_lock); 25715450Sbrendan 2572*6643Seschrock DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2573*6643Seschrock ARCSTAT_BUMP(arcstat_l2_hits); 2574*6643Seschrock 25755450Sbrendan cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 25765450Sbrendan KM_SLEEP); 25775450Sbrendan cb->l2rcb_buf = buf; 25785450Sbrendan cb->l2rcb_spa = spa; 25795450Sbrendan cb->l2rcb_bp = *bp; 25805450Sbrendan cb->l2rcb_zb = *zb; 25815450Sbrendan cb->l2rcb_flags = flags; 25825450Sbrendan 25835450Sbrendan /* 25845450Sbrendan * l2arc read. 
25855450Sbrendan */ 25865450Sbrendan rzio = zio_read_phys(pio, vd, addr, size, 25875450Sbrendan buf->b_data, ZIO_CHECKSUM_OFF, 25885450Sbrendan l2arc_read_done, cb, priority, 25895450Sbrendan flags | ZIO_FLAG_DONT_CACHE, B_FALSE); 25905450Sbrendan DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 25915450Sbrendan zio_t *, rzio); 25925450Sbrendan 25935450Sbrendan if (*arc_flags & ARC_WAIT) 25945450Sbrendan return (zio_wait(rzio)); 25955450Sbrendan 25965450Sbrendan ASSERT(*arc_flags & ARC_NOWAIT); 25975450Sbrendan zio_nowait(rzio); 25985450Sbrendan return (0); 25995450Sbrendan } else { 26005450Sbrendan DTRACE_PROBE1(l2arc__miss, 26015450Sbrendan arc_buf_hdr_t *, hdr); 26025450Sbrendan ARCSTAT_BUMP(arcstat_l2_misses); 26035450Sbrendan if (HDR_L2_WRITING(hdr)) 26045450Sbrendan ARCSTAT_BUMP(arcstat_l2_rw_clash); 26055450Sbrendan } 26065450Sbrendan } 2607*6643Seschrock 2608*6643Seschrock skip_l2arc: 26095450Sbrendan mutex_exit(hash_lock); 26105450Sbrendan 2611789Sahrens rzio = zio_read(pio, spa, bp, buf->b_data, size, 26121544Seschrock arc_read_done, buf, priority, flags, zb); 2613789Sahrens 26142391Smaybee if (*arc_flags & ARC_WAIT) 2615789Sahrens return (zio_wait(rzio)); 2616789Sahrens 26172391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 2618789Sahrens zio_nowait(rzio); 2619789Sahrens } 2620789Sahrens return (0); 2621789Sahrens } 2622789Sahrens 2623789Sahrens /* 2624789Sahrens * arc_read() variant to support pool traversal. If the block is already 2625789Sahrens * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2626789Sahrens * The idea is that we don't want pool traversal filling up memory, but 2627789Sahrens * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2628789Sahrens */ 2629789Sahrens int 2630789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2631789Sahrens { 2632789Sahrens arc_buf_hdr_t *hdr; 2633789Sahrens kmutex_t *hash_mtx; 2634789Sahrens int rc = 0; 2635789Sahrens 2636789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2637789Sahrens 26381544Seschrock if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 26391544Seschrock arc_buf_t *buf = hdr->b_buf; 26401544Seschrock 26411544Seschrock ASSERT(buf); 26421544Seschrock while (buf->b_data == NULL) { 26431544Seschrock buf = buf->b_next; 26441544Seschrock ASSERT(buf); 26451544Seschrock } 26461544Seschrock bcopy(buf->b_data, data, hdr->b_size); 26471544Seschrock } else { 2648789Sahrens rc = ENOENT; 26491544Seschrock } 2650789Sahrens 2651789Sahrens if (hash_mtx) 2652789Sahrens mutex_exit(hash_mtx); 2653789Sahrens 2654789Sahrens return (rc); 2655789Sahrens } 2656789Sahrens 26571544Seschrock void 26581544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 26591544Seschrock { 26601544Seschrock ASSERT(buf->b_hdr != NULL); 26613403Sbmc ASSERT(buf->b_hdr->b_state != arc_anon); 26621544Seschrock ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 26631544Seschrock buf->b_efunc = func; 26641544Seschrock buf->b_private = private; 26651544Seschrock } 26661544Seschrock 26671544Seschrock /* 26681544Seschrock * This is used by the DMU to let the ARC know that a buffer is 26691544Seschrock * being evicted, so the ARC should clean up. If this arc buf 26701544Seschrock * is not yet in the evicted state, it will be put there. 
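 *
 * Returns 1 if the eviction callback was invoked here, or 0 if the
 * buffer is already being cleaned up by arc_do_user_evicts().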
26711544Seschrock */ 26721544Seschrock int 26731544Seschrock arc_buf_evict(arc_buf_t *buf) 26741544Seschrock { 26752887Smaybee arc_buf_hdr_t *hdr; 26761544Seschrock kmutex_t *hash_lock; 26771544Seschrock arc_buf_t **bufp; 26781544Seschrock 26792887Smaybee mutex_enter(&arc_eviction_mtx); 26802887Smaybee hdr = buf->b_hdr; 26811544Seschrock if (hdr == NULL) { 26821544Seschrock /* 26831544Seschrock * We are in arc_do_user_evicts(). 26841544Seschrock */ 26851544Seschrock ASSERT(buf->b_data == NULL); 26862887Smaybee mutex_exit(&arc_eviction_mtx); 26871544Seschrock return (0); 26881544Seschrock } 26892887Smaybee hash_lock = HDR_LOCK(hdr); 26902887Smaybee mutex_exit(&arc_eviction_mtx); 26911544Seschrock 26921544Seschrock mutex_enter(hash_lock); 26931544Seschrock 26942724Smaybee if (buf->b_data == NULL) { 26952724Smaybee /* 26962724Smaybee * We are on the eviction list. 26972724Smaybee */ 26982724Smaybee mutex_exit(hash_lock); 26992724Smaybee mutex_enter(&arc_eviction_mtx); 27002724Smaybee if (buf->b_hdr == NULL) { 27012724Smaybee /* 27022724Smaybee * We are already in arc_do_user_evicts(). 27032724Smaybee */ 27042724Smaybee mutex_exit(&arc_eviction_mtx); 27052724Smaybee return (0); 27062724Smaybee } else { 27072724Smaybee arc_buf_t copy = *buf; /* structure assignment */ 27082724Smaybee /* 27092724Smaybee * Process this buffer now 27102724Smaybee * but let arc_do_user_evicts() do the reaping. 27112724Smaybee */ 27122724Smaybee buf->b_efunc = NULL; 27132724Smaybee mutex_exit(&arc_eviction_mtx); 27142724Smaybee VERIFY(copy.b_efunc(&copy) == 0); 27152724Smaybee return (1); 27162724Smaybee } 27172724Smaybee } 27182724Smaybee 27192724Smaybee ASSERT(buf->b_hdr == hdr); 27202724Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 27213403Sbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 27221544Seschrock 27231544Seschrock /* 27241544Seschrock * Pull this buffer off of the hdr 27251544Seschrock */ 27261544Seschrock bufp = &hdr->b_buf; 27271544Seschrock while (*bufp != buf) 27281544Seschrock bufp = &(*bufp)->b_next; 27291544Seschrock *bufp = buf->b_next; 27301544Seschrock 27311544Seschrock ASSERT(buf->b_data != NULL); 27322688Smaybee arc_buf_destroy(buf, FALSE, FALSE); 27331544Seschrock 27341544Seschrock if (hdr->b_datacnt == 0) { 27351544Seschrock arc_state_t *old_state = hdr->b_state; 27361544Seschrock arc_state_t *evicted_state; 27371544Seschrock 27381544Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt)); 27391544Seschrock 27401544Seschrock evicted_state = 27413403Sbmc (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 27421544Seschrock 27433403Sbmc mutex_enter(&old_state->arcs_mtx); 27443403Sbmc mutex_enter(&evicted_state->arcs_mtx); 27451544Seschrock 27461544Seschrock arc_change_state(evicted_state, hdr, hash_lock); 27471544Seschrock ASSERT(HDR_IN_HASH_TABLE(hdr)); 27485450Sbrendan hdr->b_flags |= ARC_IN_HASH_TABLE; 27495450Sbrendan hdr->b_flags &= ~ARC_BUF_AVAILABLE; 27501544Seschrock 27513403Sbmc mutex_exit(&evicted_state->arcs_mtx); 27523403Sbmc mutex_exit(&old_state->arcs_mtx); 27531544Seschrock } 27541544Seschrock mutex_exit(hash_lock); 27551819Smaybee 27561544Seschrock VERIFY(buf->b_efunc(buf) == 0); 27571544Seschrock buf->b_efunc = NULL; 27581544Seschrock buf->b_private = NULL; 27591544Seschrock buf->b_hdr = NULL; 27601544Seschrock kmem_cache_free(buf_cache, buf); 27611544Seschrock return (1); 27621544Seschrock } 27631544Seschrock 2764789Sahrens /* 2765789Sahrens * Release this buffer from the cache. 
This must be done 2766789Sahrens * after a read and prior to modifying the buffer contents. 2767789Sahrens * If the buffer has more than one reference, we must make 2768789Sahrens * make a new hdr for the buffer. 2769789Sahrens */ 2770789Sahrens void 2771789Sahrens arc_release(arc_buf_t *buf, void *tag) 2772789Sahrens { 2773789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 2774789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr); 27755450Sbrendan l2arc_buf_hdr_t *l2hdr = NULL; 27765450Sbrendan uint64_t buf_size; 2777789Sahrens 2778789Sahrens /* this buffer is not on any list */ 2779789Sahrens ASSERT(refcount_count(&hdr->b_refcnt) > 0); 2780789Sahrens 27813403Sbmc if (hdr->b_state == arc_anon) { 2782789Sahrens /* this buffer is already released */ 2783789Sahrens ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 2784789Sahrens ASSERT(BUF_EMPTY(hdr)); 27851544Seschrock ASSERT(buf->b_efunc == NULL); 27863093Sahrens arc_buf_thaw(buf); 2787789Sahrens return; 2788789Sahrens } 2789789Sahrens 2790789Sahrens mutex_enter(hash_lock); 2791789Sahrens 27921544Seschrock /* 27931544Seschrock * Do we have more than one buf? 27941544Seschrock */ 27951544Seschrock if (hdr->b_buf != buf || buf->b_next != NULL) { 2796789Sahrens arc_buf_hdr_t *nhdr; 2797789Sahrens arc_buf_t **bufp; 2798789Sahrens uint64_t blksz = hdr->b_size; 2799789Sahrens spa_t *spa = hdr->b_spa; 28003290Sjohansen arc_buf_contents_t type = hdr->b_type; 28015450Sbrendan uint32_t flags = hdr->b_flags; 2802789Sahrens 28031544Seschrock ASSERT(hdr->b_datacnt > 1); 2804789Sahrens /* 2805789Sahrens * Pull the data off of this buf and attach it to 2806789Sahrens * a new anonymous buf. 2807789Sahrens */ 28081544Seschrock (void) remove_reference(hdr, hash_lock, tag); 2809789Sahrens bufp = &hdr->b_buf; 28101544Seschrock while (*bufp != buf) 2811789Sahrens bufp = &(*bufp)->b_next; 2812789Sahrens *bufp = (*bufp)->b_next; 28133897Smaybee buf->b_next = NULL; 28141544Seschrock 28153403Sbmc ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 28163403Sbmc atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 28171544Seschrock if (refcount_is_zero(&hdr->b_refcnt)) { 28184309Smaybee uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 28194309Smaybee ASSERT3U(*size, >=, hdr->b_size); 28204309Smaybee atomic_add_64(size, -hdr->b_size); 28211544Seschrock } 28221544Seschrock hdr->b_datacnt -= 1; 28235450Sbrendan if (hdr->b_l2hdr != NULL) { 28245450Sbrendan mutex_enter(&l2arc_buflist_mtx); 28255450Sbrendan l2hdr = hdr->b_l2hdr; 28265450Sbrendan hdr->b_l2hdr = NULL; 28275450Sbrendan buf_size = hdr->b_size; 28285450Sbrendan } 28293547Smaybee arc_cksum_verify(buf); 28301544Seschrock 2831789Sahrens mutex_exit(hash_lock); 2832789Sahrens 28336245Smaybee nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 2834789Sahrens nhdr->b_size = blksz; 2835789Sahrens nhdr->b_spa = spa; 28363290Sjohansen nhdr->b_type = type; 2837789Sahrens nhdr->b_buf = buf; 28383403Sbmc nhdr->b_state = arc_anon; 2839789Sahrens nhdr->b_arc_access = 0; 28405450Sbrendan nhdr->b_flags = flags & ARC_L2_WRITING; 28415450Sbrendan nhdr->b_l2hdr = NULL; 28421544Seschrock nhdr->b_datacnt = 1; 28433547Smaybee nhdr->b_freeze_cksum = NULL; 28443897Smaybee (void) refcount_add(&nhdr->b_refcnt, tag); 2845789Sahrens buf->b_hdr = nhdr; 28463403Sbmc atomic_add_64(&arc_anon->arcs_size, blksz); 2847789Sahrens } else { 28481544Seschrock ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2849789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 2850789Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 28513403Sbmc arc_change_state(arc_anon, hdr, 
hash_lock); 2852789Sahrens hdr->b_arc_access = 0; 28535450Sbrendan if (hdr->b_l2hdr != NULL) { 28545450Sbrendan mutex_enter(&l2arc_buflist_mtx); 28555450Sbrendan l2hdr = hdr->b_l2hdr; 28565450Sbrendan hdr->b_l2hdr = NULL; 28575450Sbrendan buf_size = hdr->b_size; 28585450Sbrendan } 2859789Sahrens mutex_exit(hash_lock); 28605450Sbrendan 2861789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2862789Sahrens hdr->b_birth = 0; 2863789Sahrens hdr->b_cksum0 = 0; 28643547Smaybee arc_buf_thaw(buf); 2865789Sahrens } 28661544Seschrock buf->b_efunc = NULL; 28671544Seschrock buf->b_private = NULL; 28685450Sbrendan 28695450Sbrendan if (l2hdr) { 28705450Sbrendan list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 28715450Sbrendan kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 28725450Sbrendan ARCSTAT_INCR(arcstat_l2_size, -buf_size); 28735450Sbrendan } 28745450Sbrendan if (MUTEX_HELD(&l2arc_buflist_mtx)) 28755450Sbrendan mutex_exit(&l2arc_buflist_mtx); 2876789Sahrens } 2877789Sahrens 2878789Sahrens int 2879789Sahrens arc_released(arc_buf_t *buf) 2880789Sahrens { 28813403Sbmc return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 28821544Seschrock } 28831544Seschrock 28841544Seschrock int 28851544Seschrock arc_has_callback(arc_buf_t *buf) 28861544Seschrock { 28871544Seschrock return (buf->b_efunc != NULL); 2888789Sahrens } 2889789Sahrens 28901544Seschrock #ifdef ZFS_DEBUG 28911544Seschrock int 28921544Seschrock arc_referenced(arc_buf_t *buf) 28931544Seschrock { 28941544Seschrock return (refcount_count(&buf->b_hdr->b_refcnt)); 28951544Seschrock } 28961544Seschrock #endif 28971544Seschrock 2898789Sahrens static void 28993547Smaybee arc_write_ready(zio_t *zio) 29003547Smaybee { 29013547Smaybee arc_write_callback_t *callback = zio->io_private; 29023547Smaybee arc_buf_t *buf = callback->awcb_buf; 29035329Sgw25295 arc_buf_hdr_t *hdr = buf->b_hdr; 29045329Sgw25295 29055329Sgw25295 if (zio->io_error == 0 && callback->awcb_ready) { 29063547Smaybee ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 29073547Smaybee callback->awcb_ready(zio, buf, callback->awcb_private); 29083547Smaybee } 29095329Sgw25295 /* 29105329Sgw25295 * If the IO is already in progress, then this is a re-write 29115329Sgw25295 * attempt, so we need to thaw and re-compute the cksum. It is 29125329Sgw25295 * the responsibility of the callback to handle the freeing 29135329Sgw25295 * and accounting for any re-write attempt. If we don't have a 29145329Sgw25295 * callback registered then simply free the block here. 
29155329Sgw25295 */ 29165329Sgw25295 if (HDR_IO_IN_PROGRESS(hdr)) { 29175329Sgw25295 if (!BP_IS_HOLE(&zio->io_bp_orig) && 29185329Sgw25295 callback->awcb_ready == NULL) { 29195329Sgw25295 zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg, 29205329Sgw25295 &zio->io_bp_orig, NULL, NULL)); 29215329Sgw25295 } 29225329Sgw25295 mutex_enter(&hdr->b_freeze_lock); 29235329Sgw25295 if (hdr->b_freeze_cksum != NULL) { 29245329Sgw25295 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 29255329Sgw25295 hdr->b_freeze_cksum = NULL; 29265329Sgw25295 } 29275329Sgw25295 mutex_exit(&hdr->b_freeze_lock); 29285329Sgw25295 } 29295450Sbrendan arc_cksum_compute(buf, B_FALSE); 29305329Sgw25295 hdr->b_flags |= ARC_IO_IN_PROGRESS; 29313547Smaybee } 29323547Smaybee 29333547Smaybee static void 2934789Sahrens arc_write_done(zio_t *zio) 2935789Sahrens { 29363547Smaybee arc_write_callback_t *callback = zio->io_private; 29373547Smaybee arc_buf_t *buf = callback->awcb_buf; 29383547Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 2939789Sahrens 2940789Sahrens hdr->b_acb = NULL; 2941789Sahrens 2942789Sahrens /* this buffer is on no lists and is not in the hash table */ 29433403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2944789Sahrens 2945789Sahrens hdr->b_dva = *BP_IDENTITY(zio->io_bp); 2946789Sahrens hdr->b_birth = zio->io_bp->blk_birth; 2947789Sahrens hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 29481544Seschrock /* 29491544Seschrock * If the block to be written was all-zero, we may have 29501544Seschrock * compressed it away. In this case no write was performed 29511544Seschrock * so there will be no dva/birth-date/checksum. The buffer 29521544Seschrock * must therefor remain anonymous (and uncached). 29531544Seschrock */ 2954789Sahrens if (!BUF_EMPTY(hdr)) { 2955789Sahrens arc_buf_hdr_t *exists; 2956789Sahrens kmutex_t *hash_lock; 2957789Sahrens 29583093Sahrens arc_cksum_verify(buf); 29593093Sahrens 2960789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2961789Sahrens if (exists) { 2962789Sahrens /* 2963789Sahrens * This can only happen if we overwrite for 2964789Sahrens * sync-to-convergence, because we remove 2965789Sahrens * buffers from the hash table when we arc_free(). 2966789Sahrens */ 2967789Sahrens ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 2968789Sahrens BP_IDENTITY(zio->io_bp))); 2969789Sahrens ASSERT3U(zio->io_bp_orig.blk_birth, ==, 2970789Sahrens zio->io_bp->blk_birth); 2971789Sahrens 2972789Sahrens ASSERT(refcount_is_zero(&exists->b_refcnt)); 29733403Sbmc arc_change_state(arc_anon, exists, hash_lock); 2974789Sahrens mutex_exit(hash_lock); 29751544Seschrock arc_hdr_destroy(exists); 2976789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2977789Sahrens ASSERT3P(exists, ==, NULL); 2978789Sahrens } 29791544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 29802688Smaybee arc_access(hdr, hash_lock); 29812688Smaybee mutex_exit(hash_lock); 29823547Smaybee } else if (callback->awcb_done == NULL) { 29831544Seschrock int destroy_hdr; 29841544Seschrock /* 29851544Seschrock * This is an anonymous buffer with no user callback, 29861544Seschrock * destroy it if there are no active references. 
29871544Seschrock */ 29881544Seschrock mutex_enter(&arc_eviction_mtx); 29891544Seschrock destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 29901544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 29911544Seschrock mutex_exit(&arc_eviction_mtx); 29921544Seschrock if (destroy_hdr) 29931544Seschrock arc_hdr_destroy(hdr); 29941544Seschrock } else { 29951544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2996789Sahrens } 29971544Seschrock 29983547Smaybee if (callback->awcb_done) { 2999789Sahrens ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 30003547Smaybee callback->awcb_done(zio, buf, callback->awcb_private); 3001789Sahrens } 3002789Sahrens 30033547Smaybee kmem_free(callback, sizeof (arc_write_callback_t)); 3004789Sahrens } 3005789Sahrens 30063547Smaybee zio_t * 30071775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 3008789Sahrens uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 30093547Smaybee arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 30103547Smaybee int flags, zbookmark_t *zb) 3011789Sahrens { 3012789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 30133547Smaybee arc_write_callback_t *callback; 30143547Smaybee zio_t *zio; 3015789Sahrens 3016789Sahrens /* this is a private buffer - no locking required */ 30173403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 3018789Sahrens ASSERT(BUF_EMPTY(hdr)); 3019789Sahrens ASSERT(!HDR_IO_ERROR(hdr)); 30202237Smaybee ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 30212237Smaybee ASSERT(hdr->b_acb == 0); 30223547Smaybee callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 30233547Smaybee callback->awcb_ready = ready; 30243547Smaybee callback->awcb_done = done; 30253547Smaybee callback->awcb_private = private; 30263547Smaybee callback->awcb_buf = buf; 30273547Smaybee zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 30283547Smaybee buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 30293547Smaybee priority, flags, zb); 3030789Sahrens 30313547Smaybee return (zio); 3032789Sahrens } 3033789Sahrens 3034789Sahrens int 3035789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3036789Sahrens zio_done_func_t *done, void *private, uint32_t arc_flags) 3037789Sahrens { 3038789Sahrens arc_buf_hdr_t *ab; 3039789Sahrens kmutex_t *hash_lock; 3040789Sahrens zio_t *zio; 3041789Sahrens 3042789Sahrens /* 3043789Sahrens * If this buffer is in the cache, release it, so it 3044789Sahrens * can be re-used. 3045789Sahrens */ 3046789Sahrens ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3047789Sahrens if (ab != NULL) { 3048789Sahrens /* 3049789Sahrens * The checksum of blocks to free is not always 3050789Sahrens * preserved (eg. on the deadlist). However, if it is 3051789Sahrens * nonzero, it should match what we have in the cache. 3052789Sahrens */ 3053789Sahrens ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3054789Sahrens ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 30553403Sbmc if (ab->b_state != arc_anon) 30563403Sbmc arc_change_state(arc_anon, ab, hash_lock); 30572391Smaybee if (HDR_IO_IN_PROGRESS(ab)) { 30582391Smaybee /* 30592391Smaybee * This should only happen when we prefetch. 
30602391Smaybee */ 30612391Smaybee ASSERT(ab->b_flags & ARC_PREFETCH); 30622391Smaybee ASSERT3U(ab->b_datacnt, ==, 1); 30632391Smaybee ab->b_flags |= ARC_FREED_IN_READ; 30642391Smaybee if (HDR_IN_HASH_TABLE(ab)) 30652391Smaybee buf_hash_remove(ab); 30662391Smaybee ab->b_arc_access = 0; 30672391Smaybee bzero(&ab->b_dva, sizeof (dva_t)); 30682391Smaybee ab->b_birth = 0; 30692391Smaybee ab->b_cksum0 = 0; 30702391Smaybee ab->b_buf->b_efunc = NULL; 30712391Smaybee ab->b_buf->b_private = NULL; 30722391Smaybee mutex_exit(hash_lock); 30732391Smaybee } else if (refcount_is_zero(&ab->b_refcnt)) { 30745450Sbrendan ab->b_flags |= ARC_FREE_IN_PROGRESS; 3075789Sahrens mutex_exit(hash_lock); 30761544Seschrock arc_hdr_destroy(ab); 30773403Sbmc ARCSTAT_BUMP(arcstat_deleted); 3078789Sahrens } else { 30791589Smaybee /* 30802391Smaybee * We still have an active reference on this 30812391Smaybee * buffer. This can happen, e.g., from 30822391Smaybee * dbuf_unoverride(). 30831589Smaybee */ 30842391Smaybee ASSERT(!HDR_IN_HASH_TABLE(ab)); 3085789Sahrens ab->b_arc_access = 0; 3086789Sahrens bzero(&ab->b_dva, sizeof (dva_t)); 3087789Sahrens ab->b_birth = 0; 3088789Sahrens ab->b_cksum0 = 0; 30891544Seschrock ab->b_buf->b_efunc = NULL; 30901544Seschrock ab->b_buf->b_private = NULL; 3091789Sahrens mutex_exit(hash_lock); 3092789Sahrens } 3093789Sahrens } 3094789Sahrens 3095789Sahrens zio = zio_free(pio, spa, txg, bp, done, private); 3096789Sahrens 3097789Sahrens if (arc_flags & ARC_WAIT) 3098789Sahrens return (zio_wait(zio)); 3099789Sahrens 3100789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 3101789Sahrens zio_nowait(zio); 3102789Sahrens 3103789Sahrens return (0); 3104789Sahrens } 3105789Sahrens 31066245Smaybee static int 31076245Smaybee arc_memory_throttle(uint64_t reserve, uint64_t txg) 31086245Smaybee { 31096245Smaybee #ifdef _KERNEL 31106245Smaybee uint64_t inflight_data = arc_anon->arcs_size; 31116245Smaybee uint64_t available_memory = ptob(freemem); 31126245Smaybee static uint64_t page_load = 0; 31136245Smaybee static uint64_t last_txg = 0; 31146245Smaybee 31156245Smaybee #if defined(__i386) 31166245Smaybee available_memory = 31176245Smaybee MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 31186245Smaybee #endif 31196245Smaybee if (available_memory >= zfs_write_limit_max) 31206245Smaybee return (0); 31216245Smaybee 31226245Smaybee if (txg > last_txg) { 31236245Smaybee last_txg = txg; 31246245Smaybee page_load = 0; 31256245Smaybee } 31266245Smaybee /* 31276245Smaybee * If we are in pageout, we know that memory is already tight, 31286245Smaybee * the arc is already going to be evicting, so we just want to 31296245Smaybee * continue to let page writes occur as quickly as possible. 
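 *
 * A sketch of the accounting below: each call from pageout context
 * adds reserve/8 to the per-txg page_load counter (the reserve passed
 * in is inflated, hence the deflation), and once page_load exceeds a
 * quarter of MAX(ptob(minfree), available_memory) the caller gets
 * ERESTART until a new txg resets page_load to zero.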
31306245Smaybee */ 31316245Smaybee if (curproc == proc_pageout) { 31326245Smaybee if (page_load > MAX(ptob(minfree), available_memory) / 4) 31336245Smaybee return (ERESTART); 31346245Smaybee /* Note: reserve is inflated, so we deflate */ 31356245Smaybee page_load += reserve / 8; 31366245Smaybee return (0); 31376245Smaybee } else if (page_load > 0 && arc_reclaim_needed()) { 31386245Smaybee /* memory is low, delay before restarting */ 31396245Smaybee ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 31406245Smaybee return (EAGAIN); 31416245Smaybee } 31426245Smaybee page_load = 0; 31436245Smaybee 31446245Smaybee if (arc_size > arc_c_min) { 31456245Smaybee uint64_t evictable_memory = 31466245Smaybee arc_mru->arcs_lsize[ARC_BUFC_DATA] + 31476245Smaybee arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 31486245Smaybee arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 31496245Smaybee arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 31506245Smaybee available_memory += MIN(evictable_memory, arc_size - arc_c_min); 31516245Smaybee } 31526245Smaybee 31536245Smaybee if (inflight_data > available_memory / 4) { 31546245Smaybee ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 31556245Smaybee return (ERESTART); 31566245Smaybee } 31576245Smaybee #endif 31586245Smaybee return (0); 31596245Smaybee } 31606245Smaybee 3161789Sahrens void 31626245Smaybee arc_tempreserve_clear(uint64_t reserve) 3163789Sahrens { 31646245Smaybee atomic_add_64(&arc_tempreserve, -reserve); 3165789Sahrens ASSERT((int64_t)arc_tempreserve >= 0); 3166789Sahrens } 3167789Sahrens 3168789Sahrens int 31696245Smaybee arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3170789Sahrens { 31716245Smaybee int error; 31726245Smaybee 3173789Sahrens #ifdef ZFS_DEBUG 3174789Sahrens /* 3175789Sahrens * Once in a while, fail for no reason. Everything should cope. 3176789Sahrens */ 3177789Sahrens if (spa_get_random(10000) == 0) { 3178789Sahrens dprintf("forcing random failure\n"); 3179789Sahrens return (ERESTART); 3180789Sahrens } 3181789Sahrens #endif 31826245Smaybee if (reserve > arc_c/4 && !arc_no_grow) 31836245Smaybee arc_c = MIN(arc_c_max, reserve * 4); 31846245Smaybee if (reserve > arc_c) 3185982Smaybee return (ENOMEM); 3186982Smaybee 3187789Sahrens /* 31886245Smaybee * Writes will, almost always, require additional memory allocations 31896245Smaybee * in order to compress/encrypt/etc the data. We therefor need to 31906245Smaybee * make sure that there is sufficient available memory for this. 31916245Smaybee */ 31926245Smaybee if (error = arc_memory_throttle(reserve, txg)) 31936245Smaybee return (error); 31946245Smaybee 31956245Smaybee /* 3196982Smaybee * Throttle writes when the amount of dirty data in the cache 3197982Smaybee * gets too large. We try to keep the cache less than half full 3198982Smaybee * of dirty blocks so that our sync times don't grow too large. 3199982Smaybee * Note: if two requests come in concurrently, we might let them 3200982Smaybee * both succeed, when one of them should fail. Not a huge deal. 
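 *
 * A worked example of the check below, assuming arc_c = 1 GB: the
 * reservation is refused with ERESTART only when reserve +
 * arc_tempreserve + the anonymous-state size exceed 512 MB (arc_c / 2)
 * and the anonymous-state size alone already exceeds 256 MB (arc_c / 4).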
3201789Sahrens */ 32026245Smaybee if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 32036245Smaybee arc_anon->arcs_size > arc_c / 4) { 32044309Smaybee dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 32054309Smaybee "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 32064309Smaybee arc_tempreserve>>10, 32074309Smaybee arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 32084309Smaybee arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 32096245Smaybee reserve>>10, arc_c>>10); 3210789Sahrens return (ERESTART); 3211789Sahrens } 32126245Smaybee atomic_add_64(&arc_tempreserve, reserve); 3213789Sahrens return (0); 3214789Sahrens } 3215789Sahrens 3216789Sahrens void 3217789Sahrens arc_init(void) 3218789Sahrens { 3219789Sahrens mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3220789Sahrens cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3221789Sahrens 32222391Smaybee /* Convert seconds to clock ticks */ 32232638Sperrin arc_min_prefetch_lifespan = 1 * hz; 32242391Smaybee 3225789Sahrens /* Start out with 1/8 of all memory */ 32263403Sbmc arc_c = physmem * PAGESIZE / 8; 3227789Sahrens 3228789Sahrens #ifdef _KERNEL 3229789Sahrens /* 3230789Sahrens * On architectures where the physical memory can be larger 3231789Sahrens * than the addressable space (intel in 32-bit mode), we may 3232789Sahrens * need to limit the cache to 1/8 of VM size. 3233789Sahrens */ 32343403Sbmc arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3235789Sahrens #endif 3236789Sahrens 3237982Smaybee /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 32383403Sbmc arc_c_min = MAX(arc_c / 4, 64<<20); 3239982Smaybee /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 32403403Sbmc if (arc_c * 8 >= 1<<30) 32413403Sbmc arc_c_max = (arc_c * 8) - (1<<30); 3242789Sahrens else 32433403Sbmc arc_c_max = arc_c_min; 32443403Sbmc arc_c_max = MAX(arc_c * 6, arc_c_max); 32452885Sahrens 32462885Sahrens /* 32472885Sahrens * Allow the tunables to override our calculations if they are 32482885Sahrens * reasonable (ie. 
over 64MB) 32492885Sahrens */ 32502885Sahrens if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 32513403Sbmc arc_c_max = zfs_arc_max; 32523403Sbmc if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 32533403Sbmc arc_c_min = zfs_arc_min; 32542885Sahrens 32553403Sbmc arc_c = arc_c_max; 32563403Sbmc arc_p = (arc_c >> 1); 3257789Sahrens 32584309Smaybee /* limit meta-data to 1/4 of the arc capacity */ 32594309Smaybee arc_meta_limit = arc_c_max / 4; 32604645Sek110237 32614645Sek110237 /* Allow the tunable to override if it is reasonable */ 32624645Sek110237 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 32634645Sek110237 arc_meta_limit = zfs_arc_meta_limit; 32644645Sek110237 32654309Smaybee if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 32664309Smaybee arc_c_min = arc_meta_limit / 2; 32674309Smaybee 3268789Sahrens /* if kmem_flags are set, lets try to use less memory */ 3269789Sahrens if (kmem_debugging()) 32703403Sbmc arc_c = arc_c / 2; 32713403Sbmc if (arc_c < arc_c_min) 32723403Sbmc arc_c = arc_c_min; 3273789Sahrens 32743403Sbmc arc_anon = &ARC_anon; 32753403Sbmc arc_mru = &ARC_mru; 32763403Sbmc arc_mru_ghost = &ARC_mru_ghost; 32773403Sbmc arc_mfu = &ARC_mfu; 32783403Sbmc arc_mfu_ghost = &ARC_mfu_ghost; 32795450Sbrendan arc_l2c_only = &ARC_l2c_only; 32803403Sbmc arc_size = 0; 3281789Sahrens 32823403Sbmc mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32833403Sbmc mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32843403Sbmc mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32853403Sbmc mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32863403Sbmc mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32875450Sbrendan mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32882688Smaybee 32894309Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 32904309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32914309Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 32924309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32934309Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 32944309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32954309Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 32964309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32974309Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 32984309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32994309Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 33004309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 33014309Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 33024309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 33034309Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 33044309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 33055450Sbrendan list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 33065450Sbrendan sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 33075450Sbrendan list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 33085450Sbrendan sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3309789Sahrens 3310789Sahrens buf_init(); 3311789Sahrens 3312789Sahrens arc_thread_exit = 0; 33131544Seschrock arc_eviction_list = NULL; 33141544Seschrock mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 33152887Smaybee bzero(&arc_eviction_hdr, 
sizeof (arc_buf_hdr_t)); 3316789Sahrens 33173403Sbmc arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 33183403Sbmc sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 33193403Sbmc 33203403Sbmc if (arc_ksp != NULL) { 33213403Sbmc arc_ksp->ks_data = &arc_stats; 33223403Sbmc kstat_install(arc_ksp); 33233403Sbmc } 33243403Sbmc 3325789Sahrens (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3326789Sahrens TS_RUN, minclsyspri); 33273158Smaybee 33283158Smaybee arc_dead = FALSE; 33296245Smaybee 33306245Smaybee if (zfs_write_limit_max == 0) 33316245Smaybee zfs_write_limit_max = physmem * PAGESIZE >> 33326245Smaybee zfs_write_limit_shift; 33336245Smaybee else 33346245Smaybee zfs_write_limit_shift = 0; 3335789Sahrens } 3336789Sahrens 3337789Sahrens void 3338789Sahrens arc_fini(void) 3339789Sahrens { 3340789Sahrens mutex_enter(&arc_reclaim_thr_lock); 3341789Sahrens arc_thread_exit = 1; 3342789Sahrens while (arc_thread_exit != 0) 3343789Sahrens cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3344789Sahrens mutex_exit(&arc_reclaim_thr_lock); 3345789Sahrens 33465642Smaybee arc_flush(NULL); 3347789Sahrens 3348789Sahrens arc_dead = TRUE; 3349789Sahrens 33503403Sbmc if (arc_ksp != NULL) { 33513403Sbmc kstat_delete(arc_ksp); 33523403Sbmc arc_ksp = NULL; 33533403Sbmc } 33543403Sbmc 33551544Seschrock mutex_destroy(&arc_eviction_mtx); 3356789Sahrens mutex_destroy(&arc_reclaim_thr_lock); 3357789Sahrens cv_destroy(&arc_reclaim_thr_cv); 3358789Sahrens 33594309Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 33604309Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 33614309Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 33624309Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 33634309Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 33644309Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 33654309Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 33664309Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3367789Sahrens 33683403Sbmc mutex_destroy(&arc_anon->arcs_mtx); 33693403Sbmc mutex_destroy(&arc_mru->arcs_mtx); 33703403Sbmc mutex_destroy(&arc_mru_ghost->arcs_mtx); 33713403Sbmc mutex_destroy(&arc_mfu->arcs_mtx); 33723403Sbmc mutex_destroy(&arc_mfu_ghost->arcs_mtx); 33732856Snd150628 3374789Sahrens buf_fini(); 3375789Sahrens } 33765450Sbrendan 33775450Sbrendan /* 33785450Sbrendan * Level 2 ARC 33795450Sbrendan * 33805450Sbrendan * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 33815450Sbrendan * It uses dedicated storage devices to hold cached data, which are populated 33825450Sbrendan * using large infrequent writes. The main role of this cache is to boost 33835450Sbrendan * the performance of random read workloads. The intended L2ARC devices 33845450Sbrendan * include short-stroked disks, solid state disks, and other media with 33855450Sbrendan * substantially faster read latency than disk. 
33865450Sbrendan * 33875450Sbrendan * +-----------------------+ 33885450Sbrendan * | ARC | 33895450Sbrendan * +-----------------------+ 33905450Sbrendan * | ^ ^ 33915450Sbrendan * | | | 33925450Sbrendan * l2arc_feed_thread() arc_read() 33935450Sbrendan * | | | 33945450Sbrendan * | l2arc read | 33955450Sbrendan * V | | 33965450Sbrendan * +---------------+ | 33975450Sbrendan * | L2ARC | | 33985450Sbrendan * +---------------+ | 33995450Sbrendan * | ^ | 34005450Sbrendan * l2arc_write() | | 34015450Sbrendan * | | | 34025450Sbrendan * V | | 34035450Sbrendan * +-------+ +-------+ 34045450Sbrendan * | vdev | | vdev | 34055450Sbrendan * | cache | | cache | 34065450Sbrendan * +-------+ +-------+ 34075450Sbrendan * +=========+ .-----. 34085450Sbrendan * : L2ARC : |-_____-| 34095450Sbrendan * : devices : | Disks | 34105450Sbrendan * +=========+ `-_____-' 34115450Sbrendan * 34125450Sbrendan * Read requests are satisfied from the following sources, in order: 34135450Sbrendan * 34145450Sbrendan * 1) ARC 34155450Sbrendan * 2) vdev cache of L2ARC devices 34165450Sbrendan * 3) L2ARC devices 34175450Sbrendan * 4) vdev cache of disks 34185450Sbrendan * 5) disks 34195450Sbrendan * 34205450Sbrendan * Some L2ARC device types exhibit extremely slow write performance. 34215450Sbrendan * To accommodate for this there are some significant differences between 34225450Sbrendan * the L2ARC and traditional cache design: 34235450Sbrendan * 34245450Sbrendan * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 34255450Sbrendan * the ARC behave as usual, freeing buffers and placing headers on ghost 34265450Sbrendan * lists. The ARC does not send buffers to the L2ARC during eviction as 34275450Sbrendan * this would add inflated write latencies for all ARC memory pressure. 34285450Sbrendan * 34295450Sbrendan * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 34305450Sbrendan * It does this by periodically scanning buffers from the eviction-end of 34315450Sbrendan * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 34325450Sbrendan * not already there. It scans until a headroom of buffers is satisfied, 34335450Sbrendan * which itself is a buffer for ARC eviction. The thread that does this is 34345450Sbrendan * l2arc_feed_thread(), illustrated below; example sizes are included to 34355450Sbrendan * provide a better sense of ratio than this diagram: 34365450Sbrendan * 34375450Sbrendan * head --> tail 34385450Sbrendan * +---------------------+----------+ 34395450Sbrendan * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 34405450Sbrendan * +---------------------+----------+ | o L2ARC eligible 34415450Sbrendan * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 34425450Sbrendan * +---------------------+----------+ | 34435450Sbrendan * 15.9 Gbytes ^ 32 Mbytes | 34445450Sbrendan * headroom | 34455450Sbrendan * l2arc_feed_thread() 34465450Sbrendan * | 34475450Sbrendan * l2arc write hand <--[oooo]--' 34485450Sbrendan * | 8 Mbyte 34495450Sbrendan * | write max 34505450Sbrendan * V 34515450Sbrendan * +==============================+ 34525450Sbrendan * L2ARC dev |####|#|###|###| |####| ... | 34535450Sbrendan * +==============================+ 34545450Sbrendan * 32 Gbytes 34555450Sbrendan * 34565450Sbrendan * 3. 
If an ARC buffer is copied to the L2ARC but then hit instead of 34575450Sbrendan * evicted, then the L2ARC has cached a buffer much sooner than it probably 34585450Sbrendan * needed to, potentially wasting L2ARC device bandwidth and storage. It is 34595450Sbrendan * safe to say that this is an uncommon case, since buffers at the end of 34605450Sbrendan * the ARC lists have moved there due to inactivity. 34615450Sbrendan * 34625450Sbrendan * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 34635450Sbrendan * then the L2ARC simply misses copying some buffers. This serves as a 34645450Sbrendan * pressure valve to prevent heavy read workloads from both stalling the ARC 34655450Sbrendan * with waits and clogging the L2ARC with writes. This also helps prevent 34665450Sbrendan * the potential for the L2ARC to churn if it attempts to cache content too 34675450Sbrendan * quickly, such as during backups of the entire pool. 34685450Sbrendan * 34695450Sbrendan * 5. Writes to the L2ARC devices are grouped and sent in-sequence, so that 34705450Sbrendan * the vdev queue can aggregate them into larger and fewer writes. Each 34715450Sbrendan * device is written to in a rotor fashion, sweeping writes through 34725450Sbrendan * available space then repeating. 34735450Sbrendan * 34745450Sbrendan * 6. The L2ARC does not store dirty content. It never needs to flush 34755450Sbrendan * write buffers back to disk based storage. 34765450Sbrendan * 34775450Sbrendan * 7. If an ARC buffer is written (and dirtied) which also exists in the 34785450Sbrendan * L2ARC, the now stale L2ARC buffer is immediately dropped. 34795450Sbrendan * 34805450Sbrendan * The performance of the L2ARC can be tweaked by a number of tunables, which 34815450Sbrendan * may be necessary for different workloads: 34825450Sbrendan * 34835450Sbrendan * l2arc_write_max max write bytes per interval 34845450Sbrendan * l2arc_noprefetch skip caching prefetched buffers 34855450Sbrendan * l2arc_headroom number of max device writes to precache 34865450Sbrendan * l2arc_feed_secs seconds between L2ARC writing 34875450Sbrendan * 34885450Sbrendan * Tunables may be removed or added as future performance improvements are 34895450Sbrendan * integrated, and also may become zpool properties. 34905450Sbrendan */ 34915450Sbrendan 34925450Sbrendan static void 34935450Sbrendan l2arc_hdr_stat_add(void) 34945450Sbrendan { 34956018Sbrendan ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 34966018Sbrendan ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 34975450Sbrendan } 34985450Sbrendan 34995450Sbrendan static void 35005450Sbrendan l2arc_hdr_stat_remove(void) 35015450Sbrendan { 35026018Sbrendan ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 35036018Sbrendan ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 35045450Sbrendan } 35055450Sbrendan 35065450Sbrendan /* 35075450Sbrendan * Cycle through L2ARC devices. This is how L2ARC load balances. 35085450Sbrendan * This is called with l2arc_dev_mtx held, which also locks out spa removal. 
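 *
 * A sketch of the rotor below: with cache devices A, B and C on the
 * list and l2arc_dev_last pointing at B, the next call returns C, then
 * A, wrapping around the list and skipping any device whose vdev is
 * dead; NULL is returned when there are no devices or none is usable.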
35095450Sbrendan */ 35105450Sbrendan static l2arc_dev_t * 35115450Sbrendan l2arc_dev_get_next(void) 35125450Sbrendan { 3513*6643Seschrock l2arc_dev_t *next, *first; 3514*6643Seschrock 3515*6643Seschrock /* if there are no vdevs, there is nothing to do */ 3516*6643Seschrock if (l2arc_ndev == 0) 3517*6643Seschrock return (NULL); 3518*6643Seschrock 3519*6643Seschrock first = NULL; 3520*6643Seschrock next = l2arc_dev_last; 3521*6643Seschrock do { 3522*6643Seschrock /* loop around the list looking for a non-faulted vdev */ 3523*6643Seschrock if (next == NULL) { 35245450Sbrendan next = list_head(l2arc_dev_list); 3525*6643Seschrock } else { 3526*6643Seschrock next = list_next(l2arc_dev_list, next); 3527*6643Seschrock if (next == NULL) 3528*6643Seschrock next = list_head(l2arc_dev_list); 3529*6643Seschrock } 3530*6643Seschrock 3531*6643Seschrock /* if we have come back to the start, bail out */ 3532*6643Seschrock if (first == NULL) 3533*6643Seschrock first = next; 3534*6643Seschrock else if (next == first) 3535*6643Seschrock break; 3536*6643Seschrock 3537*6643Seschrock } while (vdev_is_dead(next->l2ad_vdev)); 3538*6643Seschrock 3539*6643Seschrock /* if we were unable to find any usable vdevs, return NULL */ 3540*6643Seschrock if (vdev_is_dead(next->l2ad_vdev)) 3541*6643Seschrock return (NULL); 35425450Sbrendan 35435450Sbrendan l2arc_dev_last = next; 35445450Sbrendan 35455450Sbrendan return (next); 35465450Sbrendan } 35475450Sbrendan 35485450Sbrendan /* 35495450Sbrendan * A write to a cache device has completed. Update all headers to allow 35505450Sbrendan * reads from these buffers to begin. 35515450Sbrendan */ 35525450Sbrendan static void 35535450Sbrendan l2arc_write_done(zio_t *zio) 35545450Sbrendan { 35555450Sbrendan l2arc_write_callback_t *cb; 35565450Sbrendan l2arc_dev_t *dev; 35575450Sbrendan list_t *buflist; 35585450Sbrendan l2arc_data_free_t *df, *df_prev; 35595450Sbrendan arc_buf_hdr_t *head, *ab, *ab_prev; 35605450Sbrendan kmutex_t *hash_lock; 35615450Sbrendan 35625450Sbrendan cb = zio->io_private; 35635450Sbrendan ASSERT(cb != NULL); 35645450Sbrendan dev = cb->l2wcb_dev; 35655450Sbrendan ASSERT(dev != NULL); 35665450Sbrendan head = cb->l2wcb_head; 35675450Sbrendan ASSERT(head != NULL); 35685450Sbrendan buflist = dev->l2ad_buflist; 35695450Sbrendan ASSERT(buflist != NULL); 35705450Sbrendan DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 35715450Sbrendan l2arc_write_callback_t *, cb); 35725450Sbrendan 35735450Sbrendan if (zio->io_error != 0) 35745450Sbrendan ARCSTAT_BUMP(arcstat_l2_writes_error); 35755450Sbrendan 35765450Sbrendan mutex_enter(&l2arc_buflist_mtx); 35775450Sbrendan 35785450Sbrendan /* 35795450Sbrendan * All writes completed, or an error was hit. 35805450Sbrendan */ 35815450Sbrendan for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 35825450Sbrendan ab_prev = list_prev(buflist, ab); 35835450Sbrendan 35845450Sbrendan hash_lock = HDR_LOCK(ab); 35855450Sbrendan if (!mutex_tryenter(hash_lock)) { 35865450Sbrendan /* 35875450Sbrendan * This buffer misses out. It may be in a stage 35885450Sbrendan * of eviction. Its ARC_L2_WRITING flag will be 35895450Sbrendan * left set, denying reads to this buffer. 35905450Sbrendan */ 35915450Sbrendan ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 35925450Sbrendan continue; 35935450Sbrendan } 35945450Sbrendan 35955450Sbrendan if (zio->io_error != 0) { 35965450Sbrendan /* 35975450Sbrendan * Error - invalidate L2ARC entry. 
35985450Sbrendan */ 35995450Sbrendan ab->b_l2hdr = NULL; 36005450Sbrendan } 36015450Sbrendan 36025450Sbrendan /* 36035450Sbrendan * Allow ARC to begin reads to this L2ARC entry. 36045450Sbrendan */ 36055450Sbrendan ab->b_flags &= ~ARC_L2_WRITING; 36065450Sbrendan 36075450Sbrendan mutex_exit(hash_lock); 36085450Sbrendan } 36095450Sbrendan 36105450Sbrendan atomic_inc_64(&l2arc_writes_done); 36115450Sbrendan list_remove(buflist, head); 36125450Sbrendan kmem_cache_free(hdr_cache, head); 36135450Sbrendan mutex_exit(&l2arc_buflist_mtx); 36145450Sbrendan 36155450Sbrendan /* 36165450Sbrendan * Free buffers that were tagged for destruction. 36175450Sbrendan */ 36185450Sbrendan mutex_enter(&l2arc_free_on_write_mtx); 36195450Sbrendan buflist = l2arc_free_on_write; 36205450Sbrendan for (df = list_tail(buflist); df; df = df_prev) { 36215450Sbrendan df_prev = list_prev(buflist, df); 36225450Sbrendan ASSERT(df->l2df_data != NULL); 36235450Sbrendan ASSERT(df->l2df_func != NULL); 36245450Sbrendan df->l2df_func(df->l2df_data, df->l2df_size); 36255450Sbrendan list_remove(buflist, df); 36265450Sbrendan kmem_free(df, sizeof (l2arc_data_free_t)); 36275450Sbrendan } 36285450Sbrendan mutex_exit(&l2arc_free_on_write_mtx); 36295450Sbrendan 36305450Sbrendan kmem_free(cb, sizeof (l2arc_write_callback_t)); 36315450Sbrendan } 36325450Sbrendan 36335450Sbrendan /* 36345450Sbrendan * A read to a cache device completed. Validate buffer contents before 36355450Sbrendan * handing over to the regular ARC routines. 36365450Sbrendan */ 36375450Sbrendan static void 36385450Sbrendan l2arc_read_done(zio_t *zio) 36395450Sbrendan { 36405450Sbrendan l2arc_read_callback_t *cb; 36415450Sbrendan arc_buf_hdr_t *hdr; 36425450Sbrendan arc_buf_t *buf; 36435450Sbrendan zio_t *rzio; 36445450Sbrendan kmutex_t *hash_lock; 36455450Sbrendan int equal, err = 0; 36465450Sbrendan 36475450Sbrendan cb = zio->io_private; 36485450Sbrendan ASSERT(cb != NULL); 36495450Sbrendan buf = cb->l2rcb_buf; 36505450Sbrendan ASSERT(buf != NULL); 36515450Sbrendan hdr = buf->b_hdr; 36525450Sbrendan ASSERT(hdr != NULL); 36535450Sbrendan 36545450Sbrendan hash_lock = HDR_LOCK(hdr); 36555450Sbrendan mutex_enter(hash_lock); 36565450Sbrendan 36575450Sbrendan /* 36585450Sbrendan * Check this survived the L2ARC journey. 36595450Sbrendan */ 36605450Sbrendan equal = arc_cksum_equal(buf); 36615450Sbrendan if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 36625450Sbrendan mutex_exit(hash_lock); 36635450Sbrendan zio->io_private = buf; 36645450Sbrendan arc_read_done(zio); 36655450Sbrendan } else { 36665450Sbrendan mutex_exit(hash_lock); 36675450Sbrendan /* 36685450Sbrendan * Buffer didn't survive caching. Increment stats and 36695450Sbrendan * reissue to the original storage device. 36705450Sbrendan */ 36715450Sbrendan if (zio->io_error != 0) 36725450Sbrendan ARCSTAT_BUMP(arcstat_l2_io_error); 36735450Sbrendan if (!equal) 36745450Sbrendan ARCSTAT_BUMP(arcstat_l2_cksum_bad); 36755450Sbrendan 36765450Sbrendan zio->io_flags &= ~ZIO_FLAG_DONT_CACHE; 36775450Sbrendan rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp, 36785450Sbrendan buf->b_data, zio->io_size, arc_read_done, buf, 36795450Sbrendan zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb); 36805450Sbrendan 36815450Sbrendan /* 36825450Sbrendan * Since this is a seperate thread, we can wait on this 36835450Sbrendan * I/O whether there is an io_waiter or not. 36845450Sbrendan */ 36855450Sbrendan err = zio_wait(rzio); 36865450Sbrendan 36875450Sbrendan /* 36885450Sbrendan * Let the resent I/O call arc_read_done() instead. 
36895450Sbrendan * io_error is set to the reissued I/O error status. 36905450Sbrendan */ 36915450Sbrendan zio->io_done = NULL; 36925450Sbrendan zio->io_waiter = NULL; 36935450Sbrendan zio->io_error = err; 36945450Sbrendan } 36955450Sbrendan 36965450Sbrendan kmem_free(cb, sizeof (l2arc_read_callback_t)); 36975450Sbrendan } 36985450Sbrendan 36995450Sbrendan /* 37005450Sbrendan * This is the list priority from which the L2ARC will search for pages to 37015450Sbrendan * cache. This is used within loops (0..3) to cycle through lists in the 37025450Sbrendan * desired order. This order can have a significant effect on cache 37035450Sbrendan * performance. 37045450Sbrendan * 37055450Sbrendan * Currently the metadata lists are hit first, MFU then MRU, followed by 37065450Sbrendan * the data lists. This function returns a locked list, and also returns 37075450Sbrendan * the lock pointer. 37085450Sbrendan */ 37095450Sbrendan static list_t * 37105450Sbrendan l2arc_list_locked(int list_num, kmutex_t **lock) 37115450Sbrendan { 37125450Sbrendan list_t *list; 37135450Sbrendan 37145450Sbrendan ASSERT(list_num >= 0 && list_num <= 3); 37155450Sbrendan 37165450Sbrendan switch (list_num) { 37175450Sbrendan case 0: 37185450Sbrendan list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 37195450Sbrendan *lock = &arc_mfu->arcs_mtx; 37205450Sbrendan break; 37215450Sbrendan case 1: 37225450Sbrendan list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 37235450Sbrendan *lock = &arc_mru->arcs_mtx; 37245450Sbrendan break; 37255450Sbrendan case 2: 37265450Sbrendan list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 37275450Sbrendan *lock = &arc_mfu->arcs_mtx; 37285450Sbrendan break; 37295450Sbrendan case 3: 37305450Sbrendan list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 37315450Sbrendan *lock = &arc_mru->arcs_mtx; 37325450Sbrendan break; 37335450Sbrendan } 37345450Sbrendan 37355450Sbrendan ASSERT(!(MUTEX_HELD(*lock))); 37365450Sbrendan mutex_enter(*lock); 37375450Sbrendan return (list); 37385450Sbrendan } 37395450Sbrendan 37405450Sbrendan /* 37415450Sbrendan * Evict buffers from the device write hand to the distance specified in 37425450Sbrendan * bytes. This distance may span populated buffers, it may span nothing. 37435450Sbrendan * This is clearing a region on the L2ARC device ready for writing. 37445450Sbrendan * If the 'all' boolean is set, every buffer is evicted. 37455450Sbrendan */ 37465450Sbrendan static void 37475450Sbrendan l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 37485450Sbrendan { 37495450Sbrendan list_t *buflist; 37505450Sbrendan l2arc_buf_hdr_t *abl2; 37515450Sbrendan arc_buf_hdr_t *ab, *ab_prev; 37525450Sbrendan kmutex_t *hash_lock; 37535450Sbrendan uint64_t taddr; 37545450Sbrendan 37555450Sbrendan ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 37565450Sbrendan 37575450Sbrendan buflist = dev->l2ad_buflist; 37585450Sbrendan 37595450Sbrendan if (buflist == NULL) 37605450Sbrendan return; 37615450Sbrendan 37625450Sbrendan if (!all && dev->l2ad_first) { 37635450Sbrendan /* 37645450Sbrendan * This is the first sweep through the device. There is 37655450Sbrendan * nothing to evict. 37665450Sbrendan */ 37675450Sbrendan return; 37685450Sbrendan } 37695450Sbrendan 37705450Sbrendan if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) { 37715450Sbrendan /* 37725450Sbrendan * When nearing the end of the device, evict to the end 37735450Sbrendan * before the device write hand jumps to the start. 
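 *
 * For example (a sketch with illustrative numbers): with l2ad_end at
 * 32 GB and l2ad_write at 8 MB, a hand within 16 MB (2 * l2ad_write)
 * of the end causes taddr to be set to l2ad_end itself rather than
 * to l2ad_hand + distance.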
37745450Sbrendan */ 37755450Sbrendan taddr = dev->l2ad_end; 37765450Sbrendan } else { 37775450Sbrendan taddr = dev->l2ad_hand + distance; 37785450Sbrendan } 37795450Sbrendan DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 37805450Sbrendan uint64_t, taddr, boolean_t, all); 37815450Sbrendan 37825450Sbrendan top: 37835450Sbrendan mutex_enter(&l2arc_buflist_mtx); 37845450Sbrendan for (ab = list_tail(buflist); ab; ab = ab_prev) { 37855450Sbrendan ab_prev = list_prev(buflist, ab); 37865450Sbrendan 37875450Sbrendan hash_lock = HDR_LOCK(ab); 37885450Sbrendan if (!mutex_tryenter(hash_lock)) { 37895450Sbrendan /* 37905450Sbrendan * Missed the hash lock. Retry. 37915450Sbrendan */ 37925450Sbrendan ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 37935450Sbrendan mutex_exit(&l2arc_buflist_mtx); 37945450Sbrendan mutex_enter(hash_lock); 37955450Sbrendan mutex_exit(hash_lock); 37965450Sbrendan goto top; 37975450Sbrendan } 37985450Sbrendan 37995450Sbrendan if (HDR_L2_WRITE_HEAD(ab)) { 38005450Sbrendan /* 38015450Sbrendan * We hit a write head node. Leave it for 38025450Sbrendan * l2arc_write_done(). 38035450Sbrendan */ 38045450Sbrendan list_remove(buflist, ab); 38055450Sbrendan mutex_exit(hash_lock); 38065450Sbrendan continue; 38075450Sbrendan } 38085450Sbrendan 38095450Sbrendan if (!all && ab->b_l2hdr != NULL && 38105450Sbrendan (ab->b_l2hdr->b_daddr > taddr || 38115450Sbrendan ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 38125450Sbrendan /* 38135450Sbrendan * We've evicted to the target address, 38145450Sbrendan * or the end of the device. 38155450Sbrendan */ 38165450Sbrendan mutex_exit(hash_lock); 38175450Sbrendan break; 38185450Sbrendan } 38195450Sbrendan 38205450Sbrendan if (HDR_FREE_IN_PROGRESS(ab)) { 38215450Sbrendan /* 38225450Sbrendan * Already on the path to destruction. 38235450Sbrendan */ 38245450Sbrendan mutex_exit(hash_lock); 38255450Sbrendan continue; 38265450Sbrendan } 38275450Sbrendan 38285450Sbrendan if (ab->b_state == arc_l2c_only) { 38295450Sbrendan ASSERT(!HDR_L2_READING(ab)); 38305450Sbrendan /* 38315450Sbrendan * This doesn't exist in the ARC. Destroy. 38325450Sbrendan * arc_hdr_destroy() will call list_remove() 38335450Sbrendan * and decrement arcstat_l2_size. 38345450Sbrendan */ 38355450Sbrendan arc_change_state(arc_anon, ab, hash_lock); 38365450Sbrendan arc_hdr_destroy(ab); 38375450Sbrendan } else { 38385450Sbrendan /* 38395450Sbrendan * Tell ARC this no longer exists in L2ARC. 38405450Sbrendan */ 38415450Sbrendan if (ab->b_l2hdr != NULL) { 38425450Sbrendan abl2 = ab->b_l2hdr; 38435450Sbrendan ab->b_l2hdr = NULL; 38445450Sbrendan kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 38455450Sbrendan ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 38465450Sbrendan } 38475450Sbrendan list_remove(buflist, ab); 38485450Sbrendan 38495450Sbrendan /* 38505450Sbrendan * This may have been leftover after a 38515450Sbrendan * failed write. 38525450Sbrendan */ 38535450Sbrendan ab->b_flags &= ~ARC_L2_WRITING; 38545450Sbrendan 38555450Sbrendan /* 38565450Sbrendan * Invalidate issued or about to be issued 38575450Sbrendan * reads, since we may be about to write 38585450Sbrendan * over this location. 
38595450Sbrendan */ 38605450Sbrendan if (HDR_L2_READING(ab)) { 38615450Sbrendan ARCSTAT_BUMP(arcstat_l2_evict_reading); 38625450Sbrendan ab->b_flags |= ARC_L2_EVICTED; 38635450Sbrendan } 38645450Sbrendan } 38655450Sbrendan mutex_exit(hash_lock); 38665450Sbrendan } 38675450Sbrendan mutex_exit(&l2arc_buflist_mtx); 38685450Sbrendan 38695450Sbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 38705450Sbrendan dev->l2ad_evict = taddr; 38715450Sbrendan } 38725450Sbrendan 38735450Sbrendan /* 38745450Sbrendan * Find and write ARC buffers to the L2ARC device. 38755450Sbrendan * 38765450Sbrendan * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 38775450Sbrendan * for reading until they have completed writing. 38785450Sbrendan */ 38795450Sbrendan static void 38805450Sbrendan l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev) 38815450Sbrendan { 38825450Sbrendan arc_buf_hdr_t *ab, *ab_prev, *head; 38835450Sbrendan l2arc_buf_hdr_t *hdrl2; 38845450Sbrendan list_t *list; 38855450Sbrendan uint64_t passed_sz, write_sz, buf_sz; 38865450Sbrendan uint64_t target_sz = dev->l2ad_write; 38875450Sbrendan uint64_t headroom = dev->l2ad_write * l2arc_headroom; 38885450Sbrendan void *buf_data; 38895450Sbrendan kmutex_t *hash_lock, *list_lock; 38905450Sbrendan boolean_t have_lock, full; 38915450Sbrendan l2arc_write_callback_t *cb; 38925450Sbrendan zio_t *pio, *wzio; 38935450Sbrendan 38945450Sbrendan ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 38955450Sbrendan ASSERT(dev->l2ad_vdev != NULL); 38965450Sbrendan 38975450Sbrendan pio = NULL; 38985450Sbrendan write_sz = 0; 38995450Sbrendan full = B_FALSE; 39006245Smaybee head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 39015450Sbrendan head->b_flags |= ARC_L2_WRITE_HEAD; 39025450Sbrendan 39035450Sbrendan /* 39045450Sbrendan * Copy buffers for L2ARC writing. 39055450Sbrendan */ 39065450Sbrendan mutex_enter(&l2arc_buflist_mtx); 39075450Sbrendan for (int try = 0; try <= 3; try++) { 39085450Sbrendan list = l2arc_list_locked(try, &list_lock); 39095450Sbrendan passed_sz = 0; 39105450Sbrendan 39115450Sbrendan for (ab = list_tail(list); ab; ab = ab_prev) { 39125450Sbrendan ab_prev = list_prev(list, ab); 39135450Sbrendan 39145450Sbrendan hash_lock = HDR_LOCK(ab); 39155450Sbrendan have_lock = MUTEX_HELD(hash_lock); 39165450Sbrendan if (!have_lock && !mutex_tryenter(hash_lock)) { 39175450Sbrendan /* 39185450Sbrendan * Skip this buffer rather than waiting. 39195450Sbrendan */ 39205450Sbrendan continue; 39215450Sbrendan } 39225450Sbrendan 39235450Sbrendan passed_sz += ab->b_size; 39245450Sbrendan if (passed_sz > headroom) { 39255450Sbrendan /* 39265450Sbrendan * Searched too far. 39275450Sbrendan */ 39285450Sbrendan mutex_exit(hash_lock); 39295450Sbrendan break; 39305450Sbrendan } 39315450Sbrendan 39325450Sbrendan if (ab->b_spa != spa) { 39335450Sbrendan mutex_exit(hash_lock); 39345450Sbrendan continue; 39355450Sbrendan } 39365450Sbrendan 39375450Sbrendan if (ab->b_l2hdr != NULL) { 39385450Sbrendan /* 39395450Sbrendan * Already in L2ARC. 
39405450Sbrendan */ 39415450Sbrendan mutex_exit(hash_lock); 39425450Sbrendan continue; 39435450Sbrendan } 39445450Sbrendan 39455450Sbrendan if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) { 39465450Sbrendan mutex_exit(hash_lock); 39475450Sbrendan continue; 39485450Sbrendan } 39495450Sbrendan 39505450Sbrendan if ((write_sz + ab->b_size) > target_sz) { 39515450Sbrendan full = B_TRUE; 39525450Sbrendan mutex_exit(hash_lock); 39535450Sbrendan break; 39545450Sbrendan } 39555450Sbrendan 39565450Sbrendan if (ab->b_buf == NULL) { 39575450Sbrendan DTRACE_PROBE1(l2arc__buf__null, void *, ab); 39585450Sbrendan mutex_exit(hash_lock); 39595450Sbrendan continue; 39605450Sbrendan } 39615450Sbrendan 39625450Sbrendan if (pio == NULL) { 39635450Sbrendan /* 39645450Sbrendan * Insert a dummy header on the buflist so 39655450Sbrendan * l2arc_write_done() can find where the 39665450Sbrendan * write buffers begin without searching. 39675450Sbrendan */ 39685450Sbrendan list_insert_head(dev->l2ad_buflist, head); 39695450Sbrendan 39705450Sbrendan cb = kmem_alloc( 39715450Sbrendan sizeof (l2arc_write_callback_t), KM_SLEEP); 39725450Sbrendan cb->l2wcb_dev = dev; 39735450Sbrendan cb->l2wcb_head = head; 39745450Sbrendan pio = zio_root(spa, l2arc_write_done, cb, 39755450Sbrendan ZIO_FLAG_CANFAIL); 39765450Sbrendan } 39775450Sbrendan 39785450Sbrendan /* 39795450Sbrendan * Create and add a new L2ARC header. 39805450Sbrendan */ 39815450Sbrendan hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 39825450Sbrendan hdrl2->b_dev = dev; 39835450Sbrendan hdrl2->b_daddr = dev->l2ad_hand; 39845450Sbrendan 39855450Sbrendan ab->b_flags |= ARC_L2_WRITING; 39865450Sbrendan ab->b_l2hdr = hdrl2; 39875450Sbrendan list_insert_head(dev->l2ad_buflist, ab); 39885450Sbrendan buf_data = ab->b_buf->b_data; 39895450Sbrendan buf_sz = ab->b_size; 39905450Sbrendan 39915450Sbrendan /* 39925450Sbrendan * Compute and store the buffer cksum before 39935450Sbrendan * writing. On debug the cksum is verified first. 39945450Sbrendan */ 39955450Sbrendan arc_cksum_verify(ab->b_buf); 39965450Sbrendan arc_cksum_compute(ab->b_buf, B_TRUE); 39975450Sbrendan 39985450Sbrendan mutex_exit(hash_lock); 39995450Sbrendan 40005450Sbrendan wzio = zio_write_phys(pio, dev->l2ad_vdev, 40015450Sbrendan dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 40025450Sbrendan NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 40035450Sbrendan ZIO_FLAG_CANFAIL, B_FALSE); 40045450Sbrendan 40055450Sbrendan DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 40065450Sbrendan zio_t *, wzio); 40075450Sbrendan (void) zio_nowait(wzio); 40085450Sbrendan 40095450Sbrendan write_sz += buf_sz; 40105450Sbrendan dev->l2ad_hand += buf_sz; 40115450Sbrendan } 40125450Sbrendan 40135450Sbrendan mutex_exit(list_lock); 40145450Sbrendan 40155450Sbrendan if (full == B_TRUE) 40165450Sbrendan break; 40175450Sbrendan } 40185450Sbrendan mutex_exit(&l2arc_buflist_mtx); 40195450Sbrendan 40205450Sbrendan if (pio == NULL) { 40215450Sbrendan ASSERT3U(write_sz, ==, 0); 40225450Sbrendan kmem_cache_free(hdr_cache, head); 40235450Sbrendan return; 40245450Sbrendan } 40255450Sbrendan 40265450Sbrendan ASSERT3U(write_sz, <=, target_sz); 40275450Sbrendan ARCSTAT_BUMP(arcstat_l2_writes_sent); 40285450Sbrendan ARCSTAT_INCR(arcstat_l2_size, write_sz); 40295450Sbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); 40305450Sbrendan 40315450Sbrendan /* 40325450Sbrendan * Bump device hand to the device start if it is approaching the end. 40335450Sbrendan * l2arc_evict() will already have evicted ahead for this case. 
40345450Sbrendan */ 40355450Sbrendan if (dev->l2ad_hand >= (dev->l2ad_end - dev->l2ad_write)) { 40365450Sbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, 40375450Sbrendan dev->l2ad_end - dev->l2ad_hand); 40385450Sbrendan dev->l2ad_hand = dev->l2ad_start; 40395450Sbrendan dev->l2ad_evict = dev->l2ad_start; 40405450Sbrendan dev->l2ad_first = B_FALSE; 40415450Sbrendan } 40425450Sbrendan 40435450Sbrendan (void) zio_wait(pio); 40445450Sbrendan } 40455450Sbrendan 40465450Sbrendan /* 40475450Sbrendan * This thread feeds the L2ARC at regular intervals. This is the beating 40485450Sbrendan * heart of the L2ARC. 40495450Sbrendan */ 40505450Sbrendan static void 40515450Sbrendan l2arc_feed_thread(void) 40525450Sbrendan { 40535450Sbrendan callb_cpr_t cpr; 40545450Sbrendan l2arc_dev_t *dev; 40555450Sbrendan spa_t *spa; 40565450Sbrendan int interval; 40575450Sbrendan boolean_t startup = B_TRUE; 40585450Sbrendan 40595450Sbrendan CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 40605450Sbrendan 40615450Sbrendan mutex_enter(&l2arc_feed_thr_lock); 40625450Sbrendan 40635450Sbrendan while (l2arc_thread_exit == 0) { 40645450Sbrendan /* 40655450Sbrendan * Initially pause for L2ARC_FEED_DELAY seconds as a grace 40665450Sbrendan * interval during boot, followed by l2arc_feed_secs seconds 40675450Sbrendan * thereafter. 40685450Sbrendan */ 40695450Sbrendan CALLB_CPR_SAFE_BEGIN(&cpr); 40705450Sbrendan if (startup) { 40715450Sbrendan interval = L2ARC_FEED_DELAY; 40725450Sbrendan startup = B_FALSE; 40735450Sbrendan } else { 40745450Sbrendan interval = l2arc_feed_secs; 40755450Sbrendan } 40765450Sbrendan (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 40775450Sbrendan lbolt + (hz * interval)); 40785450Sbrendan CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 40795450Sbrendan 4080*6643Seschrock mutex_enter(&l2arc_dev_mtx); 4081*6643Seschrock 40825450Sbrendan /* 4083*6643Seschrock * This selects the next l2arc device to write to, and in 4084*6643Seschrock * doing so the next spa to feed from: dev->l2ad_spa. This 4085*6643Seschrock * will return NULL if there are no l2arc devices or if they 4086*6643Seschrock * are all faulted. 40875450Sbrendan */ 4088*6643Seschrock if ((dev = l2arc_dev_get_next()) == NULL) { 40895450Sbrendan mutex_exit(&l2arc_dev_mtx); 40905450Sbrendan continue; 40915450Sbrendan } 40925450Sbrendan 40935450Sbrendan /* 40945450Sbrendan * Avoid contributing to memory pressure. 40955450Sbrendan */ 40965450Sbrendan if (arc_reclaim_needed()) { 40975450Sbrendan ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 40985450Sbrendan mutex_exit(&l2arc_dev_mtx); 40995450Sbrendan continue; 41005450Sbrendan } 41015450Sbrendan 41025450Sbrendan spa = dev->l2ad_spa; 41035450Sbrendan ASSERT(spa != NULL); 41045450Sbrendan ARCSTAT_BUMP(arcstat_l2_feeds); 41055450Sbrendan 41065450Sbrendan /* 41075450Sbrendan * Evict L2ARC buffers that will be overwritten. 41085450Sbrendan */ 41095450Sbrendan l2arc_evict(dev, dev->l2ad_write, B_FALSE); 41105450Sbrendan 41115450Sbrendan /* 41125450Sbrendan * Write ARC buffers. 
41135450Sbrendan */ 41145450Sbrendan l2arc_write_buffers(spa, dev); 41155450Sbrendan mutex_exit(&l2arc_dev_mtx); 41165450Sbrendan } 41175450Sbrendan 41185450Sbrendan l2arc_thread_exit = 0; 41195450Sbrendan cv_broadcast(&l2arc_feed_thr_cv); 41205450Sbrendan CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 41215450Sbrendan thread_exit(); 41225450Sbrendan } 41235450Sbrendan 4124*6643Seschrock boolean_t 4125*6643Seschrock l2arc_vdev_present(vdev_t *vd) 4126*6643Seschrock { 4127*6643Seschrock l2arc_dev_t *dev; 4128*6643Seschrock 4129*6643Seschrock mutex_enter(&l2arc_dev_mtx); 4130*6643Seschrock for (dev = list_head(l2arc_dev_list); dev != NULL; 4131*6643Seschrock dev = list_next(l2arc_dev_list, dev)) { 4132*6643Seschrock if (dev->l2ad_vdev == vd) 4133*6643Seschrock break; 4134*6643Seschrock } 4135*6643Seschrock mutex_exit(&l2arc_dev_mtx); 4136*6643Seschrock 4137*6643Seschrock return (dev != NULL); 4138*6643Seschrock } 4139*6643Seschrock 41405450Sbrendan /* 41415450Sbrendan * Add a vdev for use by the L2ARC. By this point the spa has already 41425450Sbrendan * validated the vdev and opened it. 41435450Sbrendan */ 41445450Sbrendan void 41455450Sbrendan l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) 41465450Sbrendan { 41475450Sbrendan l2arc_dev_t *adddev; 41485450Sbrendan 4149*6643Seschrock ASSERT(!l2arc_vdev_present(vd)); 4150*6643Seschrock 41515450Sbrendan /* 41525450Sbrendan * Create a new l2arc device entry. 41535450Sbrendan */ 41545450Sbrendan adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 41555450Sbrendan adddev->l2ad_spa = spa; 41565450Sbrendan adddev->l2ad_vdev = vd; 41575450Sbrendan adddev->l2ad_write = l2arc_write_max; 41585450Sbrendan adddev->l2ad_start = start; 41595450Sbrendan adddev->l2ad_end = end; 41605450Sbrendan adddev->l2ad_hand = adddev->l2ad_start; 41615450Sbrendan adddev->l2ad_evict = adddev->l2ad_start; 41625450Sbrendan adddev->l2ad_first = B_TRUE; 41635450Sbrendan ASSERT3U(adddev->l2ad_write, >, 0); 41645450Sbrendan 41655450Sbrendan /* 41665450Sbrendan * This is a list of all ARC buffers that are still valid on the 41675450Sbrendan * device. 41685450Sbrendan */ 41695450Sbrendan adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 41705450Sbrendan list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 41715450Sbrendan offsetof(arc_buf_hdr_t, b_l2node)); 41725450Sbrendan 41735450Sbrendan spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); 41745450Sbrendan 41755450Sbrendan /* 41765450Sbrendan * Add device to global list 41775450Sbrendan */ 41785450Sbrendan mutex_enter(&l2arc_dev_mtx); 41795450Sbrendan list_insert_head(l2arc_dev_list, adddev); 41805450Sbrendan atomic_inc_64(&l2arc_ndev); 41815450Sbrendan mutex_exit(&l2arc_dev_mtx); 41825450Sbrendan } 41835450Sbrendan 41845450Sbrendan /* 41855450Sbrendan * Remove a vdev from the L2ARC. 41865450Sbrendan */ 41875450Sbrendan void 41885450Sbrendan l2arc_remove_vdev(vdev_t *vd) 41895450Sbrendan { 41905450Sbrendan l2arc_dev_t *dev, *nextdev, *remdev = NULL; 41915450Sbrendan 41925450Sbrendan /* 41935450Sbrendan * We can only grab the spa config lock when cache device writes 41945450Sbrendan * complete. 
41955450Sbrendan */ 41965450Sbrendan ASSERT3U(l2arc_writes_sent, ==, l2arc_writes_done); 41975450Sbrendan 41985450Sbrendan /* 41995450Sbrendan * Find the device by vdev 42005450Sbrendan */ 42015450Sbrendan mutex_enter(&l2arc_dev_mtx); 42025450Sbrendan for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 42035450Sbrendan nextdev = list_next(l2arc_dev_list, dev); 42045450Sbrendan if (vd == dev->l2ad_vdev) { 42055450Sbrendan remdev = dev; 42065450Sbrendan break; 42075450Sbrendan } 42085450Sbrendan } 42095450Sbrendan ASSERT(remdev != NULL); 42105450Sbrendan 42115450Sbrendan /* 42125450Sbrendan * Remove device from global list 42135450Sbrendan */ 42145450Sbrendan list_remove(l2arc_dev_list, remdev); 42155450Sbrendan l2arc_dev_last = NULL; /* may have been invalidated */ 42165450Sbrendan 42175450Sbrendan /* 42185450Sbrendan * Clear all buflists and ARC references. L2ARC device flush. 42195450Sbrendan */ 42205450Sbrendan l2arc_evict(remdev, 0, B_TRUE); 42215450Sbrendan list_destroy(remdev->l2ad_buflist); 42225450Sbrendan kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 42235450Sbrendan kmem_free(remdev, sizeof (l2arc_dev_t)); 42245450Sbrendan 42255450Sbrendan atomic_dec_64(&l2arc_ndev); 42265450Sbrendan mutex_exit(&l2arc_dev_mtx); 42275450Sbrendan } 42285450Sbrendan 42295450Sbrendan void 42305450Sbrendan l2arc_init() 42315450Sbrendan { 42325450Sbrendan l2arc_thread_exit = 0; 42335450Sbrendan l2arc_ndev = 0; 42345450Sbrendan l2arc_writes_sent = 0; 42355450Sbrendan l2arc_writes_done = 0; 42365450Sbrendan 42375450Sbrendan mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 42385450Sbrendan cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 42395450Sbrendan mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 42405450Sbrendan mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 42415450Sbrendan mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 42425450Sbrendan 42435450Sbrendan l2arc_dev_list = &L2ARC_dev_list; 42445450Sbrendan l2arc_free_on_write = &L2ARC_free_on_write; 42455450Sbrendan list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 42465450Sbrendan offsetof(l2arc_dev_t, l2ad_node)); 42475450Sbrendan list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 42485450Sbrendan offsetof(l2arc_data_free_t, l2df_list_node)); 42495450Sbrendan 42505450Sbrendan (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 42515450Sbrendan TS_RUN, minclsyspri); 42525450Sbrendan } 42535450Sbrendan 42545450Sbrendan void 42555450Sbrendan l2arc_fini() 42565450Sbrendan { 42575450Sbrendan mutex_enter(&l2arc_feed_thr_lock); 42585450Sbrendan cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 42595450Sbrendan l2arc_thread_exit = 1; 42605450Sbrendan while (l2arc_thread_exit != 0) 42615450Sbrendan cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 42625450Sbrendan mutex_exit(&l2arc_feed_thr_lock); 42635450Sbrendan 42645450Sbrendan mutex_destroy(&l2arc_feed_thr_lock); 42655450Sbrendan cv_destroy(&l2arc_feed_thr_cv); 42665450Sbrendan mutex_destroy(&l2arc_dev_mtx); 42675450Sbrendan mutex_destroy(&l2arc_buflist_mtx); 42685450Sbrendan mutex_destroy(&l2arc_free_on_write_mtx); 42695450Sbrendan 42705450Sbrendan list_destroy(l2arc_dev_list); 42715450Sbrendan list_destroy(l2arc_free_on_write); 42725450Sbrendan } 4273
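/*
 * A sketch of how the L2ARC tunables described in the comment block
 * above might be adjusted on Solaris, assuming the usual /etc/system
 * "set module:variable = value" syntax for zfs module globals.  The
 * values below are illustrative only (64 MB per feed interval, a
 * 2 second feed period, and skipping prefetched buffers):
 *
 *	set zfs:l2arc_write_max = 0x4000000
 *	set zfs:l2arc_feed_secs = 2
 *	set zfs:l2arc_noprefetch = 1
 */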