/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic:  we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict.
 *    In our model, we have variable sized cache blocks (ranging from
 *    512 bytes to 128K bytes).  We therefore choose a set of blocks
 *    to evict to make space for a cache miss that approximates as
 *    closely as possible the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern uint64_t zfs_write_limit_inflated;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.
 * These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t	arcstat_hits;
	kstat_named_t	arcstat_misses;
	kstat_named_t	arcstat_demand_data_hits;
	kstat_named_t	arcstat_demand_data_misses;
	kstat_named_t	arcstat_demand_metadata_hits;
	kstat_named_t	arcstat_demand_metadata_misses;
	kstat_named_t	arcstat_prefetch_data_hits;
	kstat_named_t	arcstat_prefetch_data_misses;
	kstat_named_t	arcstat_prefetch_metadata_hits;
	kstat_named_t	arcstat_prefetch_metadata_misses;
	kstat_named_t	arcstat_mru_hits;
	kstat_named_t	arcstat_mru_ghost_hits;
	kstat_named_t	arcstat_mfu_hits;
	kstat_named_t	arcstat_mfu_ghost_hits;
	kstat_named_t	arcstat_deleted;
	kstat_named_t	arcstat_recycle_miss;
	kstat_named_t	arcstat_mutex_miss;
	kstat_named_t	arcstat_evict_skip;
	kstat_named_t	arcstat_hash_elements;
	kstat_named_t	arcstat_hash_elements_max;
	kstat_named_t	arcstat_hash_collisions;
	kstat_named_t	arcstat_hash_chains;
	kstat_named_t	arcstat_hash_chain_max;
	kstat_named_t	arcstat_p;
	kstat_named_t	arcstat_c;
	kstat_named_t	arcstat_c_min;
	kstat_named_t	arcstat_c_max;
	kstat_named_t	arcstat_size;
	kstat_named_t	arcstat_hdr_size;
	kstat_named_t	arcstat_l2_hits;
	kstat_named_t	arcstat_l2_misses;
	kstat_named_t	arcstat_l2_feeds;
	kstat_named_t	arcstat_l2_rw_clash;
	kstat_named_t	arcstat_l2_writes_sent;
	kstat_named_t	arcstat_l2_writes_done;
	kstat_named_t	arcstat_l2_writes_error;
	kstat_named_t	arcstat_l2_writes_hdr_miss;
	kstat_named_t	arcstat_l2_evict_lock_retry;
	kstat_named_t	arcstat_l2_evict_reading;
	kstat_named_t	arcstat_l2_free_on_write;
	kstat_named_t	arcstat_l2_abort_lowmem;
	kstat_named_t	arcstat_l2_cksum_bad;
	kstat_named_t	arcstat_l2_io_error;
	kstat_named_t	arcstat_l2_size;
	kstat_named_t	arcstat_l2_hdr_size;
	kstat_named_t	arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}
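
/*
 * For illustration (explanatory note, not part of the original logic):
 * ARCSTAT_MAX() maintains a "high water mark" statistic without a lock.
 * It re-reads the current value and retries the atomic_cas_64() until
 * either the stored maximum is already >= val or the compare-and-swap
 * succeeds.  For example, buf_hash_insert() below records the longest
 * hash chain observed with:
 *
 *	ARCSTAT_MAX(arcstat_hash_chain_max, i);
 *
 * which behaves like "arcstat_hash_chain_max = MAX(arcstat_hash_chain_max, i)"
 * performed atomically with respect to concurrent updaters.
 */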

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.
 * However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_DONT_L2CACHE	(1 << 16)	/* originated by prefetch */
#define	ARC_L2_READING		(1 << 17)	/* L2ARC read in progress */
#define	ARC_L2_WRITING		(1 << 18)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 19)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 20)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_DONT_L2CACHE(hdr)	((hdr)->b_flags & ARC_DONT_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_L2_READING)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_DELAY	180		/* starting grace */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	daddr_t		b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;
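
/*
 * For illustration (explanatory note, not part of the original logic):
 * when an ARC buffer's data must be freed while an L2ARC write that
 * references it is still in flight, arc_buf_data_free() (below) does not
 * free the data immediately.  Instead it wraps the pointer, size and free
 * function in an l2arc_data_free_t and queues it on l2arc_free_on_write:
 *
 *	df->l2df_data = data;
 *	df->l2df_size = size;
 *	df->l2df_func = free_func;
 *	list_insert_head(l2arc_free_on_write, df);
 *
 * The deferred frees are then performed after the L2ARC write completes
 * (in the write completion path, not shown in this section).
 */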

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);

	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);

	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
panic("modifying buffer while i/o in progress!"); 8915450Sbrendan arc_cksum_verify(buf); 8925450Sbrendan } 8935450Sbrendan 8943093Sahrens mutex_enter(&buf->b_hdr->b_freeze_lock); 8953093Sahrens if (buf->b_hdr->b_freeze_cksum != NULL) { 8963093Sahrens kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 8973093Sahrens buf->b_hdr->b_freeze_cksum = NULL; 8983093Sahrens } 8993093Sahrens mutex_exit(&buf->b_hdr->b_freeze_lock); 9003093Sahrens } 9013093Sahrens 9023093Sahrens void 9033093Sahrens arc_buf_freeze(arc_buf_t *buf) 9043093Sahrens { 9053312Sahrens if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 9063312Sahrens return; 9073312Sahrens 9083093Sahrens ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 9093403Sbmc buf->b_hdr->b_state == arc_anon); 9105450Sbrendan arc_cksum_compute(buf, B_FALSE); 9113093Sahrens } 9123093Sahrens 9133093Sahrens static void 914789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 915789Sahrens { 916789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 917789Sahrens 918789Sahrens if ((refcount_add(&ab->b_refcnt, tag) == 1) && 9193403Sbmc (ab->b_state != arc_anon)) { 9203700Sek110237 uint64_t delta = ab->b_size * ab->b_datacnt; 9214309Smaybee list_t *list = &ab->b_state->arcs_list[ab->b_type]; 9224309Smaybee uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 923789Sahrens 9243403Sbmc ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 9253403Sbmc mutex_enter(&ab->b_state->arcs_mtx); 926789Sahrens ASSERT(list_link_active(&ab->b_arc_node)); 9274309Smaybee list_remove(list, ab); 9281544Seschrock if (GHOST_STATE(ab->b_state)) { 9291544Seschrock ASSERT3U(ab->b_datacnt, ==, 0); 9301544Seschrock ASSERT3P(ab->b_buf, ==, NULL); 9311544Seschrock delta = ab->b_size; 9321544Seschrock } 9331544Seschrock ASSERT(delta > 0); 9344309Smaybee ASSERT3U(*size, >=, delta); 9354309Smaybee atomic_add_64(size, -delta); 9363403Sbmc mutex_exit(&ab->b_state->arcs_mtx); 9372391Smaybee /* remove the prefetch flag is we get a reference */ 9382391Smaybee if (ab->b_flags & ARC_PREFETCH) 9392391Smaybee ab->b_flags &= ~ARC_PREFETCH; 940789Sahrens } 941789Sahrens } 942789Sahrens 943789Sahrens static int 944789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 945789Sahrens { 946789Sahrens int cnt; 9473403Sbmc arc_state_t *state = ab->b_state; 948789Sahrens 9493403Sbmc ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 9503403Sbmc ASSERT(!GHOST_STATE(state)); 951789Sahrens 952789Sahrens if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 9533403Sbmc (state != arc_anon)) { 9544309Smaybee uint64_t *size = &state->arcs_lsize[ab->b_type]; 9554309Smaybee 9563403Sbmc ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 9573403Sbmc mutex_enter(&state->arcs_mtx); 958789Sahrens ASSERT(!list_link_active(&ab->b_arc_node)); 9594309Smaybee list_insert_head(&state->arcs_list[ab->b_type], ab); 9601544Seschrock ASSERT(ab->b_datacnt > 0); 9614309Smaybee atomic_add_64(size, ab->b_size * ab->b_datacnt); 9623403Sbmc mutex_exit(&state->arcs_mtx); 963789Sahrens } 964789Sahrens return (cnt); 965789Sahrens } 966789Sahrens 967789Sahrens /* 968789Sahrens * Move the supplied buffer to the indicated state. The mutex 969789Sahrens * for the buffer must be held by the caller. 
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space)
{
	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (hdr->b_l2hdr != NULL) {
		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
			/*
			 * To prevent arc_free() and l2arc_evict() from
			 * attempting to free the same buffer at the same time,
			 * a FREE_IN_PROGRESS flag is given to arc_free() to
			 * give it priority.  l2arc_evict() can't destroy this
			 * header while we are waiting on l2arc_buflist_mtx.
			 */
			mutex_enter(&l2arc_buflist_mtx);
			ASSERT(hdr->b_l2hdr != NULL);

			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
			mutex_exit(&l2arc_buflist_mtx);
		} else {
			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
		}
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
		if (hdr->b_state == arc_l2c_only)
			l2arc_hdr_stat_remove();
		hdr->b_l2hdr = NULL;
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
1326789Sahrens arc_buf_free(arc_buf_t *buf, void *tag) 1327789Sahrens { 1328789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 13293403Sbmc int hashed = hdr->b_state != arc_anon; 13301544Seschrock 13311544Seschrock ASSERT(buf->b_efunc == NULL); 13321544Seschrock ASSERT(buf->b_data != NULL); 13331544Seschrock 13341544Seschrock if (hashed) { 13351544Seschrock kmutex_t *hash_lock = HDR_LOCK(hdr); 13361544Seschrock 13371544Seschrock mutex_enter(hash_lock); 13381544Seschrock (void) remove_reference(hdr, hash_lock, tag); 13391544Seschrock if (hdr->b_datacnt > 1) 13402688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 13411544Seschrock else 13421544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 13431544Seschrock mutex_exit(hash_lock); 13441544Seschrock } else if (HDR_IO_IN_PROGRESS(hdr)) { 13451544Seschrock int destroy_hdr; 13461544Seschrock /* 13471544Seschrock * We are in the middle of an async write. Don't destroy 13481544Seschrock * this buffer unless the write completes before we finish 13491544Seschrock * decrementing the reference count. 13501544Seschrock */ 13511544Seschrock mutex_enter(&arc_eviction_mtx); 13521544Seschrock (void) remove_reference(hdr, NULL, tag); 13531544Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt)); 13541544Seschrock destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 13551544Seschrock mutex_exit(&arc_eviction_mtx); 13561544Seschrock if (destroy_hdr) 13571544Seschrock arc_hdr_destroy(hdr); 13581544Seschrock } else { 13591544Seschrock if (remove_reference(hdr, NULL, tag) > 0) { 13601544Seschrock ASSERT(HDR_IO_ERROR(hdr)); 13612688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 13621544Seschrock } else { 13631544Seschrock arc_hdr_destroy(hdr); 13641544Seschrock } 13651544Seschrock } 13661544Seschrock } 13671544Seschrock 13681544Seschrock int 13691544Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag) 13701544Seschrock { 13711544Seschrock arc_buf_hdr_t *hdr = buf->b_hdr; 1372789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr); 13731544Seschrock int no_callback = (buf->b_efunc == NULL); 13741544Seschrock 13753403Sbmc if (hdr->b_state == arc_anon) { 13761544Seschrock arc_buf_free(buf, tag); 13771544Seschrock return (no_callback); 13781544Seschrock } 1379789Sahrens 1380789Sahrens mutex_enter(hash_lock); 13813403Sbmc ASSERT(hdr->b_state != arc_anon); 13821544Seschrock ASSERT(buf->b_data != NULL); 1383789Sahrens 13841544Seschrock (void) remove_reference(hdr, hash_lock, tag); 13851544Seschrock if (hdr->b_datacnt > 1) { 13861544Seschrock if (no_callback) 13872688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 13881544Seschrock } else if (no_callback) { 13891544Seschrock ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 13901544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 1391789Sahrens } 13921544Seschrock ASSERT(no_callback || hdr->b_datacnt > 1 || 13931544Seschrock refcount_is_zero(&hdr->b_refcnt)); 1394789Sahrens mutex_exit(hash_lock); 13951544Seschrock return (no_callback); 1396789Sahrens } 1397789Sahrens 1398789Sahrens int 1399789Sahrens arc_buf_size(arc_buf_t *buf) 1400789Sahrens { 1401789Sahrens return (buf->b_hdr->b_size); 1402789Sahrens } 1403789Sahrens 1404789Sahrens /* 1405789Sahrens * Evict buffers from list until we've removed the specified number of 1406789Sahrens * bytes. Move the removed buffers to the appropriate evict state. 14072688Smaybee * If the recycle flag is set, then attempt to "recycle" a buffer: 14082688Smaybee * - look for a buffer to evict that is `bytes' long. 14092688Smaybee * - return the data block from this buffer rather than freeing it. 
14102688Smaybee * This flag is used by callers that are trying to make space for a 14112688Smaybee * new buffer in a full arc cache. 14125642Smaybee * 14135642Smaybee * This function makes a "best effort". It skips over any buffers 14145642Smaybee * it can't get a hash_lock on, and so may not catch all candidates. 14155642Smaybee * It may also return without evicting as much space as requested. 1416789Sahrens */ 14172688Smaybee static void * 14185642Smaybee arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, 14193290Sjohansen arc_buf_contents_t type) 1420789Sahrens { 1421789Sahrens arc_state_t *evicted_state; 14222688Smaybee uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 14232918Smaybee arc_buf_hdr_t *ab, *ab_prev = NULL; 14244309Smaybee list_t *list = &state->arcs_list[type]; 1425789Sahrens kmutex_t *hash_lock; 14262688Smaybee boolean_t have_lock; 14272918Smaybee void *stolen = NULL; 1428789Sahrens 14293403Sbmc ASSERT(state == arc_mru || state == arc_mfu); 1430789Sahrens 14313403Sbmc evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 1432789Sahrens 14333403Sbmc mutex_enter(&state->arcs_mtx); 14343403Sbmc mutex_enter(&evicted_state->arcs_mtx); 1435789Sahrens 14364309Smaybee for (ab = list_tail(list); ab; ab = ab_prev) { 14374309Smaybee ab_prev = list_prev(list, ab); 14382391Smaybee /* prefetch buffers have a minimum lifespan */ 14392688Smaybee if (HDR_IO_IN_PROGRESS(ab) || 14405642Smaybee (spa && ab->b_spa != spa) || 14412688Smaybee (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 14422688Smaybee lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 14432391Smaybee skipped++; 14442391Smaybee continue; 14452391Smaybee } 14462918Smaybee /* "lookahead" for better eviction candidate */ 14472918Smaybee if (recycle && ab->b_size != bytes && 14482918Smaybee ab_prev && ab_prev->b_size == bytes) 14492688Smaybee continue; 1450789Sahrens hash_lock = HDR_LOCK(ab); 14512688Smaybee have_lock = MUTEX_HELD(hash_lock); 14522688Smaybee if (have_lock || mutex_tryenter(hash_lock)) { 1453789Sahrens ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 14541544Seschrock ASSERT(ab->b_datacnt > 0); 14551544Seschrock while (ab->b_buf) { 14561544Seschrock arc_buf_t *buf = ab->b_buf; 14572688Smaybee if (buf->b_data) { 14581544Seschrock bytes_evicted += ab->b_size; 14593290Sjohansen if (recycle && ab->b_type == type && 14605450Sbrendan ab->b_size == bytes && 14615450Sbrendan !HDR_L2_WRITING(ab)) { 14622918Smaybee stolen = buf->b_data; 14632918Smaybee recycle = FALSE; 14642918Smaybee } 14652688Smaybee } 14661544Seschrock if (buf->b_efunc) { 14671544Seschrock mutex_enter(&arc_eviction_mtx); 14682918Smaybee arc_buf_destroy(buf, 14692918Smaybee buf->b_data == stolen, FALSE); 14701544Seschrock ab->b_buf = buf->b_next; 14712887Smaybee buf->b_hdr = &arc_eviction_hdr; 14721544Seschrock buf->b_next = arc_eviction_list; 14731544Seschrock arc_eviction_list = buf; 14741544Seschrock mutex_exit(&arc_eviction_mtx); 14751544Seschrock } else { 14762918Smaybee arc_buf_destroy(buf, 14772918Smaybee buf->b_data == stolen, TRUE); 14781544Seschrock } 14791544Seschrock } 14801544Seschrock ASSERT(ab->b_datacnt == 0); 1481789Sahrens arc_change_state(evicted_state, ab, hash_lock); 14821544Seschrock ASSERT(HDR_IN_HASH_TABLE(ab)); 14835450Sbrendan ab->b_flags |= ARC_IN_HASH_TABLE; 14845450Sbrendan ab->b_flags &= ~ARC_BUF_AVAILABLE; 1485789Sahrens DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 14862688Smaybee if (!have_lock) 14872688Smaybee mutex_exit(hash_lock); 14881544Seschrock if (bytes >= 0 && 
bytes_evicted >= bytes) 1489789Sahrens break; 1490789Sahrens } else { 14912688Smaybee missed += 1; 1492789Sahrens } 1493789Sahrens } 14943403Sbmc 14953403Sbmc mutex_exit(&evicted_state->arcs_mtx); 14963403Sbmc mutex_exit(&state->arcs_mtx); 1497789Sahrens 1498789Sahrens if (bytes_evicted < bytes) 1499789Sahrens dprintf("only evicted %lld bytes from %x", 1500789Sahrens (longlong_t)bytes_evicted, state); 1501789Sahrens 15022688Smaybee if (skipped) 15033403Sbmc ARCSTAT_INCR(arcstat_evict_skip, skipped); 15043403Sbmc 15052688Smaybee if (missed) 15063403Sbmc ARCSTAT_INCR(arcstat_mutex_miss, missed); 15073403Sbmc 15084709Smaybee /* 15094709Smaybee * We have just evicted some date into the ghost state, make 15104709Smaybee * sure we also adjust the ghost state size if necessary. 15114709Smaybee */ 15124709Smaybee if (arc_no_grow && 15134709Smaybee arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 15144709Smaybee int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 15154709Smaybee arc_mru_ghost->arcs_size - arc_c; 15164709Smaybee 15174709Smaybee if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 15184709Smaybee int64_t todelete = 15194709Smaybee MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 15205642Smaybee arc_evict_ghost(arc_mru_ghost, NULL, todelete); 15214709Smaybee } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 15224709Smaybee int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 15234709Smaybee arc_mru_ghost->arcs_size + 15244709Smaybee arc_mfu_ghost->arcs_size - arc_c); 15255642Smaybee arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 15264709Smaybee } 15274709Smaybee } 15284709Smaybee 15292918Smaybee return (stolen); 1530789Sahrens } 1531789Sahrens 1532789Sahrens /* 1533789Sahrens * Remove buffers from list until we've removed the specified number of 1534789Sahrens * bytes. Destroy the buffers that are removed. 1535789Sahrens */ 1536789Sahrens static void 15375642Smaybee arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) 1538789Sahrens { 1539789Sahrens arc_buf_hdr_t *ab, *ab_prev; 15404309Smaybee list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1541789Sahrens kmutex_t *hash_lock; 15421544Seschrock uint64_t bytes_deleted = 0; 15433700Sek110237 uint64_t bufs_skipped = 0; 1544789Sahrens 15451544Seschrock ASSERT(GHOST_STATE(state)); 1546789Sahrens top: 15473403Sbmc mutex_enter(&state->arcs_mtx); 15484309Smaybee for (ab = list_tail(list); ab; ab = ab_prev) { 15494309Smaybee ab_prev = list_prev(list, ab); 15505642Smaybee if (spa && ab->b_spa != spa) 15515642Smaybee continue; 1552789Sahrens hash_lock = HDR_LOCK(ab); 1553789Sahrens if (mutex_tryenter(hash_lock)) { 15542391Smaybee ASSERT(!HDR_IO_IN_PROGRESS(ab)); 15551544Seschrock ASSERT(ab->b_buf == NULL); 15563403Sbmc ARCSTAT_BUMP(arcstat_deleted); 15571544Seschrock bytes_deleted += ab->b_size; 15585450Sbrendan 15595450Sbrendan if (ab->b_l2hdr != NULL) { 15605450Sbrendan /* 15615450Sbrendan * This buffer is cached on the 2nd Level ARC; 15625450Sbrendan * don't destroy the header. 
15635450Sbrendan */ 15645450Sbrendan arc_change_state(arc_l2c_only, ab, hash_lock); 15655450Sbrendan mutex_exit(hash_lock); 15665450Sbrendan } else { 15675450Sbrendan arc_change_state(arc_anon, ab, hash_lock); 15685450Sbrendan mutex_exit(hash_lock); 15695450Sbrendan arc_hdr_destroy(ab); 15705450Sbrendan } 15715450Sbrendan 1572789Sahrens DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1573789Sahrens if (bytes >= 0 && bytes_deleted >= bytes) 1574789Sahrens break; 1575789Sahrens } else { 1576789Sahrens if (bytes < 0) { 15773403Sbmc mutex_exit(&state->arcs_mtx); 1578789Sahrens mutex_enter(hash_lock); 1579789Sahrens mutex_exit(hash_lock); 1580789Sahrens goto top; 1581789Sahrens } 1582789Sahrens bufs_skipped += 1; 1583789Sahrens } 1584789Sahrens } 15853403Sbmc mutex_exit(&state->arcs_mtx); 1586789Sahrens 15874309Smaybee if (list == &state->arcs_list[ARC_BUFC_DATA] && 15884309Smaybee (bytes < 0 || bytes_deleted < bytes)) { 15894309Smaybee list = &state->arcs_list[ARC_BUFC_METADATA]; 15904309Smaybee goto top; 15914309Smaybee } 15924309Smaybee 1593789Sahrens if (bufs_skipped) { 15943403Sbmc ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1595789Sahrens ASSERT(bytes >= 0); 1596789Sahrens } 1597789Sahrens 1598789Sahrens if (bytes_deleted < bytes) 1599789Sahrens dprintf("only deleted %lld bytes from %p", 1600789Sahrens (longlong_t)bytes_deleted, state); 1601789Sahrens } 1602789Sahrens 1603789Sahrens static void 1604789Sahrens arc_adjust(void) 1605789Sahrens { 16063403Sbmc int64_t top_sz, mru_over, arc_over, todelete; 1607789Sahrens 16085642Smaybee top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; 1609789Sahrens 16104309Smaybee if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 16114309Smaybee int64_t toevict = 16124309Smaybee MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); 16135642Smaybee (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); 16144309Smaybee top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 16154309Smaybee } 16164309Smaybee 16174309Smaybee if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 16184309Smaybee int64_t toevict = 16194309Smaybee MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); 16205642Smaybee (void) arc_evict(arc_mru, NULL, toevict, FALSE, 16215642Smaybee ARC_BUFC_METADATA); 16223403Sbmc top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1623789Sahrens } 1624789Sahrens 16253403Sbmc mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1626789Sahrens 1627789Sahrens if (mru_over > 0) { 16284309Smaybee if (arc_mru_ghost->arcs_size > 0) { 16294309Smaybee todelete = MIN(arc_mru_ghost->arcs_size, mru_over); 16305642Smaybee arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1631789Sahrens } 1632789Sahrens } 1633789Sahrens 16343403Sbmc if ((arc_over = arc_size - arc_c) > 0) { 16351544Seschrock int64_t tbl_over; 1636789Sahrens 16374309Smaybee if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 16384309Smaybee int64_t toevict = 16394309Smaybee MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); 16405642Smaybee (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 16414309Smaybee ARC_BUFC_DATA); 16424309Smaybee arc_over = arc_size - arc_c; 1643789Sahrens } 1644789Sahrens 16454309Smaybee if (arc_over > 0 && 16464309Smaybee arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 16474309Smaybee int64_t toevict = 16484309Smaybee MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], 16494309Smaybee arc_over); 16505642Smaybee (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 16514309Smaybee ARC_BUFC_METADATA); 16524309Smaybee } 16534309Smaybee 
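/*
 * A rough illustration of the ghost trimming below: the resident
 * cache plus both ghost lists are kept within 2 * arc_c.  For
 * example, with arc_c and arc_size both at 1GB and the two ghost
 * lists totalling 1.2GB, tbl_over comes to 0.2GB, and up to that
 * much is deleted from the MFU ghost list.
 */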
16544309Smaybee tbl_over = arc_size + arc_mru_ghost->arcs_size + 16554309Smaybee arc_mfu_ghost->arcs_size - arc_c * 2; 16564309Smaybee 16574309Smaybee if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) { 16584309Smaybee todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); 16595642Smaybee arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1660789Sahrens } 1661789Sahrens } 1662789Sahrens } 1663789Sahrens 16641544Seschrock static void 16651544Seschrock arc_do_user_evicts(void) 16661544Seschrock { 16671544Seschrock mutex_enter(&arc_eviction_mtx); 16681544Seschrock while (arc_eviction_list != NULL) { 16691544Seschrock arc_buf_t *buf = arc_eviction_list; 16701544Seschrock arc_eviction_list = buf->b_next; 16711544Seschrock buf->b_hdr = NULL; 16721544Seschrock mutex_exit(&arc_eviction_mtx); 16731544Seschrock 16741819Smaybee if (buf->b_efunc != NULL) 16751819Smaybee VERIFY(buf->b_efunc(buf) == 0); 16761544Seschrock 16771544Seschrock buf->b_efunc = NULL; 16781544Seschrock buf->b_private = NULL; 16791544Seschrock kmem_cache_free(buf_cache, buf); 16801544Seschrock mutex_enter(&arc_eviction_mtx); 16811544Seschrock } 16821544Seschrock mutex_exit(&arc_eviction_mtx); 16831544Seschrock } 16841544Seschrock 1685789Sahrens /* 16865642Smaybee * Flush all *evictable* data from the cache for the given spa. 1687789Sahrens * NOTE: this will not touch "active" (i.e. referenced) data. 1688789Sahrens */ 1689789Sahrens void 16905642Smaybee arc_flush(spa_t *spa) 1691789Sahrens { 16925642Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 16935642Smaybee (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); 16945642Smaybee if (spa) 16955642Smaybee break; 16965642Smaybee } 16975642Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 16985642Smaybee (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); 16995642Smaybee if (spa) 17005642Smaybee break; 17015642Smaybee } 17025642Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 17035642Smaybee (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); 17045642Smaybee if (spa) 17055642Smaybee break; 17065642Smaybee } 17075642Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 17085642Smaybee (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); 17095642Smaybee if (spa) 17105642Smaybee break; 17115642Smaybee } 17125642Smaybee 17135642Smaybee arc_evict_ghost(arc_mru_ghost, spa, -1); 17145642Smaybee arc_evict_ghost(arc_mfu_ghost, spa, -1); 17151544Seschrock 17161544Seschrock mutex_enter(&arc_reclaim_thr_lock); 17171544Seschrock arc_do_user_evicts(); 17181544Seschrock mutex_exit(&arc_reclaim_thr_lock); 17195642Smaybee ASSERT(spa || arc_eviction_list == NULL); 1720789Sahrens } 1721789Sahrens 17223158Smaybee int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 17232391Smaybee 1724789Sahrens void 17253158Smaybee arc_shrink(void) 1726789Sahrens { 17273403Sbmc if (arc_c > arc_c_min) { 17283158Smaybee uint64_t to_free; 1729789Sahrens 17302048Sstans #ifdef _KERNEL 17313403Sbmc to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 17322048Sstans #else 17333403Sbmc to_free = arc_c >> arc_shrink_shift; 17342048Sstans #endif 17353403Sbmc if (arc_c > arc_c_min + to_free) 17363403Sbmc atomic_add_64(&arc_c, -to_free); 17373158Smaybee else 17383403Sbmc arc_c = arc_c_min; 17392048Sstans 17403403Sbmc atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 17413403Sbmc if (arc_c > arc_size) 17423403Sbmc arc_c = MAX(arc_size, arc_c_min); 17433403Sbmc if (arc_p > arc_c) 17443403Sbmc arc_p = (arc_c >> 1); 17453403Sbmc 
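/*
 * For illustration of the arithmetic above: with arc_shrink_shift at
 * its default of 5, each pass frees arc_c/32 (for example 32MB of a
 * 1GB target), or ptob(needfree) if the pageout deficit is larger,
 * and arc_p is likewise reduced by 1/32 of its current value.
 */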
ASSERT(arc_c >= arc_c_min); 17463403Sbmc ASSERT((int64_t)arc_p >= 0); 17473158Smaybee } 1748789Sahrens 17493403Sbmc if (arc_size > arc_c) 17503158Smaybee arc_adjust(); 1751789Sahrens } 1752789Sahrens 1753789Sahrens static int 1754789Sahrens arc_reclaim_needed(void) 1755789Sahrens { 1756789Sahrens uint64_t extra; 1757789Sahrens 1758789Sahrens #ifdef _KERNEL 17592048Sstans 17602048Sstans if (needfree) 17612048Sstans return (1); 17622048Sstans 1763789Sahrens /* 1764789Sahrens * take 'desfree' extra pages, so we reclaim sooner, rather than later 1765789Sahrens */ 1766789Sahrens extra = desfree; 1767789Sahrens 1768789Sahrens /* 1769789Sahrens * check that we're out of range of the pageout scanner. It starts to 1770789Sahrens * schedule paging if freemem is less than lotsfree and needfree. 1771789Sahrens * lotsfree is the high-water mark for pageout, and needfree is the 1772789Sahrens * number of needed free pages. We add extra pages here to make sure 1773789Sahrens * the scanner doesn't start up while we're freeing memory. 1774789Sahrens */ 1775789Sahrens if (freemem < lotsfree + needfree + extra) 1776789Sahrens return (1); 1777789Sahrens 1778789Sahrens /* 1779789Sahrens * check to make sure that swapfs has enough space so that anon 17805450Sbrendan * reservations can still succeed. anon_resvmem() checks that the 1781789Sahrens * availrmem is greater than swapfs_minfree, and the number of reserved 1782789Sahrens * swap pages. We also add a bit of extra here just to prevent 1783789Sahrens * circumstances from getting really dire. 1784789Sahrens */ 1785789Sahrens if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1786789Sahrens return (1); 1787789Sahrens 17881936Smaybee #if defined(__i386) 1789789Sahrens /* 1790789Sahrens * If we're on an i386 platform, it's possible that we'll exhaust the 1791789Sahrens * kernel heap space before we ever run out of available physical 1792789Sahrens * memory. Most checks of the size of the heap_area compare against 1793789Sahrens * tune.t_minarmem, which is the minimum available real memory that we 1794789Sahrens * can have in the system. However, this is generally fixed at 25 pages 1795789Sahrens * which is so low that it's useless. In this comparison, we seek to 1796789Sahrens * calculate the total heap-size, and reclaim if more than 3/4ths of the 17975450Sbrendan * heap is allocated. (Or, in the calculation, if less than 1/4th is 1798789Sahrens * free) 1799789Sahrens */ 1800789Sahrens if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1801789Sahrens (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1802789Sahrens return (1); 1803789Sahrens #endif 1804789Sahrens 1805789Sahrens #else 1806789Sahrens if (spa_get_random(100) == 0) 1807789Sahrens return (1); 1808789Sahrens #endif 1809789Sahrens return (0); 1810789Sahrens } 1811789Sahrens 1812789Sahrens static void 1813789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1814789Sahrens { 1815789Sahrens size_t i; 1816789Sahrens kmem_cache_t *prev_cache = NULL; 18173290Sjohansen kmem_cache_t *prev_data_cache = NULL; 1818789Sahrens extern kmem_cache_t *zio_buf_cache[]; 18193290Sjohansen extern kmem_cache_t *zio_data_buf_cache[]; 1820789Sahrens 18211484Sek110237 #ifdef _KERNEL 18224309Smaybee if (arc_meta_used >= arc_meta_limit) { 18234309Smaybee /* 18244309Smaybee * We are exceeding our meta-data cache limit. 18254309Smaybee * Purge some DNLC entries to release holds on meta-data. 
18264309Smaybee */ 18274309Smaybee dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 18284309Smaybee } 18291936Smaybee #if defined(__i386) 18301936Smaybee /* 18311936Smaybee * Reclaim unused memory from all kmem caches. 18321936Smaybee */ 18331936Smaybee kmem_reap(); 18341936Smaybee #endif 18351484Sek110237 #endif 18361484Sek110237 1837789Sahrens /* 18385450Sbrendan * An aggressive reclamation will shrink the cache size as well as 18391544Seschrock * reap free buffers from the arc kmem caches. 1840789Sahrens */ 1841789Sahrens if (strat == ARC_RECLAIM_AGGR) 18423158Smaybee arc_shrink(); 1843789Sahrens 1844789Sahrens for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1845789Sahrens if (zio_buf_cache[i] != prev_cache) { 1846789Sahrens prev_cache = zio_buf_cache[i]; 1847789Sahrens kmem_cache_reap_now(zio_buf_cache[i]); 1848789Sahrens } 18493290Sjohansen if (zio_data_buf_cache[i] != prev_data_cache) { 18503290Sjohansen prev_data_cache = zio_data_buf_cache[i]; 18513290Sjohansen kmem_cache_reap_now(zio_data_buf_cache[i]); 18523290Sjohansen } 1853789Sahrens } 18541544Seschrock kmem_cache_reap_now(buf_cache); 18551544Seschrock kmem_cache_reap_now(hdr_cache); 1856789Sahrens } 1857789Sahrens 1858789Sahrens static void 1859789Sahrens arc_reclaim_thread(void) 1860789Sahrens { 1861789Sahrens clock_t growtime = 0; 1862789Sahrens arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1863789Sahrens callb_cpr_t cpr; 1864789Sahrens 1865789Sahrens CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1866789Sahrens 1867789Sahrens mutex_enter(&arc_reclaim_thr_lock); 1868789Sahrens while (arc_thread_exit == 0) { 1869789Sahrens if (arc_reclaim_needed()) { 1870789Sahrens 18713403Sbmc if (arc_no_grow) { 1872789Sahrens if (last_reclaim == ARC_RECLAIM_CONS) { 1873789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1874789Sahrens } else { 1875789Sahrens last_reclaim = ARC_RECLAIM_CONS; 1876789Sahrens } 1877789Sahrens } else { 18783403Sbmc arc_no_grow = TRUE; 1879789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1880789Sahrens membar_producer(); 1881789Sahrens } 1882789Sahrens 1883789Sahrens /* reset the growth delay for every reclaim */ 1884789Sahrens growtime = lbolt + (arc_grow_retry * hz); 1885789Sahrens 1886789Sahrens arc_kmem_reap_now(last_reclaim); 1887789Sahrens 18884309Smaybee } else if (arc_no_grow && lbolt >= growtime) { 18893403Sbmc arc_no_grow = FALSE; 1890789Sahrens } 1891789Sahrens 18923403Sbmc if (2 * arc_c < arc_size + 18933403Sbmc arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) 18943298Smaybee arc_adjust(); 18953298Smaybee 18961544Seschrock if (arc_eviction_list != NULL) 18971544Seschrock arc_do_user_evicts(); 18981544Seschrock 1899789Sahrens /* block until needed, or one second, whichever is shorter */ 1900789Sahrens CALLB_CPR_SAFE_BEGIN(&cpr); 1901789Sahrens (void) cv_timedwait(&arc_reclaim_thr_cv, 1902789Sahrens &arc_reclaim_thr_lock, (lbolt + hz)); 1903789Sahrens CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1904789Sahrens } 1905789Sahrens 1906789Sahrens arc_thread_exit = 0; 1907789Sahrens cv_broadcast(&arc_reclaim_thr_cv); 1908789Sahrens CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1909789Sahrens thread_exit(); 1910789Sahrens } 1911789Sahrens 19121544Seschrock /* 19131544Seschrock * Adapt arc info given the number of bytes we are trying to add and 19141544Seschrock * the state that we are comming from. This function is only called 19151544Seschrock * when we are adding new content to the cache. 
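 *
 * For illustration of the adjustment below: with 300MB on the MRU
 * ghost list and 100MB on the MFU ghost list, a ghost hit on the MRU
 * side grows arc_p by just the block size (mult = 1), while a ghost
 * hit on the MFU side shrinks arc_p by three times the block size
 * (mult = 300MB / 100MB), shifting the target toward the MFU side
 * more aggressively.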
19161544Seschrock */ 1917789Sahrens static void 19181544Seschrock arc_adapt(int bytes, arc_state_t *state) 1919789Sahrens { 19201544Seschrock int mult; 19211544Seschrock 19225450Sbrendan if (state == arc_l2c_only) 19235450Sbrendan return; 19245450Sbrendan 19251544Seschrock ASSERT(bytes > 0); 1926789Sahrens /* 19271544Seschrock * Adapt the target size of the MRU list: 19281544Seschrock * - if we just hit in the MRU ghost list, then increase 19291544Seschrock * the target size of the MRU list. 19301544Seschrock * - if we just hit in the MFU ghost list, then increase 19311544Seschrock * the target size of the MFU list by decreasing the 19321544Seschrock * target size of the MRU list. 1933789Sahrens */ 19343403Sbmc if (state == arc_mru_ghost) { 19353403Sbmc mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 19363403Sbmc 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 19371544Seschrock 19383403Sbmc arc_p = MIN(arc_c, arc_p + bytes * mult); 19393403Sbmc } else if (state == arc_mfu_ghost) { 19403403Sbmc mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 19413403Sbmc 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 19421544Seschrock 19433403Sbmc arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 19441544Seschrock } 19453403Sbmc ASSERT((int64_t)arc_p >= 0); 1946789Sahrens 1947789Sahrens if (arc_reclaim_needed()) { 1948789Sahrens cv_signal(&arc_reclaim_thr_cv); 1949789Sahrens return; 1950789Sahrens } 1951789Sahrens 19523403Sbmc if (arc_no_grow) 1953789Sahrens return; 1954789Sahrens 19553403Sbmc if (arc_c >= arc_c_max) 19561544Seschrock return; 19571544Seschrock 1958789Sahrens /* 19591544Seschrock * If we're within (2 * maxblocksize) bytes of the target 19601544Seschrock * cache size, increment the target cache size 1961789Sahrens */ 19623403Sbmc if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 19633403Sbmc atomic_add_64(&arc_c, (int64_t)bytes); 19643403Sbmc if (arc_c > arc_c_max) 19653403Sbmc arc_c = arc_c_max; 19663403Sbmc else if (state == arc_anon) 19673403Sbmc atomic_add_64(&arc_p, (int64_t)bytes); 19683403Sbmc if (arc_p > arc_c) 19693403Sbmc arc_p = arc_c; 1970789Sahrens } 19713403Sbmc ASSERT((int64_t)arc_p >= 0); 1972789Sahrens } 1973789Sahrens 1974789Sahrens /* 19751544Seschrock * Check if the cache has reached its limits and eviction is required 19761544Seschrock * prior to insert. 1977789Sahrens */ 1978789Sahrens static int 19794309Smaybee arc_evict_needed(arc_buf_contents_t type) 1980789Sahrens { 19814309Smaybee if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 19824309Smaybee return (1); 19834309Smaybee 19844309Smaybee #ifdef _KERNEL 19854309Smaybee /* 19864309Smaybee * If zio data pages are being allocated out of a separate heap segment, 19874309Smaybee * then enforce that the size of available vmem for this area remains 19884309Smaybee * above about 1/32nd free. 19894309Smaybee */ 19904309Smaybee if (type == ARC_BUFC_DATA && zio_arena != NULL && 19914309Smaybee vmem_size(zio_arena, VMEM_FREE) < 19924309Smaybee (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 19934309Smaybee return (1); 19944309Smaybee #endif 19954309Smaybee 1996789Sahrens if (arc_reclaim_needed()) 1997789Sahrens return (1); 1998789Sahrens 19993403Sbmc return (arc_size > arc_c); 2000789Sahrens } 2001789Sahrens 2002789Sahrens /* 20032688Smaybee * The buffer, supplied as the first argument, needs a data block. 20042688Smaybee * So, if we are at cache max, determine which cache should be victimized. 
20052688Smaybee * We have the following cases: 2006789Sahrens * 20073403Sbmc * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2008789Sahrens * In this situation if we're out of space, but the resident size of the MFU is 2009789Sahrens * under the limit, victimize the MFU cache to satisfy this insertion request. 2010789Sahrens * 20113403Sbmc * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2012789Sahrens * Here, we've used up all of the available space for the MRU, so we need to 2013789Sahrens * evict from our own cache instead. Evict from the set of resident MRU 2014789Sahrens * entries. 2015789Sahrens * 20163403Sbmc * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2017789Sahrens * c minus p represents the MFU space in the cache, since p is the size of the 2018789Sahrens * cache that is dedicated to the MRU. In this situation there's still space on 2019789Sahrens * the MFU side, so the MRU side needs to be victimized. 2020789Sahrens * 20213403Sbmc * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2022789Sahrens * MFU's resident set is consuming more space than it has been allotted. In 2023789Sahrens * this situation, we must victimize our own cache, the MFU, for this insertion. 2024789Sahrens */ 2025789Sahrens static void 20262688Smaybee arc_get_data_buf(arc_buf_t *buf) 2027789Sahrens { 20283290Sjohansen arc_state_t *state = buf->b_hdr->b_state; 20293290Sjohansen uint64_t size = buf->b_hdr->b_size; 20303290Sjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 20312688Smaybee 20322688Smaybee arc_adapt(size, state); 2033789Sahrens 20342688Smaybee /* 20352688Smaybee * We have not yet reached cache maximum size, 20362688Smaybee * just allocate a new buffer. 20372688Smaybee */ 20384309Smaybee if (!arc_evict_needed(type)) { 20393290Sjohansen if (type == ARC_BUFC_METADATA) { 20403290Sjohansen buf->b_data = zio_buf_alloc(size); 20414309Smaybee arc_space_consume(size); 20423290Sjohansen } else { 20433290Sjohansen ASSERT(type == ARC_BUFC_DATA); 20443290Sjohansen buf->b_data = zio_data_buf_alloc(size); 20454309Smaybee atomic_add_64(&arc_size, size); 20463290Sjohansen } 20472688Smaybee goto out; 20482688Smaybee } 20492688Smaybee 20502688Smaybee /* 20512688Smaybee * If we are prefetching from the mfu ghost list, this buffer 20522688Smaybee * will end up on the mru list; so steal space from there. 20532688Smaybee */ 20543403Sbmc if (state == arc_mfu_ghost) 20553403Sbmc state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 20563403Sbmc else if (state == arc_mru_ghost) 20573403Sbmc state = arc_mru; 2058789Sahrens 20593403Sbmc if (state == arc_mru || state == arc_anon) { 20603403Sbmc uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 20614309Smaybee state = (arc_mfu->arcs_lsize[type] > 0 && 20624309Smaybee arc_p > mru_used) ? arc_mfu : arc_mru; 2063789Sahrens } else { 20642688Smaybee /* MFU cases */ 20653403Sbmc uint64_t mfu_space = arc_c - arc_p; 20664309Smaybee state = (arc_mru->arcs_lsize[type] > 0 && 20674309Smaybee mfu_space > arc_mfu->arcs_size) ? 
arc_mru : arc_mfu; 20682688Smaybee } 20695642Smaybee if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 20703290Sjohansen if (type == ARC_BUFC_METADATA) { 20713290Sjohansen buf->b_data = zio_buf_alloc(size); 20724309Smaybee arc_space_consume(size); 20733290Sjohansen } else { 20743290Sjohansen ASSERT(type == ARC_BUFC_DATA); 20753290Sjohansen buf->b_data = zio_data_buf_alloc(size); 20764309Smaybee atomic_add_64(&arc_size, size); 20773290Sjohansen } 20783403Sbmc ARCSTAT_BUMP(arcstat_recycle_miss); 20792688Smaybee } 20802688Smaybee ASSERT(buf->b_data != NULL); 20812688Smaybee out: 20822688Smaybee /* 20832688Smaybee * Update the state size. Note that ghost states have a 20842688Smaybee * "ghost size" and so don't need to be updated. 20852688Smaybee */ 20862688Smaybee if (!GHOST_STATE(buf->b_hdr->b_state)) { 20872688Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 20882688Smaybee 20893403Sbmc atomic_add_64(&hdr->b_state->arcs_size, size); 20902688Smaybee if (list_link_active(&hdr->b_arc_node)) { 20912688Smaybee ASSERT(refcount_is_zero(&hdr->b_refcnt)); 20924309Smaybee atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2093789Sahrens } 20943298Smaybee /* 20953298Smaybee * If we are growing the cache, and we are adding anonymous 20963403Sbmc * data, and we have outgrown arc_p, update arc_p 20973298Smaybee */ 20983403Sbmc if (arc_size < arc_c && hdr->b_state == arc_anon && 20993403Sbmc arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 21003403Sbmc arc_p = MIN(arc_c, arc_p + size); 2101789Sahrens } 2102789Sahrens } 2103789Sahrens 2104789Sahrens /* 2105789Sahrens * This routine is called whenever a buffer is accessed. 21061544Seschrock * NOTE: the hash lock is dropped in this function. 2107789Sahrens */ 2108789Sahrens static void 21092688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2110789Sahrens { 2111789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 2112789Sahrens 21133403Sbmc if (buf->b_state == arc_anon) { 2114789Sahrens /* 2115789Sahrens * This buffer is not in the cache, and does not 2116789Sahrens * appear in our "ghost" list. Add the new buffer 2117789Sahrens * to the MRU state. 2118789Sahrens */ 2119789Sahrens 2120789Sahrens ASSERT(buf->b_arc_access == 0); 2121789Sahrens buf->b_arc_access = lbolt; 21221544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 21233403Sbmc arc_change_state(arc_mru, buf, hash_lock); 2124789Sahrens 21253403Sbmc } else if (buf->b_state == arc_mru) { 2126789Sahrens /* 21272391Smaybee * If this buffer is here because of a prefetch, then either: 21282391Smaybee * - clear the flag if this is a "referencing" read 21292391Smaybee * (any subsequent access will bump this into the MFU state). 21302391Smaybee * or 21312391Smaybee * - move the buffer to the head of the list if this is 21322391Smaybee * another prefetch (to make it less likely to be evicted). 2133789Sahrens */ 2134789Sahrens if ((buf->b_flags & ARC_PREFETCH) != 0) { 21352391Smaybee if (refcount_count(&buf->b_refcnt) == 0) { 21362391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 21372391Smaybee } else { 21382391Smaybee buf->b_flags &= ~ARC_PREFETCH; 21393403Sbmc ARCSTAT_BUMP(arcstat_mru_hits); 21402391Smaybee } 21412391Smaybee buf->b_arc_access = lbolt; 2142789Sahrens return; 2143789Sahrens } 2144789Sahrens 2145789Sahrens /* 2146789Sahrens * This buffer has been "accessed" only once so far, 2147789Sahrens * but it is still in the cache. Move it to the MFU 2148789Sahrens * state. 
2149789Sahrens */ 2150789Sahrens if (lbolt > buf->b_arc_access + ARC_MINTIME) { 2151789Sahrens /* 2152789Sahrens * More than 125ms have passed since we 2153789Sahrens * instantiated this buffer. Move it to the 2154789Sahrens * most frequently used state. 2155789Sahrens */ 2156789Sahrens buf->b_arc_access = lbolt; 21571544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 21583403Sbmc arc_change_state(arc_mfu, buf, hash_lock); 2159789Sahrens } 21603403Sbmc ARCSTAT_BUMP(arcstat_mru_hits); 21613403Sbmc } else if (buf->b_state == arc_mru_ghost) { 2162789Sahrens arc_state_t *new_state; 2163789Sahrens /* 2164789Sahrens * This buffer has been "accessed" recently, but 2165789Sahrens * was evicted from the cache. Move it to the 2166789Sahrens * MFU state. 2167789Sahrens */ 2168789Sahrens 2169789Sahrens if (buf->b_flags & ARC_PREFETCH) { 21703403Sbmc new_state = arc_mru; 21712391Smaybee if (refcount_count(&buf->b_refcnt) > 0) 21722391Smaybee buf->b_flags &= ~ARC_PREFETCH; 21731544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2174789Sahrens } else { 21753403Sbmc new_state = arc_mfu; 21761544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2177789Sahrens } 2178789Sahrens 2179789Sahrens buf->b_arc_access = lbolt; 2180789Sahrens arc_change_state(new_state, buf, hash_lock); 2181789Sahrens 21823403Sbmc ARCSTAT_BUMP(arcstat_mru_ghost_hits); 21833403Sbmc } else if (buf->b_state == arc_mfu) { 2184789Sahrens /* 2185789Sahrens * This buffer has been accessed more than once and is 2186789Sahrens * still in the cache. Keep it in the MFU state. 2187789Sahrens * 21882391Smaybee * NOTE: an add_reference() that occurred when we did 21892391Smaybee * the arc_read() will have kicked this off the list. 21902391Smaybee * If it was a prefetch, we will explicitly move it to 21912391Smaybee * the head of the list now. 2192789Sahrens */ 21932391Smaybee if ((buf->b_flags & ARC_PREFETCH) != 0) { 21942391Smaybee ASSERT(refcount_count(&buf->b_refcnt) == 0); 21952391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 21962391Smaybee } 21973403Sbmc ARCSTAT_BUMP(arcstat_mfu_hits); 21982391Smaybee buf->b_arc_access = lbolt; 21993403Sbmc } else if (buf->b_state == arc_mfu_ghost) { 22003403Sbmc arc_state_t *new_state = arc_mfu; 2201789Sahrens /* 2202789Sahrens * This buffer has been accessed more than once but has 2203789Sahrens * been evicted from the cache. Move it back to the 2204789Sahrens * MFU state. 2205789Sahrens */ 2206789Sahrens 22072391Smaybee if (buf->b_flags & ARC_PREFETCH) { 22082391Smaybee /* 22092391Smaybee * This is a prefetch access... 22102391Smaybee * move this block back to the MRU state. 22112391Smaybee */ 22122391Smaybee ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 22133403Sbmc new_state = arc_mru; 22142391Smaybee } 22152391Smaybee 2216789Sahrens buf->b_arc_access = lbolt; 22171544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 22182391Smaybee arc_change_state(new_state, buf, hash_lock); 2219789Sahrens 22203403Sbmc ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 22215450Sbrendan } else if (buf->b_state == arc_l2c_only) { 22225450Sbrendan /* 22235450Sbrendan * This buffer is on the 2nd Level ARC. 
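 * There is no data in memory for it; the header survives only to
 * describe the copy on the L2ARC device.  The access is treated
 * like an MFU hit and the header is moved to arc_mfu so the data
 * can be re-attached when it is read back in.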
22245450Sbrendan */ 22255450Sbrendan 22265450Sbrendan buf->b_arc_access = lbolt; 22275450Sbrendan DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 22285450Sbrendan arc_change_state(arc_mfu, buf, hash_lock); 2229789Sahrens } else { 2230789Sahrens ASSERT(!"invalid arc state"); 2231789Sahrens } 2232789Sahrens } 2233789Sahrens 2234789Sahrens /* a generic arc_done_func_t which you can use */ 2235789Sahrens /* ARGSUSED */ 2236789Sahrens void 2237789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2238789Sahrens { 2239789Sahrens bcopy(buf->b_data, arg, buf->b_hdr->b_size); 22401544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2241789Sahrens } 2242789Sahrens 22434309Smaybee /* a generic arc_done_func_t */ 2244789Sahrens void 2245789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2246789Sahrens { 2247789Sahrens arc_buf_t **bufp = arg; 2248789Sahrens if (zio && zio->io_error) { 22491544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2250789Sahrens *bufp = NULL; 2251789Sahrens } else { 2252789Sahrens *bufp = buf; 2253789Sahrens } 2254789Sahrens } 2255789Sahrens 2256789Sahrens static void 2257789Sahrens arc_read_done(zio_t *zio) 2258789Sahrens { 22591589Smaybee arc_buf_hdr_t *hdr, *found; 2260789Sahrens arc_buf_t *buf; 2261789Sahrens arc_buf_t *abuf; /* buffer we're assigning to callback */ 2262789Sahrens kmutex_t *hash_lock; 2263789Sahrens arc_callback_t *callback_list, *acb; 2264789Sahrens int freeable = FALSE; 2265789Sahrens 2266789Sahrens buf = zio->io_private; 2267789Sahrens hdr = buf->b_hdr; 2268789Sahrens 22691589Smaybee /* 22701589Smaybee * The hdr was inserted into hash-table and removed from lists 22711589Smaybee * prior to starting I/O. We should find this header, since 22721589Smaybee * it's in the hash table, and it should be legit since it's 22731589Smaybee * not possible to evict it during the I/O. The only possible 22741589Smaybee * reason for it not to be found is if we were freed during the 22751589Smaybee * read. 
22761589Smaybee */ 22771589Smaybee found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 22783093Sahrens &hash_lock); 2279789Sahrens 22801589Smaybee ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 22815450Sbrendan (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 22825450Sbrendan (found == hdr && HDR_L2_READING(hdr))); 22835450Sbrendan 22845450Sbrendan hdr->b_flags &= ~(ARC_L2_READING|ARC_L2_EVICTED); 22855450Sbrendan if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 22865450Sbrendan hdr->b_flags |= ARC_DONT_L2CACHE; 2287789Sahrens 2288789Sahrens /* byteswap if necessary */ 2289789Sahrens callback_list = hdr->b_acb; 2290789Sahrens ASSERT(callback_list != NULL); 2291789Sahrens if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 2292789Sahrens callback_list->acb_byteswap(buf->b_data, hdr->b_size); 2293789Sahrens 22945450Sbrendan arc_cksum_compute(buf, B_FALSE); 22953093Sahrens 2296789Sahrens /* create copies of the data buffer for the callers */ 2297789Sahrens abuf = buf; 2298789Sahrens for (acb = callback_list; acb; acb = acb->acb_next) { 2299789Sahrens if (acb->acb_done) { 23002688Smaybee if (abuf == NULL) 23012688Smaybee abuf = arc_buf_clone(buf); 2302789Sahrens acb->acb_buf = abuf; 2303789Sahrens abuf = NULL; 2304789Sahrens } 2305789Sahrens } 2306789Sahrens hdr->b_acb = NULL; 2307789Sahrens hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 23081544Seschrock ASSERT(!HDR_BUF_AVAILABLE(hdr)); 23091544Seschrock if (abuf == buf) 23101544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 2311789Sahrens 2312789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2313789Sahrens 2314789Sahrens if (zio->io_error != 0) { 2315789Sahrens hdr->b_flags |= ARC_IO_ERROR; 23163403Sbmc if (hdr->b_state != arc_anon) 23173403Sbmc arc_change_state(arc_anon, hdr, hash_lock); 23181544Seschrock if (HDR_IN_HASH_TABLE(hdr)) 23191544Seschrock buf_hash_remove(hdr); 2320789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 23212391Smaybee /* convert checksum errors into IO errors */ 23221544Seschrock if (zio->io_error == ECKSUM) 23231544Seschrock zio->io_error = EIO; 2324789Sahrens } 2325789Sahrens 23261544Seschrock /* 23272391Smaybee * Broadcast before we drop the hash_lock to avoid the possibility 23282391Smaybee * that the hdr (and hence the cv) might be freed before we get to 23292391Smaybee * the cv_broadcast(). 23301544Seschrock */ 23311544Seschrock cv_broadcast(&hdr->b_cv); 23321544Seschrock 23331589Smaybee if (hash_lock) { 2334789Sahrens /* 2335789Sahrens * Only call arc_access on anonymous buffers. This is because 2336789Sahrens * if we've issued an I/O for an evicted buffer, we've already 2337789Sahrens * called arc_access (to prevent any simultaneous readers from 2338789Sahrens * getting confused). 2339789Sahrens */ 23403403Sbmc if (zio->io_error == 0 && hdr->b_state == arc_anon) 23412688Smaybee arc_access(hdr, hash_lock); 23422688Smaybee mutex_exit(hash_lock); 2343789Sahrens } else { 2344789Sahrens /* 2345789Sahrens * This block was freed while we waited for the read to 2346789Sahrens * complete. It has been removed from the hash table and 2347789Sahrens * moved to the anonymous state (so that it won't show up 2348789Sahrens * in the cache). 
2349789Sahrens */
23503403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon);
2351789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt);
2352789Sahrens }
2353789Sahrens
2354789Sahrens /* execute each callback and free its structure */
2355789Sahrens while ((acb = callback_list) != NULL) {
2356789Sahrens if (acb->acb_done)
2357789Sahrens acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2358789Sahrens
2359789Sahrens if (acb->acb_zio_dummy != NULL) {
2360789Sahrens acb->acb_zio_dummy->io_error = zio->io_error;
2361789Sahrens zio_nowait(acb->acb_zio_dummy);
2362789Sahrens }
2363789Sahrens
2364789Sahrens callback_list = acb->acb_next;
2365789Sahrens kmem_free(acb, sizeof (arc_callback_t));
2366789Sahrens }
2367789Sahrens
2368789Sahrens if (freeable)
23691544Seschrock arc_hdr_destroy(hdr);
2370789Sahrens }
2371789Sahrens
2372789Sahrens /*
2373789Sahrens * "Read" the block at the specified DVA (in bp) via the
2374789Sahrens * cache. If the block is found in the cache, invoke the provided
2375789Sahrens * callback immediately and return. Note that the `zio' parameter
2376789Sahrens * in the callback will be NULL in this case, since no IO was
2377789Sahrens * required. If the block is not in the cache, pass the read request
2378789Sahrens * on to the spa with a substitute callback function, so that the
2379789Sahrens * requested block will be added to the cache.
2380789Sahrens *
2381789Sahrens * If a read request arrives for a block that has a read in-progress,
2382789Sahrens * either wait for the in-progress read to complete (and return the
2383789Sahrens * results); or, if this is a read with a "done" func, add a record
2384789Sahrens * to the read to invoke the "done" func when the read completes,
2385789Sahrens * and return; or just return.
2386789Sahrens *
2387789Sahrens * arc_read_done() will invoke all the requested "done" functions
2388789Sahrens * for readers of this block.
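 *
 * For illustration only, a hypothetical synchronous caller might look
 * like the sketch below (the priority and flag names are assumed to
 * come from zio.h, and `zb' is a zbookmark_t the caller has filled in):
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *
 *	(void) arc_read(NULL, spa, bp, byteswap_uint64_array,
 *	    arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
 *	    ZIO_FLAG_CANFAIL, &aflags, &zb);
 *
 * On a cache hit, arc_getbuf_func() runs immediately with a NULL zio;
 * on a miss the read is issued and, because ARC_WAIT is set, arc_read()
 * waits in zio_wait() until arc_read_done() has invoked the callback.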
2389789Sahrens */ 2390789Sahrens int 2391789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2392789Sahrens arc_done_func_t *done, void *private, int priority, int flags, 23932391Smaybee uint32_t *arc_flags, zbookmark_t *zb) 2394789Sahrens { 2395789Sahrens arc_buf_hdr_t *hdr; 2396789Sahrens arc_buf_t *buf; 2397789Sahrens kmutex_t *hash_lock; 23985450Sbrendan zio_t *rzio; 2399789Sahrens 2400789Sahrens top: 2401789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 24021544Seschrock if (hdr && hdr->b_datacnt > 0) { 2403789Sahrens 24042391Smaybee *arc_flags |= ARC_CACHED; 24052391Smaybee 2406789Sahrens if (HDR_IO_IN_PROGRESS(hdr)) { 24072391Smaybee 24082391Smaybee if (*arc_flags & ARC_WAIT) { 24092391Smaybee cv_wait(&hdr->b_cv, hash_lock); 24102391Smaybee mutex_exit(hash_lock); 24112391Smaybee goto top; 24122391Smaybee } 24132391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 24142391Smaybee 24152391Smaybee if (done) { 2416789Sahrens arc_callback_t *acb = NULL; 2417789Sahrens 2418789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), 2419789Sahrens KM_SLEEP); 2420789Sahrens acb->acb_done = done; 2421789Sahrens acb->acb_private = private; 2422789Sahrens acb->acb_byteswap = swap; 2423789Sahrens if (pio != NULL) 2424789Sahrens acb->acb_zio_dummy = zio_null(pio, 2425789Sahrens spa, NULL, NULL, flags); 2426789Sahrens 2427789Sahrens ASSERT(acb->acb_done != NULL); 2428789Sahrens acb->acb_next = hdr->b_acb; 2429789Sahrens hdr->b_acb = acb; 2430789Sahrens add_reference(hdr, hash_lock, private); 2431789Sahrens mutex_exit(hash_lock); 2432789Sahrens return (0); 2433789Sahrens } 2434789Sahrens mutex_exit(hash_lock); 2435789Sahrens return (0); 2436789Sahrens } 2437789Sahrens 24383403Sbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2439789Sahrens 24401544Seschrock if (done) { 24412688Smaybee add_reference(hdr, hash_lock, private); 24421544Seschrock /* 24431544Seschrock * If this block is already in use, create a new 24441544Seschrock * copy of the data so that we will be guaranteed 24451544Seschrock * that arc_release() will always succeed. 
24461544Seschrock */ 24471544Seschrock buf = hdr->b_buf; 24481544Seschrock ASSERT(buf); 24491544Seschrock ASSERT(buf->b_data); 24502688Smaybee if (HDR_BUF_AVAILABLE(hdr)) { 24511544Seschrock ASSERT(buf->b_efunc == NULL); 24521544Seschrock hdr->b_flags &= ~ARC_BUF_AVAILABLE; 24532688Smaybee } else { 24542688Smaybee buf = arc_buf_clone(buf); 24551544Seschrock } 24562391Smaybee } else if (*arc_flags & ARC_PREFETCH && 24572391Smaybee refcount_count(&hdr->b_refcnt) == 0) { 24582391Smaybee hdr->b_flags |= ARC_PREFETCH; 2459789Sahrens } 2460789Sahrens DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 24612688Smaybee arc_access(hdr, hash_lock); 24622688Smaybee mutex_exit(hash_lock); 24633403Sbmc ARCSTAT_BUMP(arcstat_hits); 24643403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 24653403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 24663403Sbmc data, metadata, hits); 24673403Sbmc 2468789Sahrens if (done) 2469789Sahrens done(NULL, buf, private); 2470789Sahrens } else { 2471789Sahrens uint64_t size = BP_GET_LSIZE(bp); 2472789Sahrens arc_callback_t *acb; 2473789Sahrens 2474789Sahrens if (hdr == NULL) { 2475789Sahrens /* this block is not in the cache */ 2476789Sahrens arc_buf_hdr_t *exists; 24773290Sjohansen arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 24783290Sjohansen buf = arc_buf_alloc(spa, size, private, type); 2479789Sahrens hdr = buf->b_hdr; 2480789Sahrens hdr->b_dva = *BP_IDENTITY(bp); 2481789Sahrens hdr->b_birth = bp->blk_birth; 2482789Sahrens hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2483789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2484789Sahrens if (exists) { 2485789Sahrens /* somebody beat us to the hash insert */ 2486789Sahrens mutex_exit(hash_lock); 2487789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2488789Sahrens hdr->b_birth = 0; 2489789Sahrens hdr->b_cksum0 = 0; 24901544Seschrock (void) arc_buf_remove_ref(buf, private); 2491789Sahrens goto top; /* restart the IO request */ 2492789Sahrens } 24932391Smaybee /* if this is a prefetch, we don't have a reference */ 24942391Smaybee if (*arc_flags & ARC_PREFETCH) { 24952391Smaybee (void) remove_reference(hdr, hash_lock, 24962391Smaybee private); 24972391Smaybee hdr->b_flags |= ARC_PREFETCH; 24982391Smaybee } 24992391Smaybee if (BP_GET_LEVEL(bp) > 0) 25002391Smaybee hdr->b_flags |= ARC_INDIRECT; 2501789Sahrens } else { 2502789Sahrens /* this block is in the ghost cache */ 25031544Seschrock ASSERT(GHOST_STATE(hdr->b_state)); 25041544Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 25052391Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 25062391Smaybee ASSERT(hdr->b_buf == NULL); 2507789Sahrens 25082391Smaybee /* if this is a prefetch, we don't have a reference */ 25092391Smaybee if (*arc_flags & ARC_PREFETCH) 25102391Smaybee hdr->b_flags |= ARC_PREFETCH; 25112391Smaybee else 25122391Smaybee add_reference(hdr, hash_lock, private); 2513*6245Smaybee buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 25141544Seschrock buf->b_hdr = hdr; 25152688Smaybee buf->b_data = NULL; 25161544Seschrock buf->b_efunc = NULL; 25171544Seschrock buf->b_private = NULL; 25181544Seschrock buf->b_next = NULL; 25191544Seschrock hdr->b_buf = buf; 25202688Smaybee arc_get_data_buf(buf); 25211544Seschrock ASSERT(hdr->b_datacnt == 0); 25221544Seschrock hdr->b_datacnt = 1; 25232391Smaybee 2524789Sahrens } 2525789Sahrens 2526789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2527789Sahrens acb->acb_done = done; 2528789Sahrens acb->acb_private = private; 2529789Sahrens acb->acb_byteswap = swap; 2530789Sahrens 2531789Sahrens 
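/*
 * Attach the callback to the header and mark the I/O in progress
 * before dropping the hash lock; any later reader that finds this
 * header before the read completes will chain its own callback onto
 * b_acb (see the HDR_IO_IN_PROGRESS branch above) instead of issuing
 * a second read.
 */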
ASSERT(hdr->b_acb == NULL); 2532789Sahrens hdr->b_acb = acb; 2533789Sahrens hdr->b_flags |= ARC_IO_IN_PROGRESS; 2534789Sahrens 2535789Sahrens /* 2536789Sahrens * If the buffer has been evicted, migrate it to a present state 2537789Sahrens * before issuing the I/O. Once we drop the hash-table lock, 2538789Sahrens * the header will be marked as I/O in progress and have an 2539789Sahrens * attached buffer. At this point, anybody who finds this 2540789Sahrens * buffer ought to notice that it's legit but has a pending I/O. 2541789Sahrens */ 2542789Sahrens 25431544Seschrock if (GHOST_STATE(hdr->b_state)) 25442688Smaybee arc_access(hdr, hash_lock); 2545789Sahrens 2546789Sahrens ASSERT3U(hdr->b_size, ==, size); 25471596Sahrens DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 25481596Sahrens zbookmark_t *, zb); 25493403Sbmc ARCSTAT_BUMP(arcstat_misses); 25503403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 25513403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 25523403Sbmc data, metadata, misses); 25531544Seschrock 25545450Sbrendan if (l2arc_ndev != 0) { 25555450Sbrendan /* 25565450Sbrendan * Read from the L2ARC if the following are true: 25575450Sbrendan * 1. This buffer has L2ARC metadata. 25585450Sbrendan * 2. This buffer isn't currently writing to the L2ARC. 25595450Sbrendan */ 25605450Sbrendan if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr)) { 25615450Sbrendan vdev_t *vd = hdr->b_l2hdr->b_dev->l2ad_vdev; 25625450Sbrendan daddr_t addr = hdr->b_l2hdr->b_daddr; 25635450Sbrendan l2arc_read_callback_t *cb; 25645450Sbrendan 25655450Sbrendan DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 25665450Sbrendan ARCSTAT_BUMP(arcstat_l2_hits); 25675450Sbrendan 25685450Sbrendan hdr->b_flags |= ARC_L2_READING; 25695450Sbrendan mutex_exit(hash_lock); 25705450Sbrendan 25715450Sbrendan cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 25725450Sbrendan KM_SLEEP); 25735450Sbrendan cb->l2rcb_buf = buf; 25745450Sbrendan cb->l2rcb_spa = spa; 25755450Sbrendan cb->l2rcb_bp = *bp; 25765450Sbrendan cb->l2rcb_zb = *zb; 25775450Sbrendan cb->l2rcb_flags = flags; 25785450Sbrendan 25795450Sbrendan /* 25805450Sbrendan * l2arc read. 25815450Sbrendan */ 25825450Sbrendan rzio = zio_read_phys(pio, vd, addr, size, 25835450Sbrendan buf->b_data, ZIO_CHECKSUM_OFF, 25845450Sbrendan l2arc_read_done, cb, priority, 25855450Sbrendan flags | ZIO_FLAG_DONT_CACHE, B_FALSE); 25865450Sbrendan DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 25875450Sbrendan zio_t *, rzio); 25885450Sbrendan 25895450Sbrendan if (*arc_flags & ARC_WAIT) 25905450Sbrendan return (zio_wait(rzio)); 25915450Sbrendan 25925450Sbrendan ASSERT(*arc_flags & ARC_NOWAIT); 25935450Sbrendan zio_nowait(rzio); 25945450Sbrendan return (0); 25955450Sbrendan } else { 25965450Sbrendan DTRACE_PROBE1(l2arc__miss, 25975450Sbrendan arc_buf_hdr_t *, hdr); 25985450Sbrendan ARCSTAT_BUMP(arcstat_l2_misses); 25995450Sbrendan if (HDR_L2_WRITING(hdr)) 26005450Sbrendan ARCSTAT_BUMP(arcstat_l2_rw_clash); 26015450Sbrendan } 26025450Sbrendan } 26035450Sbrendan mutex_exit(hash_lock); 26045450Sbrendan 2605789Sahrens rzio = zio_read(pio, spa, bp, buf->b_data, size, 26061544Seschrock arc_read_done, buf, priority, flags, zb); 2607789Sahrens 26082391Smaybee if (*arc_flags & ARC_WAIT) 2609789Sahrens return (zio_wait(rzio)); 2610789Sahrens 26112391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 2612789Sahrens zio_nowait(rzio); 2613789Sahrens } 2614789Sahrens return (0); 2615789Sahrens } 2616789Sahrens 2617789Sahrens /* 2618789Sahrens * arc_read() variant to support pool traversal. 
If the block is already 2619789Sahrens * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2620789Sahrens * The idea is that we don't want pool traversal filling up memory, but 2621789Sahrens * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2622789Sahrens */ 2623789Sahrens int 2624789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2625789Sahrens { 2626789Sahrens arc_buf_hdr_t *hdr; 2627789Sahrens kmutex_t *hash_mtx; 2628789Sahrens int rc = 0; 2629789Sahrens 2630789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2631789Sahrens 26321544Seschrock if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 26331544Seschrock arc_buf_t *buf = hdr->b_buf; 26341544Seschrock 26351544Seschrock ASSERT(buf); 26361544Seschrock while (buf->b_data == NULL) { 26371544Seschrock buf = buf->b_next; 26381544Seschrock ASSERT(buf); 26391544Seschrock } 26401544Seschrock bcopy(buf->b_data, data, hdr->b_size); 26411544Seschrock } else { 2642789Sahrens rc = ENOENT; 26431544Seschrock } 2644789Sahrens 2645789Sahrens if (hash_mtx) 2646789Sahrens mutex_exit(hash_mtx); 2647789Sahrens 2648789Sahrens return (rc); 2649789Sahrens } 2650789Sahrens 26511544Seschrock void 26521544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 26531544Seschrock { 26541544Seschrock ASSERT(buf->b_hdr != NULL); 26553403Sbmc ASSERT(buf->b_hdr->b_state != arc_anon); 26561544Seschrock ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 26571544Seschrock buf->b_efunc = func; 26581544Seschrock buf->b_private = private; 26591544Seschrock } 26601544Seschrock 26611544Seschrock /* 26621544Seschrock * This is used by the DMU to let the ARC know that a buffer is 26631544Seschrock * being evicted, so the ARC should clean up. If this arc buf 26641544Seschrock * is not yet in the evicted state, it will be put there. 26651544Seschrock */ 26661544Seschrock int 26671544Seschrock arc_buf_evict(arc_buf_t *buf) 26681544Seschrock { 26692887Smaybee arc_buf_hdr_t *hdr; 26701544Seschrock kmutex_t *hash_lock; 26711544Seschrock arc_buf_t **bufp; 26721544Seschrock 26732887Smaybee mutex_enter(&arc_eviction_mtx); 26742887Smaybee hdr = buf->b_hdr; 26751544Seschrock if (hdr == NULL) { 26761544Seschrock /* 26771544Seschrock * We are in arc_do_user_evicts(). 26781544Seschrock */ 26791544Seschrock ASSERT(buf->b_data == NULL); 26802887Smaybee mutex_exit(&arc_eviction_mtx); 26811544Seschrock return (0); 26821544Seschrock } 26832887Smaybee hash_lock = HDR_LOCK(hdr); 26842887Smaybee mutex_exit(&arc_eviction_mtx); 26851544Seschrock 26861544Seschrock mutex_enter(hash_lock); 26871544Seschrock 26882724Smaybee if (buf->b_data == NULL) { 26892724Smaybee /* 26902724Smaybee * We are on the eviction list. 26912724Smaybee */ 26922724Smaybee mutex_exit(hash_lock); 26932724Smaybee mutex_enter(&arc_eviction_mtx); 26942724Smaybee if (buf->b_hdr == NULL) { 26952724Smaybee /* 26962724Smaybee * We are already in arc_do_user_evicts(). 26972724Smaybee */ 26982724Smaybee mutex_exit(&arc_eviction_mtx); 26992724Smaybee return (0); 27002724Smaybee } else { 27012724Smaybee arc_buf_t copy = *buf; /* structure assignment */ 27022724Smaybee /* 27032724Smaybee * Process this buffer now 27042724Smaybee * but let arc_do_user_evicts() do the reaping. 
27052724Smaybee 			 */
27062724Smaybee 			buf->b_efunc = NULL;
27072724Smaybee 			mutex_exit(&arc_eviction_mtx);
27082724Smaybee 			VERIFY(copy.b_efunc(&copy) == 0);
27092724Smaybee 			return (1);
27102724Smaybee 		}
27112724Smaybee 	}
27122724Smaybee 
27132724Smaybee 	ASSERT(buf->b_hdr == hdr);
27142724Smaybee 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
27153403Sbmc 	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
27161544Seschrock 
27171544Seschrock 	/*
27181544Seschrock 	 * Pull this buffer off of the hdr
27191544Seschrock 	 */
27201544Seschrock 	bufp = &hdr->b_buf;
27211544Seschrock 	while (*bufp != buf)
27221544Seschrock 		bufp = &(*bufp)->b_next;
27231544Seschrock 	*bufp = buf->b_next;
27241544Seschrock 
27251544Seschrock 	ASSERT(buf->b_data != NULL);
27262688Smaybee 	arc_buf_destroy(buf, FALSE, FALSE);
27271544Seschrock 
27281544Seschrock 	if (hdr->b_datacnt == 0) {
27291544Seschrock 		arc_state_t *old_state = hdr->b_state;
27301544Seschrock 		arc_state_t *evicted_state;
27311544Seschrock 
27321544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
27331544Seschrock 
27341544Seschrock 		evicted_state =
27353403Sbmc 		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
27361544Seschrock 
27373403Sbmc 		mutex_enter(&old_state->arcs_mtx);
27383403Sbmc 		mutex_enter(&evicted_state->arcs_mtx);
27391544Seschrock 
27401544Seschrock 		arc_change_state(evicted_state, hdr, hash_lock);
27411544Seschrock 		ASSERT(HDR_IN_HASH_TABLE(hdr));
27425450Sbrendan 		hdr->b_flags |= ARC_IN_HASH_TABLE;
27435450Sbrendan 		hdr->b_flags &= ~ARC_BUF_AVAILABLE;
27441544Seschrock 
27453403Sbmc 		mutex_exit(&evicted_state->arcs_mtx);
27463403Sbmc 		mutex_exit(&old_state->arcs_mtx);
27471544Seschrock 	}
27481544Seschrock 	mutex_exit(hash_lock);
27491819Smaybee 
27501544Seschrock 	VERIFY(buf->b_efunc(buf) == 0);
27511544Seschrock 	buf->b_efunc = NULL;
27521544Seschrock 	buf->b_private = NULL;
27531544Seschrock 	buf->b_hdr = NULL;
27541544Seschrock 	kmem_cache_free(buf_cache, buf);
27551544Seschrock 	return (1);
27561544Seschrock }
27571544Seschrock 
2758789Sahrens /*
2759789Sahrens  * Release this buffer from the cache.  This must be done
2760789Sahrens  * after a read and prior to modifying the buffer contents.
2761789Sahrens  * If the buffer has more than one reference, we must make
2762789Sahrens  * a new hdr for the buffer.
2763789Sahrens  */
2764789Sahrens void
2765789Sahrens arc_release(arc_buf_t *buf, void *tag)
2766789Sahrens {
2767789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2768789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
27695450Sbrendan 	l2arc_buf_hdr_t *l2hdr = NULL;
27705450Sbrendan 	uint64_t buf_size;
2771789Sahrens 
2772789Sahrens 	/* this buffer is not on any list */
2773789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2774789Sahrens 
27753403Sbmc 	if (hdr->b_state == arc_anon) {
2776789Sahrens 		/* this buffer is already released */
2777789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2778789Sahrens 		ASSERT(BUF_EMPTY(hdr));
27791544Seschrock 		ASSERT(buf->b_efunc == NULL);
27803093Sahrens 		arc_buf_thaw(buf);
2781789Sahrens 		return;
2782789Sahrens 	}
2783789Sahrens 
2784789Sahrens 	mutex_enter(hash_lock);
2785789Sahrens 
27861544Seschrock 	/*
27871544Seschrock 	 * Do we have more than one buf?
27881544Seschrock */ 27891544Seschrock if (hdr->b_buf != buf || buf->b_next != NULL) { 2790789Sahrens arc_buf_hdr_t *nhdr; 2791789Sahrens arc_buf_t **bufp; 2792789Sahrens uint64_t blksz = hdr->b_size; 2793789Sahrens spa_t *spa = hdr->b_spa; 27943290Sjohansen arc_buf_contents_t type = hdr->b_type; 27955450Sbrendan uint32_t flags = hdr->b_flags; 2796789Sahrens 27971544Seschrock ASSERT(hdr->b_datacnt > 1); 2798789Sahrens /* 2799789Sahrens * Pull the data off of this buf and attach it to 2800789Sahrens * a new anonymous buf. 2801789Sahrens */ 28021544Seschrock (void) remove_reference(hdr, hash_lock, tag); 2803789Sahrens bufp = &hdr->b_buf; 28041544Seschrock while (*bufp != buf) 2805789Sahrens bufp = &(*bufp)->b_next; 2806789Sahrens *bufp = (*bufp)->b_next; 28073897Smaybee buf->b_next = NULL; 28081544Seschrock 28093403Sbmc ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 28103403Sbmc atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 28111544Seschrock if (refcount_is_zero(&hdr->b_refcnt)) { 28124309Smaybee uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 28134309Smaybee ASSERT3U(*size, >=, hdr->b_size); 28144309Smaybee atomic_add_64(size, -hdr->b_size); 28151544Seschrock } 28161544Seschrock hdr->b_datacnt -= 1; 28175450Sbrendan if (hdr->b_l2hdr != NULL) { 28185450Sbrendan mutex_enter(&l2arc_buflist_mtx); 28195450Sbrendan l2hdr = hdr->b_l2hdr; 28205450Sbrendan hdr->b_l2hdr = NULL; 28215450Sbrendan buf_size = hdr->b_size; 28225450Sbrendan } 28233547Smaybee arc_cksum_verify(buf); 28241544Seschrock 2825789Sahrens mutex_exit(hash_lock); 2826789Sahrens 2827*6245Smaybee nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 2828789Sahrens nhdr->b_size = blksz; 2829789Sahrens nhdr->b_spa = spa; 28303290Sjohansen nhdr->b_type = type; 2831789Sahrens nhdr->b_buf = buf; 28323403Sbmc nhdr->b_state = arc_anon; 2833789Sahrens nhdr->b_arc_access = 0; 28345450Sbrendan nhdr->b_flags = flags & ARC_L2_WRITING; 28355450Sbrendan nhdr->b_l2hdr = NULL; 28361544Seschrock nhdr->b_datacnt = 1; 28373547Smaybee nhdr->b_freeze_cksum = NULL; 28383897Smaybee (void) refcount_add(&nhdr->b_refcnt, tag); 2839789Sahrens buf->b_hdr = nhdr; 28403403Sbmc atomic_add_64(&arc_anon->arcs_size, blksz); 2841789Sahrens } else { 28421544Seschrock ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2843789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 2844789Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 28453403Sbmc arc_change_state(arc_anon, hdr, hash_lock); 2846789Sahrens hdr->b_arc_access = 0; 28475450Sbrendan if (hdr->b_l2hdr != NULL) { 28485450Sbrendan mutex_enter(&l2arc_buflist_mtx); 28495450Sbrendan l2hdr = hdr->b_l2hdr; 28505450Sbrendan hdr->b_l2hdr = NULL; 28515450Sbrendan buf_size = hdr->b_size; 28525450Sbrendan } 2853789Sahrens mutex_exit(hash_lock); 28545450Sbrendan 2855789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2856789Sahrens hdr->b_birth = 0; 2857789Sahrens hdr->b_cksum0 = 0; 28583547Smaybee arc_buf_thaw(buf); 2859789Sahrens } 28601544Seschrock buf->b_efunc = NULL; 28611544Seschrock buf->b_private = NULL; 28625450Sbrendan 28635450Sbrendan if (l2hdr) { 28645450Sbrendan list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 28655450Sbrendan kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 28665450Sbrendan ARCSTAT_INCR(arcstat_l2_size, -buf_size); 28675450Sbrendan } 28685450Sbrendan if (MUTEX_HELD(&l2arc_buflist_mtx)) 28695450Sbrendan mutex_exit(&l2arc_buflist_mtx); 2870789Sahrens } 2871789Sahrens 2872789Sahrens int 2873789Sahrens arc_released(arc_buf_t *buf) 2874789Sahrens { 28753403Sbmc return (buf->b_data != NULL && 
buf->b_hdr->b_state == arc_anon); 28761544Seschrock } 28771544Seschrock 28781544Seschrock int 28791544Seschrock arc_has_callback(arc_buf_t *buf) 28801544Seschrock { 28811544Seschrock return (buf->b_efunc != NULL); 2882789Sahrens } 2883789Sahrens 28841544Seschrock #ifdef ZFS_DEBUG 28851544Seschrock int 28861544Seschrock arc_referenced(arc_buf_t *buf) 28871544Seschrock { 28881544Seschrock return (refcount_count(&buf->b_hdr->b_refcnt)); 28891544Seschrock } 28901544Seschrock #endif 28911544Seschrock 2892789Sahrens static void 28933547Smaybee arc_write_ready(zio_t *zio) 28943547Smaybee { 28953547Smaybee arc_write_callback_t *callback = zio->io_private; 28963547Smaybee arc_buf_t *buf = callback->awcb_buf; 28975329Sgw25295 arc_buf_hdr_t *hdr = buf->b_hdr; 28985329Sgw25295 28995329Sgw25295 if (zio->io_error == 0 && callback->awcb_ready) { 29003547Smaybee ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 29013547Smaybee callback->awcb_ready(zio, buf, callback->awcb_private); 29023547Smaybee } 29035329Sgw25295 /* 29045329Sgw25295 * If the IO is already in progress, then this is a re-write 29055329Sgw25295 * attempt, so we need to thaw and re-compute the cksum. It is 29065329Sgw25295 * the responsibility of the callback to handle the freeing 29075329Sgw25295 * and accounting for any re-write attempt. If we don't have a 29085329Sgw25295 * callback registered then simply free the block here. 29095329Sgw25295 */ 29105329Sgw25295 if (HDR_IO_IN_PROGRESS(hdr)) { 29115329Sgw25295 if (!BP_IS_HOLE(&zio->io_bp_orig) && 29125329Sgw25295 callback->awcb_ready == NULL) { 29135329Sgw25295 zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg, 29145329Sgw25295 &zio->io_bp_orig, NULL, NULL)); 29155329Sgw25295 } 29165329Sgw25295 mutex_enter(&hdr->b_freeze_lock); 29175329Sgw25295 if (hdr->b_freeze_cksum != NULL) { 29185329Sgw25295 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 29195329Sgw25295 hdr->b_freeze_cksum = NULL; 29205329Sgw25295 } 29215329Sgw25295 mutex_exit(&hdr->b_freeze_lock); 29225329Sgw25295 } 29235450Sbrendan arc_cksum_compute(buf, B_FALSE); 29245329Sgw25295 hdr->b_flags |= ARC_IO_IN_PROGRESS; 29253547Smaybee } 29263547Smaybee 29273547Smaybee static void 2928789Sahrens arc_write_done(zio_t *zio) 2929789Sahrens { 29303547Smaybee arc_write_callback_t *callback = zio->io_private; 29313547Smaybee arc_buf_t *buf = callback->awcb_buf; 29323547Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 2933789Sahrens 2934789Sahrens hdr->b_acb = NULL; 2935789Sahrens 2936789Sahrens /* this buffer is on no lists and is not in the hash table */ 29373403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2938789Sahrens 2939789Sahrens hdr->b_dva = *BP_IDENTITY(zio->io_bp); 2940789Sahrens hdr->b_birth = zio->io_bp->blk_birth; 2941789Sahrens hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 29421544Seschrock /* 29431544Seschrock * If the block to be written was all-zero, we may have 29441544Seschrock * compressed it away. In this case no write was performed 29451544Seschrock * so there will be no dva/birth-date/checksum. The buffer 29461544Seschrock * must therefor remain anonymous (and uncached). 
29471544Seschrock */ 2948789Sahrens if (!BUF_EMPTY(hdr)) { 2949789Sahrens arc_buf_hdr_t *exists; 2950789Sahrens kmutex_t *hash_lock; 2951789Sahrens 29523093Sahrens arc_cksum_verify(buf); 29533093Sahrens 2954789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2955789Sahrens if (exists) { 2956789Sahrens /* 2957789Sahrens * This can only happen if we overwrite for 2958789Sahrens * sync-to-convergence, because we remove 2959789Sahrens * buffers from the hash table when we arc_free(). 2960789Sahrens */ 2961789Sahrens ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 2962789Sahrens BP_IDENTITY(zio->io_bp))); 2963789Sahrens ASSERT3U(zio->io_bp_orig.blk_birth, ==, 2964789Sahrens zio->io_bp->blk_birth); 2965789Sahrens 2966789Sahrens ASSERT(refcount_is_zero(&exists->b_refcnt)); 29673403Sbmc arc_change_state(arc_anon, exists, hash_lock); 2968789Sahrens mutex_exit(hash_lock); 29691544Seschrock arc_hdr_destroy(exists); 2970789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2971789Sahrens ASSERT3P(exists, ==, NULL); 2972789Sahrens } 29731544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 29742688Smaybee arc_access(hdr, hash_lock); 29752688Smaybee mutex_exit(hash_lock); 29763547Smaybee } else if (callback->awcb_done == NULL) { 29771544Seschrock int destroy_hdr; 29781544Seschrock /* 29791544Seschrock * This is an anonymous buffer with no user callback, 29801544Seschrock * destroy it if there are no active references. 29811544Seschrock */ 29821544Seschrock mutex_enter(&arc_eviction_mtx); 29831544Seschrock destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 29841544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 29851544Seschrock mutex_exit(&arc_eviction_mtx); 29861544Seschrock if (destroy_hdr) 29871544Seschrock arc_hdr_destroy(hdr); 29881544Seschrock } else { 29891544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2990789Sahrens } 29911544Seschrock 29923547Smaybee if (callback->awcb_done) { 2993789Sahrens ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 29943547Smaybee callback->awcb_done(zio, buf, callback->awcb_private); 2995789Sahrens } 2996789Sahrens 29973547Smaybee kmem_free(callback, sizeof (arc_write_callback_t)); 2998789Sahrens } 2999789Sahrens 30003547Smaybee zio_t * 30011775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 3002789Sahrens uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 30033547Smaybee arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 30043547Smaybee int flags, zbookmark_t *zb) 3005789Sahrens { 3006789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 30073547Smaybee arc_write_callback_t *callback; 30083547Smaybee zio_t *zio; 3009789Sahrens 3010789Sahrens /* this is a private buffer - no locking required */ 30113403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 3012789Sahrens ASSERT(BUF_EMPTY(hdr)); 3013789Sahrens ASSERT(!HDR_IO_ERROR(hdr)); 30142237Smaybee ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 30152237Smaybee ASSERT(hdr->b_acb == 0); 30163547Smaybee callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 30173547Smaybee callback->awcb_ready = ready; 30183547Smaybee callback->awcb_done = done; 30193547Smaybee callback->awcb_private = private; 30203547Smaybee callback->awcb_buf = buf; 30213547Smaybee zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 30223547Smaybee buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 30233547Smaybee priority, flags, zb); 3024789Sahrens 30253547Smaybee return (zio); 3026789Sahrens } 3027789Sahrens 3028789Sahrens int 3029789Sahrens arc_free(zio_t *pio, spa_t *spa, 
uint64_t txg, blkptr_t *bp, 3030789Sahrens zio_done_func_t *done, void *private, uint32_t arc_flags) 3031789Sahrens { 3032789Sahrens arc_buf_hdr_t *ab; 3033789Sahrens kmutex_t *hash_lock; 3034789Sahrens zio_t *zio; 3035789Sahrens 3036789Sahrens /* 3037789Sahrens * If this buffer is in the cache, release it, so it 3038789Sahrens * can be re-used. 3039789Sahrens */ 3040789Sahrens ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3041789Sahrens if (ab != NULL) { 3042789Sahrens /* 3043789Sahrens * The checksum of blocks to free is not always 3044789Sahrens * preserved (eg. on the deadlist). However, if it is 3045789Sahrens * nonzero, it should match what we have in the cache. 3046789Sahrens */ 3047789Sahrens ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3048789Sahrens ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 30493403Sbmc if (ab->b_state != arc_anon) 30503403Sbmc arc_change_state(arc_anon, ab, hash_lock); 30512391Smaybee if (HDR_IO_IN_PROGRESS(ab)) { 30522391Smaybee /* 30532391Smaybee * This should only happen when we prefetch. 30542391Smaybee */ 30552391Smaybee ASSERT(ab->b_flags & ARC_PREFETCH); 30562391Smaybee ASSERT3U(ab->b_datacnt, ==, 1); 30572391Smaybee ab->b_flags |= ARC_FREED_IN_READ; 30582391Smaybee if (HDR_IN_HASH_TABLE(ab)) 30592391Smaybee buf_hash_remove(ab); 30602391Smaybee ab->b_arc_access = 0; 30612391Smaybee bzero(&ab->b_dva, sizeof (dva_t)); 30622391Smaybee ab->b_birth = 0; 30632391Smaybee ab->b_cksum0 = 0; 30642391Smaybee ab->b_buf->b_efunc = NULL; 30652391Smaybee ab->b_buf->b_private = NULL; 30662391Smaybee mutex_exit(hash_lock); 30672391Smaybee } else if (refcount_is_zero(&ab->b_refcnt)) { 30685450Sbrendan ab->b_flags |= ARC_FREE_IN_PROGRESS; 3069789Sahrens mutex_exit(hash_lock); 30701544Seschrock arc_hdr_destroy(ab); 30713403Sbmc ARCSTAT_BUMP(arcstat_deleted); 3072789Sahrens } else { 30731589Smaybee /* 30742391Smaybee * We still have an active reference on this 30752391Smaybee * buffer. This can happen, e.g., from 30762391Smaybee * dbuf_unoverride(). 
30771589Smaybee */ 30782391Smaybee ASSERT(!HDR_IN_HASH_TABLE(ab)); 3079789Sahrens ab->b_arc_access = 0; 3080789Sahrens bzero(&ab->b_dva, sizeof (dva_t)); 3081789Sahrens ab->b_birth = 0; 3082789Sahrens ab->b_cksum0 = 0; 30831544Seschrock ab->b_buf->b_efunc = NULL; 30841544Seschrock ab->b_buf->b_private = NULL; 3085789Sahrens mutex_exit(hash_lock); 3086789Sahrens } 3087789Sahrens } 3088789Sahrens 3089789Sahrens zio = zio_free(pio, spa, txg, bp, done, private); 3090789Sahrens 3091789Sahrens if (arc_flags & ARC_WAIT) 3092789Sahrens return (zio_wait(zio)); 3093789Sahrens 3094789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 3095789Sahrens zio_nowait(zio); 3096789Sahrens 3097789Sahrens return (0); 3098789Sahrens } 3099789Sahrens 3100*6245Smaybee static int 3101*6245Smaybee arc_memory_throttle(uint64_t reserve, uint64_t txg) 3102*6245Smaybee { 3103*6245Smaybee #ifdef _KERNEL 3104*6245Smaybee uint64_t inflight_data = arc_anon->arcs_size; 3105*6245Smaybee uint64_t available_memory = ptob(freemem); 3106*6245Smaybee static uint64_t page_load = 0; 3107*6245Smaybee static uint64_t last_txg = 0; 3108*6245Smaybee 3109*6245Smaybee #if defined(__i386) 3110*6245Smaybee available_memory = 3111*6245Smaybee MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3112*6245Smaybee #endif 3113*6245Smaybee if (available_memory >= zfs_write_limit_max) 3114*6245Smaybee return (0); 3115*6245Smaybee 3116*6245Smaybee if (txg > last_txg) { 3117*6245Smaybee last_txg = txg; 3118*6245Smaybee page_load = 0; 3119*6245Smaybee } 3120*6245Smaybee /* 3121*6245Smaybee * If we are in pageout, we know that memory is already tight, 3122*6245Smaybee * the arc is already going to be evicting, so we just want to 3123*6245Smaybee * continue to let page writes occur as quickly as possible. 3124*6245Smaybee */ 3125*6245Smaybee if (curproc == proc_pageout) { 3126*6245Smaybee if (page_load > MAX(ptob(minfree), available_memory) / 4) 3127*6245Smaybee return (ERESTART); 3128*6245Smaybee /* Note: reserve is inflated, so we deflate */ 3129*6245Smaybee page_load += reserve / 8; 3130*6245Smaybee return (0); 3131*6245Smaybee } else if (page_load > 0 && arc_reclaim_needed()) { 3132*6245Smaybee /* memory is low, delay before restarting */ 3133*6245Smaybee ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3134*6245Smaybee return (EAGAIN); 3135*6245Smaybee } 3136*6245Smaybee page_load = 0; 3137*6245Smaybee 3138*6245Smaybee if (arc_size > arc_c_min) { 3139*6245Smaybee uint64_t evictable_memory = 3140*6245Smaybee arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3141*6245Smaybee arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3142*6245Smaybee arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3143*6245Smaybee arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3144*6245Smaybee available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3145*6245Smaybee } 3146*6245Smaybee 3147*6245Smaybee if (inflight_data > available_memory / 4) { 3148*6245Smaybee ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3149*6245Smaybee return (ERESTART); 3150*6245Smaybee } 3151*6245Smaybee #endif 3152*6245Smaybee return (0); 3153*6245Smaybee } 3154*6245Smaybee 3155789Sahrens void 3156*6245Smaybee arc_tempreserve_clear(uint64_t reserve) 3157789Sahrens { 3158*6245Smaybee atomic_add_64(&arc_tempreserve, -reserve); 3159789Sahrens ASSERT((int64_t)arc_tempreserve >= 0); 3160789Sahrens } 3161789Sahrens 3162789Sahrens int 3163*6245Smaybee arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3164789Sahrens { 3165*6245Smaybee int error; 3166*6245Smaybee 3167789Sahrens #ifdef ZFS_DEBUG 3168789Sahrens /* 3169789Sahrens * Once in a 
while, fail for no reason. Everything should cope. 3170789Sahrens */ 3171789Sahrens if (spa_get_random(10000) == 0) { 3172789Sahrens dprintf("forcing random failure\n"); 3173789Sahrens return (ERESTART); 3174789Sahrens } 3175789Sahrens #endif 3176*6245Smaybee if (reserve > arc_c/4 && !arc_no_grow) 3177*6245Smaybee arc_c = MIN(arc_c_max, reserve * 4); 3178*6245Smaybee if (reserve > arc_c) 3179982Smaybee return (ENOMEM); 3180982Smaybee 3181789Sahrens /* 3182*6245Smaybee * Writes will, almost always, require additional memory allocations 3183*6245Smaybee * in order to compress/encrypt/etc the data. We therefor need to 3184*6245Smaybee * make sure that there is sufficient available memory for this. 3185*6245Smaybee */ 3186*6245Smaybee if (error = arc_memory_throttle(reserve, txg)) 3187*6245Smaybee return (error); 3188*6245Smaybee 3189*6245Smaybee /* 3190982Smaybee * Throttle writes when the amount of dirty data in the cache 3191982Smaybee * gets too large. We try to keep the cache less than half full 3192982Smaybee * of dirty blocks so that our sync times don't grow too large. 3193982Smaybee * Note: if two requests come in concurrently, we might let them 3194982Smaybee * both succeed, when one of them should fail. Not a huge deal. 3195789Sahrens */ 3196*6245Smaybee if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 3197*6245Smaybee arc_anon->arcs_size > arc_c / 4) { 31984309Smaybee dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 31994309Smaybee "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 32004309Smaybee arc_tempreserve>>10, 32014309Smaybee arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 32024309Smaybee arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3203*6245Smaybee reserve>>10, arc_c>>10); 3204789Sahrens return (ERESTART); 3205789Sahrens } 3206*6245Smaybee atomic_add_64(&arc_tempreserve, reserve); 3207789Sahrens return (0); 3208789Sahrens } 3209789Sahrens 3210789Sahrens void 3211789Sahrens arc_init(void) 3212789Sahrens { 3213789Sahrens mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3214789Sahrens cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3215789Sahrens 32162391Smaybee /* Convert seconds to clock ticks */ 32172638Sperrin arc_min_prefetch_lifespan = 1 * hz; 32182391Smaybee 3219789Sahrens /* Start out with 1/8 of all memory */ 32203403Sbmc arc_c = physmem * PAGESIZE / 8; 3221789Sahrens 3222789Sahrens #ifdef _KERNEL 3223789Sahrens /* 3224789Sahrens * On architectures where the physical memory can be larger 3225789Sahrens * than the addressable space (intel in 32-bit mode), we may 3226789Sahrens * need to limit the cache to 1/8 of VM size. 3227789Sahrens */ 32283403Sbmc arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3229789Sahrens #endif 3230789Sahrens 3231982Smaybee /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 32323403Sbmc arc_c_min = MAX(arc_c / 4, 64<<20); 3233982Smaybee /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 32343403Sbmc if (arc_c * 8 >= 1<<30) 32353403Sbmc arc_c_max = (arc_c * 8) - (1<<30); 3236789Sahrens else 32373403Sbmc arc_c_max = arc_c_min; 32383403Sbmc arc_c_max = MAX(arc_c * 6, arc_c_max); 32392885Sahrens 32402885Sahrens /* 32412885Sahrens * Allow the tunables to override our calculations if they are 32422885Sahrens * reasonable (ie. 
over 64MB) 32432885Sahrens */ 32442885Sahrens if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 32453403Sbmc arc_c_max = zfs_arc_max; 32463403Sbmc if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 32473403Sbmc arc_c_min = zfs_arc_min; 32482885Sahrens 32493403Sbmc arc_c = arc_c_max; 32503403Sbmc arc_p = (arc_c >> 1); 3251789Sahrens 32524309Smaybee /* limit meta-data to 1/4 of the arc capacity */ 32534309Smaybee arc_meta_limit = arc_c_max / 4; 32544645Sek110237 32554645Sek110237 /* Allow the tunable to override if it is reasonable */ 32564645Sek110237 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 32574645Sek110237 arc_meta_limit = zfs_arc_meta_limit; 32584645Sek110237 32594309Smaybee if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 32604309Smaybee arc_c_min = arc_meta_limit / 2; 32614309Smaybee 3262789Sahrens /* if kmem_flags are set, lets try to use less memory */ 3263789Sahrens if (kmem_debugging()) 32643403Sbmc arc_c = arc_c / 2; 32653403Sbmc if (arc_c < arc_c_min) 32663403Sbmc arc_c = arc_c_min; 3267789Sahrens 32683403Sbmc arc_anon = &ARC_anon; 32693403Sbmc arc_mru = &ARC_mru; 32703403Sbmc arc_mru_ghost = &ARC_mru_ghost; 32713403Sbmc arc_mfu = &ARC_mfu; 32723403Sbmc arc_mfu_ghost = &ARC_mfu_ghost; 32735450Sbrendan arc_l2c_only = &ARC_l2c_only; 32743403Sbmc arc_size = 0; 3275789Sahrens 32763403Sbmc mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32773403Sbmc mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32783403Sbmc mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32793403Sbmc mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32803403Sbmc mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32815450Sbrendan mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 32822688Smaybee 32834309Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 32844309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32854309Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 32864309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32874309Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 32884309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32894309Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 32904309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32914309Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 32924309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32934309Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 32944309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32954309Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 32964309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32974309Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 32984309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 32995450Sbrendan list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 33005450Sbrendan sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 33015450Sbrendan list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 33025450Sbrendan sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3303789Sahrens 3304789Sahrens buf_init(); 3305789Sahrens 3306789Sahrens arc_thread_exit = 0; 33071544Seschrock arc_eviction_list = NULL; 33081544Seschrock mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 33092887Smaybee bzero(&arc_eviction_hdr, 
sizeof (arc_buf_hdr_t)); 3310789Sahrens 33113403Sbmc arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 33123403Sbmc sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 33133403Sbmc 33143403Sbmc if (arc_ksp != NULL) { 33153403Sbmc arc_ksp->ks_data = &arc_stats; 33163403Sbmc kstat_install(arc_ksp); 33173403Sbmc } 33183403Sbmc 3319789Sahrens (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3320789Sahrens TS_RUN, minclsyspri); 33213158Smaybee 33223158Smaybee arc_dead = FALSE; 3323*6245Smaybee 3324*6245Smaybee if (zfs_write_limit_max == 0) 3325*6245Smaybee zfs_write_limit_max = physmem * PAGESIZE >> 3326*6245Smaybee zfs_write_limit_shift; 3327*6245Smaybee else 3328*6245Smaybee zfs_write_limit_shift = 0; 3329789Sahrens } 3330789Sahrens 3331789Sahrens void 3332789Sahrens arc_fini(void) 3333789Sahrens { 3334789Sahrens mutex_enter(&arc_reclaim_thr_lock); 3335789Sahrens arc_thread_exit = 1; 3336789Sahrens while (arc_thread_exit != 0) 3337789Sahrens cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3338789Sahrens mutex_exit(&arc_reclaim_thr_lock); 3339789Sahrens 33405642Smaybee arc_flush(NULL); 3341789Sahrens 3342789Sahrens arc_dead = TRUE; 3343789Sahrens 33443403Sbmc if (arc_ksp != NULL) { 33453403Sbmc kstat_delete(arc_ksp); 33463403Sbmc arc_ksp = NULL; 33473403Sbmc } 33483403Sbmc 33491544Seschrock mutex_destroy(&arc_eviction_mtx); 3350789Sahrens mutex_destroy(&arc_reclaim_thr_lock); 3351789Sahrens cv_destroy(&arc_reclaim_thr_cv); 3352789Sahrens 33534309Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 33544309Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 33554309Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 33564309Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 33574309Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 33584309Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 33594309Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 33604309Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3361789Sahrens 33623403Sbmc mutex_destroy(&arc_anon->arcs_mtx); 33633403Sbmc mutex_destroy(&arc_mru->arcs_mtx); 33643403Sbmc mutex_destroy(&arc_mru_ghost->arcs_mtx); 33653403Sbmc mutex_destroy(&arc_mfu->arcs_mtx); 33663403Sbmc mutex_destroy(&arc_mfu_ghost->arcs_mtx); 33672856Snd150628 3368789Sahrens buf_fini(); 3369789Sahrens } 33705450Sbrendan 33715450Sbrendan /* 33725450Sbrendan * Level 2 ARC 33735450Sbrendan * 33745450Sbrendan * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 33755450Sbrendan * It uses dedicated storage devices to hold cached data, which are populated 33765450Sbrendan * using large infrequent writes. The main role of this cache is to boost 33775450Sbrendan * the performance of random read workloads. The intended L2ARC devices 33785450Sbrendan * include short-stroked disks, solid state disks, and other media with 33795450Sbrendan * substantially faster read latency than disk. 
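 *
 * Cache devices are registered with the L2ARC via l2arc_add_vdev() and
 * retired via l2arc_remove_vdev(), both defined near the bottom of this
 * file.  The diagram below shows where the L2ARC sits between the ARC
 * and the pool's disks: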
33805450Sbrendan * 33815450Sbrendan * +-----------------------+ 33825450Sbrendan * | ARC | 33835450Sbrendan * +-----------------------+ 33845450Sbrendan * | ^ ^ 33855450Sbrendan * | | | 33865450Sbrendan * l2arc_feed_thread() arc_read() 33875450Sbrendan * | | | 33885450Sbrendan * | l2arc read | 33895450Sbrendan * V | | 33905450Sbrendan * +---------------+ | 33915450Sbrendan * | L2ARC | | 33925450Sbrendan * +---------------+ | 33935450Sbrendan * | ^ | 33945450Sbrendan * l2arc_write() | | 33955450Sbrendan * | | | 33965450Sbrendan * V | | 33975450Sbrendan * +-------+ +-------+ 33985450Sbrendan * | vdev | | vdev | 33995450Sbrendan * | cache | | cache | 34005450Sbrendan * +-------+ +-------+ 34015450Sbrendan * +=========+ .-----. 34025450Sbrendan * : L2ARC : |-_____-| 34035450Sbrendan * : devices : | Disks | 34045450Sbrendan * +=========+ `-_____-' 34055450Sbrendan * 34065450Sbrendan * Read requests are satisfied from the following sources, in order: 34075450Sbrendan * 34085450Sbrendan * 1) ARC 34095450Sbrendan * 2) vdev cache of L2ARC devices 34105450Sbrendan * 3) L2ARC devices 34115450Sbrendan * 4) vdev cache of disks 34125450Sbrendan * 5) disks 34135450Sbrendan * 34145450Sbrendan * Some L2ARC device types exhibit extremely slow write performance. 34155450Sbrendan * To accommodate for this there are some significant differences between 34165450Sbrendan * the L2ARC and traditional cache design: 34175450Sbrendan * 34185450Sbrendan * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 34195450Sbrendan * the ARC behave as usual, freeing buffers and placing headers on ghost 34205450Sbrendan * lists. The ARC does not send buffers to the L2ARC during eviction as 34215450Sbrendan * this would add inflated write latencies for all ARC memory pressure. 34225450Sbrendan * 34235450Sbrendan * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 34245450Sbrendan * It does this by periodically scanning buffers from the eviction-end of 34255450Sbrendan * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 34265450Sbrendan * not already there. It scans until a headroom of buffers is satisfied, 34275450Sbrendan * which itself is a buffer for ARC eviction. The thread that does this is 34285450Sbrendan * l2arc_feed_thread(), illustrated below; example sizes are included to 34295450Sbrendan * provide a better sense of ratio than this diagram: 34305450Sbrendan * 34315450Sbrendan * head --> tail 34325450Sbrendan * +---------------------+----------+ 34335450Sbrendan * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 34345450Sbrendan * +---------------------+----------+ | o L2ARC eligible 34355450Sbrendan * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 34365450Sbrendan * +---------------------+----------+ | 34375450Sbrendan * 15.9 Gbytes ^ 32 Mbytes | 34385450Sbrendan * headroom | 34395450Sbrendan * l2arc_feed_thread() 34405450Sbrendan * | 34415450Sbrendan * l2arc write hand <--[oooo]--' 34425450Sbrendan * | 8 Mbyte 34435450Sbrendan * | write max 34445450Sbrendan * V 34455450Sbrendan * +==============================+ 34465450Sbrendan * L2ARC dev |####|#|###|###| |####| ... | 34475450Sbrendan * +==============================+ 34485450Sbrendan * 32 Gbytes 34495450Sbrendan * 34505450Sbrendan * 3. 
If an ARC buffer is copied to the L2ARC but then hit instead of 34515450Sbrendan * evicted, then the L2ARC has cached a buffer much sooner than it probably 34525450Sbrendan * needed to, potentially wasting L2ARC device bandwidth and storage. It is 34535450Sbrendan * safe to say that this is an uncommon case, since buffers at the end of 34545450Sbrendan * the ARC lists have moved there due to inactivity. 34555450Sbrendan * 34565450Sbrendan * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 34575450Sbrendan * then the L2ARC simply misses copying some buffers. This serves as a 34585450Sbrendan * pressure valve to prevent heavy read workloads from both stalling the ARC 34595450Sbrendan * with waits and clogging the L2ARC with writes. This also helps prevent 34605450Sbrendan * the potential for the L2ARC to churn if it attempts to cache content too 34615450Sbrendan * quickly, such as during backups of the entire pool. 34625450Sbrendan * 34635450Sbrendan * 5. Writes to the L2ARC devices are grouped and sent in-sequence, so that 34645450Sbrendan * the vdev queue can aggregate them into larger and fewer writes. Each 34655450Sbrendan * device is written to in a rotor fashion, sweeping writes through 34665450Sbrendan * available space then repeating. 34675450Sbrendan * 34685450Sbrendan * 6. The L2ARC does not store dirty content. It never needs to flush 34695450Sbrendan * write buffers back to disk based storage. 34705450Sbrendan * 34715450Sbrendan * 7. If an ARC buffer is written (and dirtied) which also exists in the 34725450Sbrendan * L2ARC, the now stale L2ARC buffer is immediately dropped. 34735450Sbrendan * 34745450Sbrendan * The performance of the L2ARC can be tweaked by a number of tunables, which 34755450Sbrendan * may be necessary for different workloads: 34765450Sbrendan * 34775450Sbrendan * l2arc_write_max max write bytes per interval 34785450Sbrendan * l2arc_noprefetch skip caching prefetched buffers 34795450Sbrendan * l2arc_headroom number of max device writes to precache 34805450Sbrendan * l2arc_feed_secs seconds between L2ARC writing 34815450Sbrendan * 34825450Sbrendan * Tunables may be removed or added as future performance improvements are 34835450Sbrendan * integrated, and also may become zpool properties. 34845450Sbrendan */ 34855450Sbrendan 34865450Sbrendan static void 34875450Sbrendan l2arc_hdr_stat_add(void) 34885450Sbrendan { 34896018Sbrendan ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 34906018Sbrendan ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 34915450Sbrendan } 34925450Sbrendan 34935450Sbrendan static void 34945450Sbrendan l2arc_hdr_stat_remove(void) 34955450Sbrendan { 34966018Sbrendan ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 34976018Sbrendan ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 34985450Sbrendan } 34995450Sbrendan 35005450Sbrendan /* 35015450Sbrendan * Cycle through L2ARC devices. This is how L2ARC load balances. 35025450Sbrendan * This is called with l2arc_dev_mtx held, which also locks out spa removal. 
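 *
 * For example, with cache devices A, B and C on l2arc_dev_list,
 * successive calls return A, B, C, A, ...; l2arc_dev_last records
 * where the rotor stopped so the next feed cycle resumes after it.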
35035450Sbrendan */ 35045450Sbrendan static l2arc_dev_t * 35055450Sbrendan l2arc_dev_get_next(void) 35065450Sbrendan { 35075450Sbrendan l2arc_dev_t *next; 35085450Sbrendan 35095450Sbrendan if (l2arc_dev_last == NULL) { 35105450Sbrendan next = list_head(l2arc_dev_list); 35115450Sbrendan } else { 35125450Sbrendan next = list_next(l2arc_dev_list, l2arc_dev_last); 35135450Sbrendan if (next == NULL) 35145450Sbrendan next = list_head(l2arc_dev_list); 35155450Sbrendan } 35165450Sbrendan 35175450Sbrendan l2arc_dev_last = next; 35185450Sbrendan 35195450Sbrendan return (next); 35205450Sbrendan } 35215450Sbrendan 35225450Sbrendan /* 35235450Sbrendan * A write to a cache device has completed. Update all headers to allow 35245450Sbrendan * reads from these buffers to begin. 35255450Sbrendan */ 35265450Sbrendan static void 35275450Sbrendan l2arc_write_done(zio_t *zio) 35285450Sbrendan { 35295450Sbrendan l2arc_write_callback_t *cb; 35305450Sbrendan l2arc_dev_t *dev; 35315450Sbrendan list_t *buflist; 35325450Sbrendan l2arc_data_free_t *df, *df_prev; 35335450Sbrendan arc_buf_hdr_t *head, *ab, *ab_prev; 35345450Sbrendan kmutex_t *hash_lock; 35355450Sbrendan 35365450Sbrendan cb = zio->io_private; 35375450Sbrendan ASSERT(cb != NULL); 35385450Sbrendan dev = cb->l2wcb_dev; 35395450Sbrendan ASSERT(dev != NULL); 35405450Sbrendan head = cb->l2wcb_head; 35415450Sbrendan ASSERT(head != NULL); 35425450Sbrendan buflist = dev->l2ad_buflist; 35435450Sbrendan ASSERT(buflist != NULL); 35445450Sbrendan DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 35455450Sbrendan l2arc_write_callback_t *, cb); 35465450Sbrendan 35475450Sbrendan if (zio->io_error != 0) 35485450Sbrendan ARCSTAT_BUMP(arcstat_l2_writes_error); 35495450Sbrendan 35505450Sbrendan mutex_enter(&l2arc_buflist_mtx); 35515450Sbrendan 35525450Sbrendan /* 35535450Sbrendan * All writes completed, or an error was hit. 35545450Sbrendan */ 35555450Sbrendan for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 35565450Sbrendan ab_prev = list_prev(buflist, ab); 35575450Sbrendan 35585450Sbrendan hash_lock = HDR_LOCK(ab); 35595450Sbrendan if (!mutex_tryenter(hash_lock)) { 35605450Sbrendan /* 35615450Sbrendan * This buffer misses out. It may be in a stage 35625450Sbrendan * of eviction. Its ARC_L2_WRITING flag will be 35635450Sbrendan * left set, denying reads to this buffer. 35645450Sbrendan */ 35655450Sbrendan ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 35665450Sbrendan continue; 35675450Sbrendan } 35685450Sbrendan 35695450Sbrendan if (zio->io_error != 0) { 35705450Sbrendan /* 35715450Sbrendan * Error - invalidate L2ARC entry. 35725450Sbrendan */ 35735450Sbrendan ab->b_l2hdr = NULL; 35745450Sbrendan } 35755450Sbrendan 35765450Sbrendan /* 35775450Sbrendan * Allow ARC to begin reads to this L2ARC entry. 35785450Sbrendan */ 35795450Sbrendan ab->b_flags &= ~ARC_L2_WRITING; 35805450Sbrendan 35815450Sbrendan mutex_exit(hash_lock); 35825450Sbrendan } 35835450Sbrendan 35845450Sbrendan atomic_inc_64(&l2arc_writes_done); 35855450Sbrendan list_remove(buflist, head); 35865450Sbrendan kmem_cache_free(hdr_cache, head); 35875450Sbrendan mutex_exit(&l2arc_buflist_mtx); 35885450Sbrendan 35895450Sbrendan /* 35905450Sbrendan * Free buffers that were tagged for destruction. 
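 * These are data buffers that the ARC wanted to free while this write
 * was still in flight; freeing them immediately would have let the
 * device read from reclaimed memory, so the free was deferred onto the
 * l2arc_free_on_write list and is carried out now that the I/O is done.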
35915450Sbrendan */ 35925450Sbrendan mutex_enter(&l2arc_free_on_write_mtx); 35935450Sbrendan buflist = l2arc_free_on_write; 35945450Sbrendan for (df = list_tail(buflist); df; df = df_prev) { 35955450Sbrendan df_prev = list_prev(buflist, df); 35965450Sbrendan ASSERT(df->l2df_data != NULL); 35975450Sbrendan ASSERT(df->l2df_func != NULL); 35985450Sbrendan df->l2df_func(df->l2df_data, df->l2df_size); 35995450Sbrendan list_remove(buflist, df); 36005450Sbrendan kmem_free(df, sizeof (l2arc_data_free_t)); 36015450Sbrendan } 36025450Sbrendan mutex_exit(&l2arc_free_on_write_mtx); 36035450Sbrendan 36045450Sbrendan kmem_free(cb, sizeof (l2arc_write_callback_t)); 36055450Sbrendan } 36065450Sbrendan 36075450Sbrendan /* 36085450Sbrendan * A read to a cache device completed. Validate buffer contents before 36095450Sbrendan * handing over to the regular ARC routines. 36105450Sbrendan */ 36115450Sbrendan static void 36125450Sbrendan l2arc_read_done(zio_t *zio) 36135450Sbrendan { 36145450Sbrendan l2arc_read_callback_t *cb; 36155450Sbrendan arc_buf_hdr_t *hdr; 36165450Sbrendan arc_buf_t *buf; 36175450Sbrendan zio_t *rzio; 36185450Sbrendan kmutex_t *hash_lock; 36195450Sbrendan int equal, err = 0; 36205450Sbrendan 36215450Sbrendan cb = zio->io_private; 36225450Sbrendan ASSERT(cb != NULL); 36235450Sbrendan buf = cb->l2rcb_buf; 36245450Sbrendan ASSERT(buf != NULL); 36255450Sbrendan hdr = buf->b_hdr; 36265450Sbrendan ASSERT(hdr != NULL); 36275450Sbrendan 36285450Sbrendan hash_lock = HDR_LOCK(hdr); 36295450Sbrendan mutex_enter(hash_lock); 36305450Sbrendan 36315450Sbrendan /* 36325450Sbrendan * Check this survived the L2ARC journey. 36335450Sbrendan */ 36345450Sbrendan equal = arc_cksum_equal(buf); 36355450Sbrendan if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 36365450Sbrendan mutex_exit(hash_lock); 36375450Sbrendan zio->io_private = buf; 36385450Sbrendan arc_read_done(zio); 36395450Sbrendan } else { 36405450Sbrendan mutex_exit(hash_lock); 36415450Sbrendan /* 36425450Sbrendan * Buffer didn't survive caching. Increment stats and 36435450Sbrendan * reissue to the original storage device. 36445450Sbrendan */ 36455450Sbrendan if (zio->io_error != 0) 36465450Sbrendan ARCSTAT_BUMP(arcstat_l2_io_error); 36475450Sbrendan if (!equal) 36485450Sbrendan ARCSTAT_BUMP(arcstat_l2_cksum_bad); 36495450Sbrendan 36505450Sbrendan zio->io_flags &= ~ZIO_FLAG_DONT_CACHE; 36515450Sbrendan rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp, 36525450Sbrendan buf->b_data, zio->io_size, arc_read_done, buf, 36535450Sbrendan zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb); 36545450Sbrendan 36555450Sbrendan /* 36565450Sbrendan * Since this is a seperate thread, we can wait on this 36575450Sbrendan * I/O whether there is an io_waiter or not. 36585450Sbrendan */ 36595450Sbrendan err = zio_wait(rzio); 36605450Sbrendan 36615450Sbrendan /* 36625450Sbrendan * Let the resent I/O call arc_read_done() instead. 36635450Sbrendan * io_error is set to the reissued I/O error status. 36645450Sbrendan */ 36655450Sbrendan zio->io_done = NULL; 36665450Sbrendan zio->io_waiter = NULL; 36675450Sbrendan zio->io_error = err; 36685450Sbrendan } 36695450Sbrendan 36705450Sbrendan kmem_free(cb, sizeof (l2arc_read_callback_t)); 36715450Sbrendan } 36725450Sbrendan 36735450Sbrendan /* 36745450Sbrendan * This is the list priority from which the L2ARC will search for pages to 36755450Sbrendan * cache. This is used within loops (0..3) to cycle through lists in the 36765450Sbrendan * desired order. 
This order can have a significant effect on cache 36775450Sbrendan * performance. 36785450Sbrendan * 36795450Sbrendan * Currently the metadata lists are hit first, MFU then MRU, followed by 36805450Sbrendan * the data lists. This function returns a locked list, and also returns 36815450Sbrendan * the lock pointer. 36825450Sbrendan */ 36835450Sbrendan static list_t * 36845450Sbrendan l2arc_list_locked(int list_num, kmutex_t **lock) 36855450Sbrendan { 36865450Sbrendan list_t *list; 36875450Sbrendan 36885450Sbrendan ASSERT(list_num >= 0 && list_num <= 3); 36895450Sbrendan 36905450Sbrendan switch (list_num) { 36915450Sbrendan case 0: 36925450Sbrendan list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 36935450Sbrendan *lock = &arc_mfu->arcs_mtx; 36945450Sbrendan break; 36955450Sbrendan case 1: 36965450Sbrendan list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 36975450Sbrendan *lock = &arc_mru->arcs_mtx; 36985450Sbrendan break; 36995450Sbrendan case 2: 37005450Sbrendan list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 37015450Sbrendan *lock = &arc_mfu->arcs_mtx; 37025450Sbrendan break; 37035450Sbrendan case 3: 37045450Sbrendan list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 37055450Sbrendan *lock = &arc_mru->arcs_mtx; 37065450Sbrendan break; 37075450Sbrendan } 37085450Sbrendan 37095450Sbrendan ASSERT(!(MUTEX_HELD(*lock))); 37105450Sbrendan mutex_enter(*lock); 37115450Sbrendan return (list); 37125450Sbrendan } 37135450Sbrendan 37145450Sbrendan /* 37155450Sbrendan * Evict buffers from the device write hand to the distance specified in 37165450Sbrendan * bytes. This distance may span populated buffers, it may span nothing. 37175450Sbrendan * This is clearing a region on the L2ARC device ready for writing. 37185450Sbrendan * If the 'all' boolean is set, every buffer is evicted. 37195450Sbrendan */ 37205450Sbrendan static void 37215450Sbrendan l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 37225450Sbrendan { 37235450Sbrendan list_t *buflist; 37245450Sbrendan l2arc_buf_hdr_t *abl2; 37255450Sbrendan arc_buf_hdr_t *ab, *ab_prev; 37265450Sbrendan kmutex_t *hash_lock; 37275450Sbrendan uint64_t taddr; 37285450Sbrendan 37295450Sbrendan ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 37305450Sbrendan 37315450Sbrendan buflist = dev->l2ad_buflist; 37325450Sbrendan 37335450Sbrendan if (buflist == NULL) 37345450Sbrendan return; 37355450Sbrendan 37365450Sbrendan if (!all && dev->l2ad_first) { 37375450Sbrendan /* 37385450Sbrendan * This is the first sweep through the device. There is 37395450Sbrendan * nothing to evict. 37405450Sbrendan */ 37415450Sbrendan return; 37425450Sbrendan } 37435450Sbrendan 37445450Sbrendan if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) { 37455450Sbrendan /* 37465450Sbrendan * When nearing the end of the device, evict to the end 37475450Sbrendan * before the device write hand jumps to the start. 37485450Sbrendan */ 37495450Sbrendan taddr = dev->l2ad_end; 37505450Sbrendan } else { 37515450Sbrendan taddr = dev->l2ad_hand + distance; 37525450Sbrendan } 37535450Sbrendan DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 37545450Sbrendan uint64_t, taddr, boolean_t, all); 37555450Sbrendan 37565450Sbrendan top: 37575450Sbrendan mutex_enter(&l2arc_buflist_mtx); 37585450Sbrendan for (ab = list_tail(buflist); ab; ab = ab_prev) { 37595450Sbrendan ab_prev = list_prev(buflist, ab); 37605450Sbrendan 37615450Sbrendan hash_lock = HDR_LOCK(ab); 37625450Sbrendan if (!mutex_tryenter(hash_lock)) { 37635450Sbrendan /* 37645450Sbrendan * Missed the hash lock. Retry. 
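 * Blocking on the hash lock while holding l2arc_buflist_mtx could
 * deadlock, so instead drop the buflist lock, take and release the
 * hash lock to let its current holder finish, and restart the scan
 * from the tail.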
37655450Sbrendan */ 37665450Sbrendan ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 37675450Sbrendan mutex_exit(&l2arc_buflist_mtx); 37685450Sbrendan mutex_enter(hash_lock); 37695450Sbrendan mutex_exit(hash_lock); 37705450Sbrendan goto top; 37715450Sbrendan } 37725450Sbrendan 37735450Sbrendan if (HDR_L2_WRITE_HEAD(ab)) { 37745450Sbrendan /* 37755450Sbrendan * We hit a write head node. Leave it for 37765450Sbrendan * l2arc_write_done(). 37775450Sbrendan */ 37785450Sbrendan list_remove(buflist, ab); 37795450Sbrendan mutex_exit(hash_lock); 37805450Sbrendan continue; 37815450Sbrendan } 37825450Sbrendan 37835450Sbrendan if (!all && ab->b_l2hdr != NULL && 37845450Sbrendan (ab->b_l2hdr->b_daddr > taddr || 37855450Sbrendan ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 37865450Sbrendan /* 37875450Sbrendan * We've evicted to the target address, 37885450Sbrendan * or the end of the device. 37895450Sbrendan */ 37905450Sbrendan mutex_exit(hash_lock); 37915450Sbrendan break; 37925450Sbrendan } 37935450Sbrendan 37945450Sbrendan if (HDR_FREE_IN_PROGRESS(ab)) { 37955450Sbrendan /* 37965450Sbrendan * Already on the path to destruction. 37975450Sbrendan */ 37985450Sbrendan mutex_exit(hash_lock); 37995450Sbrendan continue; 38005450Sbrendan } 38015450Sbrendan 38025450Sbrendan if (ab->b_state == arc_l2c_only) { 38035450Sbrendan ASSERT(!HDR_L2_READING(ab)); 38045450Sbrendan /* 38055450Sbrendan * This doesn't exist in the ARC. Destroy. 38065450Sbrendan * arc_hdr_destroy() will call list_remove() 38075450Sbrendan * and decrement arcstat_l2_size. 38085450Sbrendan */ 38095450Sbrendan arc_change_state(arc_anon, ab, hash_lock); 38105450Sbrendan arc_hdr_destroy(ab); 38115450Sbrendan } else { 38125450Sbrendan /* 38135450Sbrendan * Tell ARC this no longer exists in L2ARC. 38145450Sbrendan */ 38155450Sbrendan if (ab->b_l2hdr != NULL) { 38165450Sbrendan abl2 = ab->b_l2hdr; 38175450Sbrendan ab->b_l2hdr = NULL; 38185450Sbrendan kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 38195450Sbrendan ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 38205450Sbrendan } 38215450Sbrendan list_remove(buflist, ab); 38225450Sbrendan 38235450Sbrendan /* 38245450Sbrendan * This may have been leftover after a 38255450Sbrendan * failed write. 38265450Sbrendan */ 38275450Sbrendan ab->b_flags &= ~ARC_L2_WRITING; 38285450Sbrendan 38295450Sbrendan /* 38305450Sbrendan * Invalidate issued or about to be issued 38315450Sbrendan * reads, since we may be about to write 38325450Sbrendan * over this location. 38335450Sbrendan */ 38345450Sbrendan if (HDR_L2_READING(ab)) { 38355450Sbrendan ARCSTAT_BUMP(arcstat_l2_evict_reading); 38365450Sbrendan ab->b_flags |= ARC_L2_EVICTED; 38375450Sbrendan } 38385450Sbrendan } 38395450Sbrendan mutex_exit(hash_lock); 38405450Sbrendan } 38415450Sbrendan mutex_exit(&l2arc_buflist_mtx); 38425450Sbrendan 38435450Sbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 38445450Sbrendan dev->l2ad_evict = taddr; 38455450Sbrendan } 38465450Sbrendan 38475450Sbrendan /* 38485450Sbrendan * Find and write ARC buffers to the L2ARC device. 38495450Sbrendan * 38505450Sbrendan * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 38515450Sbrendan * for reading until they have completed writing. 
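 *
 * The read side honours this flag: the miss path in arc_read() only
 * issues an L2ARC read when the header carries L2ARC metadata and the
 * write has completed, roughly:
 *
 *	if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr))
 *		rzio = zio_read_phys(..., l2arc_read_done, ...);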
38525450Sbrendan */ 38535450Sbrendan static void 38545450Sbrendan l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev) 38555450Sbrendan { 38565450Sbrendan arc_buf_hdr_t *ab, *ab_prev, *head; 38575450Sbrendan l2arc_buf_hdr_t *hdrl2; 38585450Sbrendan list_t *list; 38595450Sbrendan uint64_t passed_sz, write_sz, buf_sz; 38605450Sbrendan uint64_t target_sz = dev->l2ad_write; 38615450Sbrendan uint64_t headroom = dev->l2ad_write * l2arc_headroom; 38625450Sbrendan void *buf_data; 38635450Sbrendan kmutex_t *hash_lock, *list_lock; 38645450Sbrendan boolean_t have_lock, full; 38655450Sbrendan l2arc_write_callback_t *cb; 38665450Sbrendan zio_t *pio, *wzio; 38675450Sbrendan 38685450Sbrendan ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); 38695450Sbrendan ASSERT(dev->l2ad_vdev != NULL); 38705450Sbrendan 38715450Sbrendan pio = NULL; 38725450Sbrendan write_sz = 0; 38735450Sbrendan full = B_FALSE; 3874*6245Smaybee head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 38755450Sbrendan head->b_flags |= ARC_L2_WRITE_HEAD; 38765450Sbrendan 38775450Sbrendan /* 38785450Sbrendan * Copy buffers for L2ARC writing. 38795450Sbrendan */ 38805450Sbrendan mutex_enter(&l2arc_buflist_mtx); 38815450Sbrendan for (int try = 0; try <= 3; try++) { 38825450Sbrendan list = l2arc_list_locked(try, &list_lock); 38835450Sbrendan passed_sz = 0; 38845450Sbrendan 38855450Sbrendan for (ab = list_tail(list); ab; ab = ab_prev) { 38865450Sbrendan ab_prev = list_prev(list, ab); 38875450Sbrendan 38885450Sbrendan hash_lock = HDR_LOCK(ab); 38895450Sbrendan have_lock = MUTEX_HELD(hash_lock); 38905450Sbrendan if (!have_lock && !mutex_tryenter(hash_lock)) { 38915450Sbrendan /* 38925450Sbrendan * Skip this buffer rather than waiting. 38935450Sbrendan */ 38945450Sbrendan continue; 38955450Sbrendan } 38965450Sbrendan 38975450Sbrendan passed_sz += ab->b_size; 38985450Sbrendan if (passed_sz > headroom) { 38995450Sbrendan /* 39005450Sbrendan * Searched too far. 39015450Sbrendan */ 39025450Sbrendan mutex_exit(hash_lock); 39035450Sbrendan break; 39045450Sbrendan } 39055450Sbrendan 39065450Sbrendan if (ab->b_spa != spa) { 39075450Sbrendan mutex_exit(hash_lock); 39085450Sbrendan continue; 39095450Sbrendan } 39105450Sbrendan 39115450Sbrendan if (ab->b_l2hdr != NULL) { 39125450Sbrendan /* 39135450Sbrendan * Already in L2ARC. 39145450Sbrendan */ 39155450Sbrendan mutex_exit(hash_lock); 39165450Sbrendan continue; 39175450Sbrendan } 39185450Sbrendan 39195450Sbrendan if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) { 39205450Sbrendan mutex_exit(hash_lock); 39215450Sbrendan continue; 39225450Sbrendan } 39235450Sbrendan 39245450Sbrendan if ((write_sz + ab->b_size) > target_sz) { 39255450Sbrendan full = B_TRUE; 39265450Sbrendan mutex_exit(hash_lock); 39275450Sbrendan break; 39285450Sbrendan } 39295450Sbrendan 39305450Sbrendan if (ab->b_buf == NULL) { 39315450Sbrendan DTRACE_PROBE1(l2arc__buf__null, void *, ab); 39325450Sbrendan mutex_exit(hash_lock); 39335450Sbrendan continue; 39345450Sbrendan } 39355450Sbrendan 39365450Sbrendan if (pio == NULL) { 39375450Sbrendan /* 39385450Sbrendan * Insert a dummy header on the buflist so 39395450Sbrendan * l2arc_write_done() can find where the 39405450Sbrendan * write buffers begin without searching. 
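 * The dummy header is the one allocated above with ARC_L2_WRITE_HEAD
 * set; l2arc_write_done() starts its walk of the buflist from it, and
 * l2arc_evict() checks that flag so the marker is never treated as a
 * real buffer.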
39415450Sbrendan */ 39425450Sbrendan list_insert_head(dev->l2ad_buflist, head); 39435450Sbrendan 39445450Sbrendan cb = kmem_alloc( 39455450Sbrendan sizeof (l2arc_write_callback_t), KM_SLEEP); 39465450Sbrendan cb->l2wcb_dev = dev; 39475450Sbrendan cb->l2wcb_head = head; 39485450Sbrendan pio = zio_root(spa, l2arc_write_done, cb, 39495450Sbrendan ZIO_FLAG_CANFAIL); 39505450Sbrendan } 39515450Sbrendan 39525450Sbrendan /* 39535450Sbrendan * Create and add a new L2ARC header. 39545450Sbrendan */ 39555450Sbrendan hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 39565450Sbrendan hdrl2->b_dev = dev; 39575450Sbrendan hdrl2->b_daddr = dev->l2ad_hand; 39585450Sbrendan 39595450Sbrendan ab->b_flags |= ARC_L2_WRITING; 39605450Sbrendan ab->b_l2hdr = hdrl2; 39615450Sbrendan list_insert_head(dev->l2ad_buflist, ab); 39625450Sbrendan buf_data = ab->b_buf->b_data; 39635450Sbrendan buf_sz = ab->b_size; 39645450Sbrendan 39655450Sbrendan /* 39665450Sbrendan * Compute and store the buffer cksum before 39675450Sbrendan * writing. On debug the cksum is verified first. 39685450Sbrendan */ 39695450Sbrendan arc_cksum_verify(ab->b_buf); 39705450Sbrendan arc_cksum_compute(ab->b_buf, B_TRUE); 39715450Sbrendan 39725450Sbrendan mutex_exit(hash_lock); 39735450Sbrendan 39745450Sbrendan wzio = zio_write_phys(pio, dev->l2ad_vdev, 39755450Sbrendan dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 39765450Sbrendan NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 39775450Sbrendan ZIO_FLAG_CANFAIL, B_FALSE); 39785450Sbrendan 39795450Sbrendan DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 39805450Sbrendan zio_t *, wzio); 39815450Sbrendan (void) zio_nowait(wzio); 39825450Sbrendan 39835450Sbrendan write_sz += buf_sz; 39845450Sbrendan dev->l2ad_hand += buf_sz; 39855450Sbrendan } 39865450Sbrendan 39875450Sbrendan mutex_exit(list_lock); 39885450Sbrendan 39895450Sbrendan if (full == B_TRUE) 39905450Sbrendan break; 39915450Sbrendan } 39925450Sbrendan mutex_exit(&l2arc_buflist_mtx); 39935450Sbrendan 39945450Sbrendan if (pio == NULL) { 39955450Sbrendan ASSERT3U(write_sz, ==, 0); 39965450Sbrendan kmem_cache_free(hdr_cache, head); 39975450Sbrendan return; 39985450Sbrendan } 39995450Sbrendan 40005450Sbrendan ASSERT3U(write_sz, <=, target_sz); 40015450Sbrendan ARCSTAT_BUMP(arcstat_l2_writes_sent); 40025450Sbrendan ARCSTAT_INCR(arcstat_l2_size, write_sz); 40035450Sbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); 40045450Sbrendan 40055450Sbrendan /* 40065450Sbrendan * Bump device hand to the device start if it is approaching the end. 40075450Sbrendan * l2arc_evict() will already have evicted ahead for this case. 40085450Sbrendan */ 40095450Sbrendan if (dev->l2ad_hand >= (dev->l2ad_end - dev->l2ad_write)) { 40105450Sbrendan spa_l2cache_space_update(dev->l2ad_vdev, 0, 40115450Sbrendan dev->l2ad_end - dev->l2ad_hand); 40125450Sbrendan dev->l2ad_hand = dev->l2ad_start; 40135450Sbrendan dev->l2ad_evict = dev->l2ad_start; 40145450Sbrendan dev->l2ad_first = B_FALSE; 40155450Sbrendan } 40165450Sbrendan 40175450Sbrendan (void) zio_wait(pio); 40185450Sbrendan } 40195450Sbrendan 40205450Sbrendan /* 40215450Sbrendan * This thread feeds the L2ARC at regular intervals. This is the beating 40225450Sbrendan * heart of the L2ARC. 
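 *
 * One feed cycle, in outline (the body below has the details and the
 * locking):
 *
 *	(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
 *	    lbolt + (hz * interval));
 *	if (l2arc_ndev == 0 || arc_reclaim_needed())
 *		continue;
 *	dev = l2arc_dev_get_next();
 *	l2arc_evict(dev, dev->l2ad_write, B_FALSE);
 *	l2arc_write_buffers(dev->l2ad_spa, dev);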
40235450Sbrendan */ 40245450Sbrendan static void 40255450Sbrendan l2arc_feed_thread(void) 40265450Sbrendan { 40275450Sbrendan callb_cpr_t cpr; 40285450Sbrendan l2arc_dev_t *dev; 40295450Sbrendan spa_t *spa; 40305450Sbrendan int interval; 40315450Sbrendan boolean_t startup = B_TRUE; 40325450Sbrendan 40335450Sbrendan CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 40345450Sbrendan 40355450Sbrendan mutex_enter(&l2arc_feed_thr_lock); 40365450Sbrendan 40375450Sbrendan while (l2arc_thread_exit == 0) { 40385450Sbrendan /* 40395450Sbrendan * Initially pause for L2ARC_FEED_DELAY seconds as a grace 40405450Sbrendan * interval during boot, followed by l2arc_feed_secs seconds 40415450Sbrendan * thereafter. 40425450Sbrendan */ 40435450Sbrendan CALLB_CPR_SAFE_BEGIN(&cpr); 40445450Sbrendan if (startup) { 40455450Sbrendan interval = L2ARC_FEED_DELAY; 40465450Sbrendan startup = B_FALSE; 40475450Sbrendan } else { 40485450Sbrendan interval = l2arc_feed_secs; 40495450Sbrendan } 40505450Sbrendan (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 40515450Sbrendan lbolt + (hz * interval)); 40525450Sbrendan CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 40535450Sbrendan 40545450Sbrendan /* 40555450Sbrendan * Do nothing until L2ARC devices exist. 40565450Sbrendan */ 40575450Sbrendan mutex_enter(&l2arc_dev_mtx); 40585450Sbrendan if (l2arc_ndev == 0) { 40595450Sbrendan mutex_exit(&l2arc_dev_mtx); 40605450Sbrendan continue; 40615450Sbrendan } 40625450Sbrendan 40635450Sbrendan /* 40645450Sbrendan * Avoid contributing to memory pressure. 40655450Sbrendan */ 40665450Sbrendan if (arc_reclaim_needed()) { 40675450Sbrendan ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 40685450Sbrendan mutex_exit(&l2arc_dev_mtx); 40695450Sbrendan continue; 40705450Sbrendan } 40715450Sbrendan 40725450Sbrendan /* 40735450Sbrendan * This selects the next l2arc device to write to, and in 40745450Sbrendan * doing so the next spa to feed from: dev->l2ad_spa. 40755450Sbrendan */ 40765450Sbrendan if ((dev = l2arc_dev_get_next()) == NULL) { 40775450Sbrendan mutex_exit(&l2arc_dev_mtx); 40785450Sbrendan continue; 40795450Sbrendan } 40805450Sbrendan spa = dev->l2ad_spa; 40815450Sbrendan ASSERT(spa != NULL); 40825450Sbrendan ARCSTAT_BUMP(arcstat_l2_feeds); 40835450Sbrendan 40845450Sbrendan /* 40855450Sbrendan * Evict L2ARC buffers that will be overwritten. 40865450Sbrendan */ 40875450Sbrendan l2arc_evict(dev, dev->l2ad_write, B_FALSE); 40885450Sbrendan 40895450Sbrendan /* 40905450Sbrendan * Write ARC buffers. 40915450Sbrendan */ 40925450Sbrendan l2arc_write_buffers(spa, dev); 40935450Sbrendan mutex_exit(&l2arc_dev_mtx); 40945450Sbrendan } 40955450Sbrendan 40965450Sbrendan l2arc_thread_exit = 0; 40975450Sbrendan cv_broadcast(&l2arc_feed_thr_cv); 40985450Sbrendan CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 40995450Sbrendan thread_exit(); 41005450Sbrendan } 41015450Sbrendan 41025450Sbrendan /* 41035450Sbrendan * Add a vdev for use by the L2ARC. By this point the spa has already 41045450Sbrendan * validated the vdev and opened it. 41055450Sbrendan */ 41065450Sbrendan void 41075450Sbrendan l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) 41085450Sbrendan { 41095450Sbrendan l2arc_dev_t *adddev; 41105450Sbrendan 41115450Sbrendan /* 41125450Sbrendan * Create a new l2arc device entry. 
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_write = l2arc_write_max;
	adddev->l2ad_start = start;
	adddev->l2ad_end = end;
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	ASSERT3U(adddev->l2ad_write, >, 0);

	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2node));

	spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * We can only grab the spa config lock when cache device writes
	 * complete.
	 */
	ASSERT3U(l2arc_writes_sent, ==, l2arc_writes_done);

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
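	 * Passing B_TRUE as the final argument to l2arc_evict() below
	 * flushes every buffer on the device, not just the region ahead
	 * of the write hand.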
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(remdev->l2ad_buflist);
	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
	kmem_free(remdev, sizeof (l2arc_dev_t));

	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}

/*
 * Initialize global L2ARC state: the locks, the device and free-on-write
 * lists, and the feed thread.
 */
void
l2arc_init()
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

/*
 * Tear down global L2ARC state: stop the feed thread, then destroy the
 * locks and lists created by l2arc_init().
 */
void
l2arc_fini()
{
	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_buflist_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}