/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic: we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict.  In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes).  We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
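 *
 * As a rough (non-normative) sketch of the convention described above,
 * a typical lookup path looks like:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(spa, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... manipulate hdr while hash_lock is held ...
 *		mutex_exit(hash_lock);
 *	}
 *
 * whereas code that is walking an arc list (and therefore already holds
 * that list's mutex) must take the hash lock with mutex_tryenter() and
 * simply skip the buffer if the attempt fails.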
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are the
 * only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
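 *
 * As an illustrative, simplified example of the resulting lifecycle: a
 * block read in from disk first lands in arc_mru; a later hit promotes it
 * to arc_mfu; if it is evicted, only its header survives and moves to the
 * corresponding ghost state (arc_mru_ghost or arc_mfu_ghost), where a
 * subsequent hit tells the ARC how to adapt its target sizes.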
 */

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

typedef struct arc_stats {
	kstat_named_t	arcstat_hits;
	kstat_named_t	arcstat_misses;
	kstat_named_t	arcstat_demand_data_hits;
	kstat_named_t	arcstat_demand_data_misses;
	kstat_named_t	arcstat_demand_metadata_hits;
	kstat_named_t	arcstat_demand_metadata_misses;
	kstat_named_t	arcstat_prefetch_data_hits;
	kstat_named_t	arcstat_prefetch_data_misses;
	kstat_named_t	arcstat_prefetch_metadata_hits;
	kstat_named_t	arcstat_prefetch_metadata_misses;
	kstat_named_t	arcstat_mru_hits;
	kstat_named_t	arcstat_mru_ghost_hits;
	kstat_named_t	arcstat_mfu_hits;
	kstat_named_t	arcstat_mfu_ghost_hits;
	kstat_named_t	arcstat_deleted;
	kstat_named_t	arcstat_recycle_miss;
	kstat_named_t	arcstat_mutex_miss;
	kstat_named_t	arcstat_evict_skip;
	kstat_named_t	arcstat_hash_elements;
	kstat_named_t	arcstat_hash_elements_max;
	kstat_named_t	arcstat_hash_collisions;
	kstat_named_t	arcstat_hash_chains;
	kstat_named_t	arcstat_hash_chain_max;
	kstat_named_t	arcstat_p;
	kstat_named_t	arcstat_c;
	kstat_named_t	arcstat_c_min;
	kstat_named_t	arcstat_c_max;
	kstat_named_t	arcstat_size;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
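 *
 * (The public flags, e.g. ARC_PREFETCH, are defined in arc.h and occupy
 * the low-order bits, which is why the private flags below start at bit 9.)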
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
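	 * For example, with 8-byte pointers a machine with 4GB of physical
	 * memory ends up with 4GB/64K = 64K hash entries, i.e. a 512KB
	 * table; the loop below simply doubles hsize until it reaches that
	 * target (and the retry path halves it if the allocation fails).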
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static void
arc_cksum_compute(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	if (buf->b_hdr->b_state != arc_anon)
		panic("modifying non-anon buffer!");
	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
		panic("modifying buffer while i/o in progress!");
	arc_cksum_verify(buf);
	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf);
}

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
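	 * (Only unreferenced buffers live on the state lists; see the
	 * refcnt == 0 test below.)  Note that for the ghost states the
	 * accounting delta is a single b_size rather than
	 * b_datacnt * b_size, since ghost headers carry no data buffers.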
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);
			ASSERT3U(new_state->arcs_size + to_delta, >=, *size);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && old_state != arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;
}

void
arc_space_consume(uint64_t space)
{
	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				zio_buf_free(buf->b_data, size);
				arc_space_return(size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				zio_data_buf_free(buf->b_data, size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 */
static void *
arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags = ARC_IN_HASH_TABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	return (stolen);
}

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
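 * A negative byte count means "remove everything on the list"; this is
 * how arc_flush() below empties the ghost states.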
 */
static void
arc_evict_ghost(arc_state_t *state, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			arc_change_state(arc_anon, ab, hash_lock);
			mutex_exit(hash_lock);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;
			arc_hdr_destroy(ab);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->arcs_mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over, todelete;

	top_sz = arc_anon->arcs_size + arc_mru->arcs_size;

	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		int64_t toevict =
		    MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p);
		(void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_DATA);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		int64_t toevict =
		    MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p);
		(void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_METADATA);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;

	if (mru_over > 0) {
		if (arc_mru_ghost->arcs_size > 0) {
			todelete = MIN(arc_mru_ghost->arcs_size, mru_over);
			arc_evict_ghost(arc_mru_ghost, todelete);
		}
	}

	if ((arc_over = arc_size - arc_c) > 0) {
		int64_t tbl_over;

		if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
			int64_t toevict =
			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over);
			(void) arc_evict(arc_mfu, toevict, FALSE,
			    ARC_BUFC_DATA);
			arc_over = arc_size - arc_c;
		}

		if (arc_over > 0 &&
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
			int64_t toevict =
			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA],
			    arc_over);
			(void) arc_evict(arc_mfu, toevict, FALSE,
			    ARC_BUFC_METADATA);
		}

		tbl_over = arc_size + arc_mru_ghost->arcs_size +
		    arc_mfu_ghost->arcs_size - arc_c * 2;

		if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) {
			todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over);
			arc_evict_ghost(arc_mfu_ghost, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
1405789Sahrens */ 1406789Sahrens void 1407789Sahrens arc_flush(void) 1408789Sahrens { 1409*4309Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) 1410*4309Smaybee (void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_DATA); 1411*4309Smaybee while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) 1412*4309Smaybee (void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_METADATA); 1413*4309Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) 1414*4309Smaybee (void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_DATA); 1415*4309Smaybee while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) 1416*4309Smaybee (void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_METADATA); 1417789Sahrens 14183403Sbmc arc_evict_ghost(arc_mru_ghost, -1); 14193403Sbmc arc_evict_ghost(arc_mfu_ghost, -1); 14201544Seschrock 14211544Seschrock mutex_enter(&arc_reclaim_thr_lock); 14221544Seschrock arc_do_user_evicts(); 14231544Seschrock mutex_exit(&arc_reclaim_thr_lock); 14241544Seschrock ASSERT(arc_eviction_list == NULL); 1425789Sahrens } 1426789Sahrens 14273158Smaybee int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 14282391Smaybee 1429789Sahrens void 14303158Smaybee arc_shrink(void) 1431789Sahrens { 14323403Sbmc if (arc_c > arc_c_min) { 14333158Smaybee uint64_t to_free; 1434789Sahrens 14352048Sstans #ifdef _KERNEL 14363403Sbmc to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); 14372048Sstans #else 14383403Sbmc to_free = arc_c >> arc_shrink_shift; 14392048Sstans #endif 14403403Sbmc if (arc_c > arc_c_min + to_free) 14413403Sbmc atomic_add_64(&arc_c, -to_free); 14423158Smaybee else 14433403Sbmc arc_c = arc_c_min; 14442048Sstans 14453403Sbmc atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 14463403Sbmc if (arc_c > arc_size) 14473403Sbmc arc_c = MAX(arc_size, arc_c_min); 14483403Sbmc if (arc_p > arc_c) 14493403Sbmc arc_p = (arc_c >> 1); 14503403Sbmc ASSERT(arc_c >= arc_c_min); 14513403Sbmc ASSERT((int64_t)arc_p >= 0); 14523158Smaybee } 1453789Sahrens 14543403Sbmc if (arc_size > arc_c) 14553158Smaybee arc_adjust(); 1456789Sahrens } 1457789Sahrens 1458789Sahrens static int 1459789Sahrens arc_reclaim_needed(void) 1460789Sahrens { 1461789Sahrens uint64_t extra; 1462789Sahrens 1463789Sahrens #ifdef _KERNEL 14642048Sstans 14652048Sstans if (needfree) 14662048Sstans return (1); 14672048Sstans 1468789Sahrens /* 1469789Sahrens * take 'desfree' extra pages, so we reclaim sooner, rather than later 1470789Sahrens */ 1471789Sahrens extra = desfree; 1472789Sahrens 1473789Sahrens /* 1474789Sahrens * check that we're out of range of the pageout scanner. It starts to 1475789Sahrens * schedule paging if freemem is less than lotsfree and needfree. 1476789Sahrens * lotsfree is the high-water mark for pageout, and needfree is the 1477789Sahrens * number of needed free pages. We add extra pages here to make sure 1478789Sahrens * the scanner doesn't start up while we're freeing memory. 1479789Sahrens */ 1480789Sahrens if (freemem < lotsfree + needfree + extra) 1481789Sahrens return (1); 1482789Sahrens 1483789Sahrens /* 1484789Sahrens * check to make sure that swapfs has enough space so that anon 1485789Sahrens * reservations can still succeeed. anon_resvmem() checks that the 1486789Sahrens * availrmem is greater than swapfs_minfree, and the number of reserved 1487789Sahrens * swap pages. We also add a bit of extra here just to prevent 1488789Sahrens * circumstances from getting really dire. 
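 *
 * A hypothetical example (all numbers illustrative, in pages): with
 * swapfs_minfree = 2000, swapfs_reserve = 1000, desfree = 2000 and
 * availrmem = 4500, we reclaim because
 * availrmem < swapfs_minfree + swapfs_reserve + extra = 5000; the
 * freemem test above behaves the same way against
 * lotsfree + needfree + extra.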
1489789Sahrens */ 1490789Sahrens if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1491789Sahrens return (1); 1492789Sahrens 14931936Smaybee #if defined(__i386) 1494789Sahrens /* 1495789Sahrens * If we're on an i386 platform, it's possible that we'll exhaust the 1496789Sahrens * kernel heap space before we ever run out of available physical 1497789Sahrens * memory. Most checks of the size of the heap_area compare against 1498789Sahrens * tune.t_minarmem, which is the minimum available real memory that we 1499789Sahrens * can have in the system. However, this is generally fixed at 25 pages 1500789Sahrens * which is so low that it's useless. In this comparison, we seek to 1501789Sahrens * calculate the total heap-size, and reclaim if more than 3/4ths of the 1502789Sahrens * heap is allocated. (Or, in the caclulation, if less than 1/4th is 1503789Sahrens * free) 1504789Sahrens */ 1505789Sahrens if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1506789Sahrens (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1507789Sahrens return (1); 1508789Sahrens #endif 1509789Sahrens 1510789Sahrens #else 1511789Sahrens if (spa_get_random(100) == 0) 1512789Sahrens return (1); 1513789Sahrens #endif 1514789Sahrens return (0); 1515789Sahrens } 1516789Sahrens 1517789Sahrens static void 1518789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1519789Sahrens { 1520789Sahrens size_t i; 1521789Sahrens kmem_cache_t *prev_cache = NULL; 15223290Sjohansen kmem_cache_t *prev_data_cache = NULL; 1523789Sahrens extern kmem_cache_t *zio_buf_cache[]; 15243290Sjohansen extern kmem_cache_t *zio_data_buf_cache[]; 1525789Sahrens 15261484Sek110237 #ifdef _KERNEL 1527*4309Smaybee if (arc_meta_used >= arc_meta_limit) { 1528*4309Smaybee /* 1529*4309Smaybee * We are exceeding our meta-data cache limit. 1530*4309Smaybee * Purge some DNLC entries to release holds on meta-data. 1531*4309Smaybee */ 1532*4309Smaybee dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 1533*4309Smaybee } 15341936Smaybee #if defined(__i386) 15351936Smaybee /* 15361936Smaybee * Reclaim unused memory from all kmem caches. 15371936Smaybee */ 15381936Smaybee kmem_reap(); 15391936Smaybee #endif 15401484Sek110237 #endif 15411484Sek110237 1542789Sahrens /* 15431544Seschrock * An agressive reclamation will shrink the cache size as well as 15441544Seschrock * reap free buffers from the arc kmem caches. 
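 *
 * To make the "shrink" part concrete (hypothetical numbers): with
 * arc_shrink_shift = 5, arc_shrink() trims arc_c by 1/32nd, so an
 * arc_c of 4GB drops by 128MB per aggressive pass (or by
 * ptob(needfree), whichever is larger), and arc_p is trimmed by the
 * same fraction of its own value before the kmem caches are reaped.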
1545789Sahrens */ 1546789Sahrens if (strat == ARC_RECLAIM_AGGR) 15473158Smaybee arc_shrink(); 1548789Sahrens 1549789Sahrens for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1550789Sahrens if (zio_buf_cache[i] != prev_cache) { 1551789Sahrens prev_cache = zio_buf_cache[i]; 1552789Sahrens kmem_cache_reap_now(zio_buf_cache[i]); 1553789Sahrens } 15543290Sjohansen if (zio_data_buf_cache[i] != prev_data_cache) { 15553290Sjohansen prev_data_cache = zio_data_buf_cache[i]; 15563290Sjohansen kmem_cache_reap_now(zio_data_buf_cache[i]); 15573290Sjohansen } 1558789Sahrens } 15591544Seschrock kmem_cache_reap_now(buf_cache); 15601544Seschrock kmem_cache_reap_now(hdr_cache); 1561789Sahrens } 1562789Sahrens 1563789Sahrens static void 1564789Sahrens arc_reclaim_thread(void) 1565789Sahrens { 1566789Sahrens clock_t growtime = 0; 1567789Sahrens arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1568789Sahrens callb_cpr_t cpr; 1569789Sahrens 1570789Sahrens CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1571789Sahrens 1572789Sahrens mutex_enter(&arc_reclaim_thr_lock); 1573789Sahrens while (arc_thread_exit == 0) { 1574789Sahrens if (arc_reclaim_needed()) { 1575789Sahrens 15763403Sbmc if (arc_no_grow) { 1577789Sahrens if (last_reclaim == ARC_RECLAIM_CONS) { 1578789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1579789Sahrens } else { 1580789Sahrens last_reclaim = ARC_RECLAIM_CONS; 1581789Sahrens } 1582789Sahrens } else { 15833403Sbmc arc_no_grow = TRUE; 1584789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1585789Sahrens membar_producer(); 1586789Sahrens } 1587789Sahrens 1588789Sahrens /* reset the growth delay for every reclaim */ 1589789Sahrens growtime = lbolt + (arc_grow_retry * hz); 1590789Sahrens 1591789Sahrens arc_kmem_reap_now(last_reclaim); 1592789Sahrens 1593*4309Smaybee } else if (arc_no_grow && lbolt >= growtime) { 15943403Sbmc arc_no_grow = FALSE; 1595789Sahrens } 1596789Sahrens 15973403Sbmc if (2 * arc_c < arc_size + 15983403Sbmc arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) 15993298Smaybee arc_adjust(); 16003298Smaybee 16011544Seschrock if (arc_eviction_list != NULL) 16021544Seschrock arc_do_user_evicts(); 16031544Seschrock 1604789Sahrens /* block until needed, or one second, whichever is shorter */ 1605789Sahrens CALLB_CPR_SAFE_BEGIN(&cpr); 1606789Sahrens (void) cv_timedwait(&arc_reclaim_thr_cv, 1607789Sahrens &arc_reclaim_thr_lock, (lbolt + hz)); 1608789Sahrens CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1609789Sahrens } 1610789Sahrens 1611789Sahrens arc_thread_exit = 0; 1612789Sahrens cv_broadcast(&arc_reclaim_thr_cv); 1613789Sahrens CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1614789Sahrens thread_exit(); 1615789Sahrens } 1616789Sahrens 16171544Seschrock /* 16181544Seschrock * Adapt arc info given the number of bytes we are trying to add and 16191544Seschrock * the state that we are comming from. This function is only called 16201544Seschrock * when we are adding new content to the cache. 16211544Seschrock */ 1622789Sahrens static void 16231544Seschrock arc_adapt(int bytes, arc_state_t *state) 1624789Sahrens { 16251544Seschrock int mult; 16261544Seschrock 16271544Seschrock ASSERT(bytes > 0); 1628789Sahrens /* 16291544Seschrock * Adapt the target size of the MRU list: 16301544Seschrock * - if we just hit in the MRU ghost list, then increase 16311544Seschrock * the target size of the MRU list. 
16321544Seschrock * - if we just hit in the MFU ghost list, then increase 16331544Seschrock * the target size of the MFU list by decreasing the 16341544Seschrock * target size of the MRU list. 1635789Sahrens */ 16363403Sbmc if (state == arc_mru_ghost) { 16373403Sbmc mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 16383403Sbmc 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 16391544Seschrock 16403403Sbmc arc_p = MIN(arc_c, arc_p + bytes * mult); 16413403Sbmc } else if (state == arc_mfu_ghost) { 16423403Sbmc mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 16433403Sbmc 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 16441544Seschrock 16453403Sbmc arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 16461544Seschrock } 16473403Sbmc ASSERT((int64_t)arc_p >= 0); 1648789Sahrens 1649789Sahrens if (arc_reclaim_needed()) { 1650789Sahrens cv_signal(&arc_reclaim_thr_cv); 1651789Sahrens return; 1652789Sahrens } 1653789Sahrens 16543403Sbmc if (arc_no_grow) 1655789Sahrens return; 1656789Sahrens 16573403Sbmc if (arc_c >= arc_c_max) 16581544Seschrock return; 16591544Seschrock 1660789Sahrens /* 16611544Seschrock * If we're within (2 * maxblocksize) bytes of the target 16621544Seschrock * cache size, increment the target cache size 1663789Sahrens */ 16643403Sbmc if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 16653403Sbmc atomic_add_64(&arc_c, (int64_t)bytes); 16663403Sbmc if (arc_c > arc_c_max) 16673403Sbmc arc_c = arc_c_max; 16683403Sbmc else if (state == arc_anon) 16693403Sbmc atomic_add_64(&arc_p, (int64_t)bytes); 16703403Sbmc if (arc_p > arc_c) 16713403Sbmc arc_p = arc_c; 1672789Sahrens } 16733403Sbmc ASSERT((int64_t)arc_p >= 0); 1674789Sahrens } 1675789Sahrens 1676789Sahrens /* 16771544Seschrock * Check if the cache has reached its limits and eviction is required 16781544Seschrock * prior to insert. 1679789Sahrens */ 1680789Sahrens static int 1681*4309Smaybee arc_evict_needed(arc_buf_contents_t type) 1682789Sahrens { 1683*4309Smaybee if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 1684*4309Smaybee return (1); 1685*4309Smaybee 1686*4309Smaybee #ifdef _KERNEL 1687*4309Smaybee /* 1688*4309Smaybee * If zio data pages are being allocated out of a separate heap segment, 1689*4309Smaybee * then enforce that the size of available vmem for this area remains 1690*4309Smaybee * above about 1/32nd free. 1691*4309Smaybee */ 1692*4309Smaybee if (type == ARC_BUFC_DATA && zio_arena != NULL && 1693*4309Smaybee vmem_size(zio_arena, VMEM_FREE) < 1694*4309Smaybee (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 1695*4309Smaybee return (1); 1696*4309Smaybee #endif 1697*4309Smaybee 1698789Sahrens if (arc_reclaim_needed()) 1699789Sahrens return (1); 1700789Sahrens 17013403Sbmc return (arc_size > arc_c); 1702789Sahrens } 1703789Sahrens 1704789Sahrens /* 17052688Smaybee * The buffer, supplied as the first argument, needs a data block. 17062688Smaybee * So, if we are at cache max, determine which cache should be victimized. 17072688Smaybee * We have the following cases: 1708789Sahrens * 17093403Sbmc * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 1710789Sahrens * In this situation if we're out of space, but the resident size of the MFU is 1711789Sahrens * under the limit, victimize the MFU cache to satisfy this insertion request. 1712789Sahrens * 17133403Sbmc * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 1714789Sahrens * Here, we've used up all of the available space for the MRU, so we need to 1715789Sahrens * evict from our own cache instead. 
Evict from the set of resident MRU 1716789Sahrens * entries. 1717789Sahrens * 17183403Sbmc * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 1719789Sahrens * c minus p represents the MFU space in the cache, since p is the size of the 1720789Sahrens * cache that is dedicated to the MRU. In this situation there's still space on 1721789Sahrens * the MFU side, so the MRU side needs to be victimized. 1722789Sahrens * 17233403Sbmc * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 1724789Sahrens * MFU's resident set is consuming more space than it has been allotted. In 1725789Sahrens * this situation, we must victimize our own cache, the MFU, for this insertion. 1726789Sahrens */ 1727789Sahrens static void 17282688Smaybee arc_get_data_buf(arc_buf_t *buf) 1729789Sahrens { 17303290Sjohansen arc_state_t *state = buf->b_hdr->b_state; 17313290Sjohansen uint64_t size = buf->b_hdr->b_size; 17323290Sjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 17332688Smaybee 17342688Smaybee arc_adapt(size, state); 1735789Sahrens 17362688Smaybee /* 17372688Smaybee * We have not yet reached cache maximum size, 17382688Smaybee * just allocate a new buffer. 17392688Smaybee */ 1740*4309Smaybee if (!arc_evict_needed(type)) { 17413290Sjohansen if (type == ARC_BUFC_METADATA) { 17423290Sjohansen buf->b_data = zio_buf_alloc(size); 1743*4309Smaybee arc_space_consume(size); 17443290Sjohansen } else { 17453290Sjohansen ASSERT(type == ARC_BUFC_DATA); 17463290Sjohansen buf->b_data = zio_data_buf_alloc(size); 1747*4309Smaybee atomic_add_64(&arc_size, size); 17483290Sjohansen } 17492688Smaybee goto out; 17502688Smaybee } 17512688Smaybee 17522688Smaybee /* 17532688Smaybee * If we are prefetching from the mfu ghost list, this buffer 17542688Smaybee * will end up on the mru list; so steal space from there. 17552688Smaybee */ 17563403Sbmc if (state == arc_mfu_ghost) 17573403Sbmc state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 17583403Sbmc else if (state == arc_mru_ghost) 17593403Sbmc state = arc_mru; 1760789Sahrens 17613403Sbmc if (state == arc_mru || state == arc_anon) { 17623403Sbmc uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 1763*4309Smaybee state = (arc_mfu->arcs_lsize[type] > 0 && 1764*4309Smaybee arc_p > mru_used) ? arc_mfu : arc_mru; 1765789Sahrens } else { 17662688Smaybee /* MFU cases */ 17673403Sbmc uint64_t mfu_space = arc_c - arc_p; 1768*4309Smaybee state = (arc_mru->arcs_lsize[type] > 0 && 1769*4309Smaybee mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 17702688Smaybee } 17713290Sjohansen if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) { 17723290Sjohansen if (type == ARC_BUFC_METADATA) { 17733290Sjohansen buf->b_data = zio_buf_alloc(size); 1774*4309Smaybee arc_space_consume(size); 17753290Sjohansen } else { 17763290Sjohansen ASSERT(type == ARC_BUFC_DATA); 17773290Sjohansen buf->b_data = zio_data_buf_alloc(size); 1778*4309Smaybee atomic_add_64(&arc_size, size); 17793290Sjohansen } 17803403Sbmc ARCSTAT_BUMP(arcstat_recycle_miss); 17812688Smaybee } 17822688Smaybee ASSERT(buf->b_data != NULL); 17832688Smaybee out: 17842688Smaybee /* 17852688Smaybee * Update the state size. Note that ghost states have a 17862688Smaybee * "ghost size" and so don't need to be updated. 
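 *
 * Put differently: arcs_size counts every resident byte in the state,
 * while arcs_lsize[] counts only bytes that are currently evictable,
 * which is why the lsize update below is conditional on the header
 * being linked on a list with a zero refcount.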
17872688Smaybee */ 17882688Smaybee if (!GHOST_STATE(buf->b_hdr->b_state)) { 17892688Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 17902688Smaybee 17913403Sbmc atomic_add_64(&hdr->b_state->arcs_size, size); 17922688Smaybee if (list_link_active(&hdr->b_arc_node)) { 17932688Smaybee ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1794*4309Smaybee atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 1795789Sahrens } 17963298Smaybee /* 17973298Smaybee * If we are growing the cache, and we are adding anonymous 17983403Sbmc * data, and we have outgrown arc_p, update arc_p 17993298Smaybee */ 18003403Sbmc if (arc_size < arc_c && hdr->b_state == arc_anon && 18013403Sbmc arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 18023403Sbmc arc_p = MIN(arc_c, arc_p + size); 1803789Sahrens } 1804789Sahrens } 1805789Sahrens 1806789Sahrens /* 1807789Sahrens * This routine is called whenever a buffer is accessed. 18081544Seschrock * NOTE: the hash lock is dropped in this function. 1809789Sahrens */ 1810789Sahrens static void 18112688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 1812789Sahrens { 1813789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 1814789Sahrens 18153403Sbmc if (buf->b_state == arc_anon) { 1816789Sahrens /* 1817789Sahrens * This buffer is not in the cache, and does not 1818789Sahrens * appear in our "ghost" list. Add the new buffer 1819789Sahrens * to the MRU state. 1820789Sahrens */ 1821789Sahrens 1822789Sahrens ASSERT(buf->b_arc_access == 0); 1823789Sahrens buf->b_arc_access = lbolt; 18241544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 18253403Sbmc arc_change_state(arc_mru, buf, hash_lock); 1826789Sahrens 18273403Sbmc } else if (buf->b_state == arc_mru) { 1828789Sahrens /* 18292391Smaybee * If this buffer is here because of a prefetch, then either: 18302391Smaybee * - clear the flag if this is a "referencing" read 18312391Smaybee * (any subsequent access will bump this into the MFU state). 18322391Smaybee * or 18332391Smaybee * - move the buffer to the head of the list if this is 18342391Smaybee * another prefetch (to make it less likely to be evicted). 1835789Sahrens */ 1836789Sahrens if ((buf->b_flags & ARC_PREFETCH) != 0) { 18372391Smaybee if (refcount_count(&buf->b_refcnt) == 0) { 18382391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 18392391Smaybee } else { 18402391Smaybee buf->b_flags &= ~ARC_PREFETCH; 18413403Sbmc ARCSTAT_BUMP(arcstat_mru_hits); 18422391Smaybee } 18432391Smaybee buf->b_arc_access = lbolt; 1844789Sahrens return; 1845789Sahrens } 1846789Sahrens 1847789Sahrens /* 1848789Sahrens * This buffer has been "accessed" only once so far, 1849789Sahrens * but it is still in the cache. Move it to the MFU 1850789Sahrens * state. 1851789Sahrens */ 1852789Sahrens if (lbolt > buf->b_arc_access + ARC_MINTIME) { 1853789Sahrens /* 1854789Sahrens * More than 125ms have passed since we 1855789Sahrens * instantiated this buffer. Move it to the 1856789Sahrens * most frequently used state. 1857789Sahrens */ 1858789Sahrens buf->b_arc_access = lbolt; 18591544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 18603403Sbmc arc_change_state(arc_mfu, buf, hash_lock); 1861789Sahrens } 18623403Sbmc ARCSTAT_BUMP(arcstat_mru_hits); 18633403Sbmc } else if (buf->b_state == arc_mru_ghost) { 1864789Sahrens arc_state_t *new_state; 1865789Sahrens /* 1866789Sahrens * This buffer has been "accessed" recently, but 1867789Sahrens * was evicted from the cache. Move it to the 1868789Sahrens * MFU state. 
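 *
 * (One exception, handled just below: a prefetch access sends the
 * buffer back to the MRU list instead, presumably because a prefetch
 * by itself does not demonstrate reuse.)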
1869789Sahrens */ 1870789Sahrens 1871789Sahrens if (buf->b_flags & ARC_PREFETCH) { 18723403Sbmc new_state = arc_mru; 18732391Smaybee if (refcount_count(&buf->b_refcnt) > 0) 18742391Smaybee buf->b_flags &= ~ARC_PREFETCH; 18751544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1876789Sahrens } else { 18773403Sbmc new_state = arc_mfu; 18781544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1879789Sahrens } 1880789Sahrens 1881789Sahrens buf->b_arc_access = lbolt; 1882789Sahrens arc_change_state(new_state, buf, hash_lock); 1883789Sahrens 18843403Sbmc ARCSTAT_BUMP(arcstat_mru_ghost_hits); 18853403Sbmc } else if (buf->b_state == arc_mfu) { 1886789Sahrens /* 1887789Sahrens * This buffer has been accessed more than once and is 1888789Sahrens * still in the cache. Keep it in the MFU state. 1889789Sahrens * 18902391Smaybee * NOTE: an add_reference() that occurred when we did 18912391Smaybee * the arc_read() will have kicked this off the list. 18922391Smaybee * If it was a prefetch, we will explicitly move it to 18932391Smaybee * the head of the list now. 1894789Sahrens */ 18952391Smaybee if ((buf->b_flags & ARC_PREFETCH) != 0) { 18962391Smaybee ASSERT(refcount_count(&buf->b_refcnt) == 0); 18972391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 18982391Smaybee } 18993403Sbmc ARCSTAT_BUMP(arcstat_mfu_hits); 19002391Smaybee buf->b_arc_access = lbolt; 19013403Sbmc } else if (buf->b_state == arc_mfu_ghost) { 19023403Sbmc arc_state_t *new_state = arc_mfu; 1903789Sahrens /* 1904789Sahrens * This buffer has been accessed more than once but has 1905789Sahrens * been evicted from the cache. Move it back to the 1906789Sahrens * MFU state. 1907789Sahrens */ 1908789Sahrens 19092391Smaybee if (buf->b_flags & ARC_PREFETCH) { 19102391Smaybee /* 19112391Smaybee * This is a prefetch access... 19122391Smaybee * move this block back to the MRU state. 
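 *
 * (For orientation, the full set of transitions handled by
 * arc_access() is: anon to mru; mru to mfu once ARC_MINTIME has
 * passed; mru_ghost to mfu, or back to mru for a prefetch; mfu stays
 * mfu; and mfu_ghost to mfu, or back to mru for a prefetch, which is
 * the case being handled here.)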
19132391Smaybee */ 19142391Smaybee ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 19153403Sbmc new_state = arc_mru; 19162391Smaybee } 19172391Smaybee 1918789Sahrens buf->b_arc_access = lbolt; 19191544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 19202391Smaybee arc_change_state(new_state, buf, hash_lock); 1921789Sahrens 19223403Sbmc ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 1923789Sahrens } else { 1924789Sahrens ASSERT(!"invalid arc state"); 1925789Sahrens } 1926789Sahrens } 1927789Sahrens 1928789Sahrens /* a generic arc_done_func_t which you can use */ 1929789Sahrens /* ARGSUSED */ 1930789Sahrens void 1931789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 1932789Sahrens { 1933789Sahrens bcopy(buf->b_data, arg, buf->b_hdr->b_size); 19341544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1935789Sahrens } 1936789Sahrens 1937*4309Smaybee /* a generic arc_done_func_t */ 1938789Sahrens void 1939789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 1940789Sahrens { 1941789Sahrens arc_buf_t **bufp = arg; 1942789Sahrens if (zio && zio->io_error) { 19431544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1944789Sahrens *bufp = NULL; 1945789Sahrens } else { 1946789Sahrens *bufp = buf; 1947789Sahrens } 1948789Sahrens } 1949789Sahrens 1950789Sahrens static void 1951789Sahrens arc_read_done(zio_t *zio) 1952789Sahrens { 19531589Smaybee arc_buf_hdr_t *hdr, *found; 1954789Sahrens arc_buf_t *buf; 1955789Sahrens arc_buf_t *abuf; /* buffer we're assigning to callback */ 1956789Sahrens kmutex_t *hash_lock; 1957789Sahrens arc_callback_t *callback_list, *acb; 1958789Sahrens int freeable = FALSE; 1959789Sahrens 1960789Sahrens buf = zio->io_private; 1961789Sahrens hdr = buf->b_hdr; 1962789Sahrens 19631589Smaybee /* 19641589Smaybee * The hdr was inserted into hash-table and removed from lists 19651589Smaybee * prior to starting I/O. We should find this header, since 19661589Smaybee * it's in the hash table, and it should be legit since it's 19671589Smaybee * not possible to evict it during the I/O. The only possible 19681589Smaybee * reason for it not to be found is if we were freed during the 19691589Smaybee * read. 
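 *
 * The ASSERT below spells this out: either the lookup failed, the
 * header is marked ARC_FREED_IN_READ and there is no hash lock to
 * drop, or the lookup returned this same header with a DVA matching
 * the block pointer we just read.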
19701589Smaybee */ 19711589Smaybee found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 19723093Sahrens &hash_lock); 1973789Sahrens 19741589Smaybee ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 19751589Smaybee (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp)))); 1976789Sahrens 1977789Sahrens /* byteswap if necessary */ 1978789Sahrens callback_list = hdr->b_acb; 1979789Sahrens ASSERT(callback_list != NULL); 1980789Sahrens if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 1981789Sahrens callback_list->acb_byteswap(buf->b_data, hdr->b_size); 1982789Sahrens 19833093Sahrens arc_cksum_compute(buf); 19843093Sahrens 1985789Sahrens /* create copies of the data buffer for the callers */ 1986789Sahrens abuf = buf; 1987789Sahrens for (acb = callback_list; acb; acb = acb->acb_next) { 1988789Sahrens if (acb->acb_done) { 19892688Smaybee if (abuf == NULL) 19902688Smaybee abuf = arc_buf_clone(buf); 1991789Sahrens acb->acb_buf = abuf; 1992789Sahrens abuf = NULL; 1993789Sahrens } 1994789Sahrens } 1995789Sahrens hdr->b_acb = NULL; 1996789Sahrens hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 19971544Seschrock ASSERT(!HDR_BUF_AVAILABLE(hdr)); 19981544Seschrock if (abuf == buf) 19991544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 2000789Sahrens 2001789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2002789Sahrens 2003789Sahrens if (zio->io_error != 0) { 2004789Sahrens hdr->b_flags |= ARC_IO_ERROR; 20053403Sbmc if (hdr->b_state != arc_anon) 20063403Sbmc arc_change_state(arc_anon, hdr, hash_lock); 20071544Seschrock if (HDR_IN_HASH_TABLE(hdr)) 20081544Seschrock buf_hash_remove(hdr); 2009789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 20102391Smaybee /* convert checksum errors into IO errors */ 20111544Seschrock if (zio->io_error == ECKSUM) 20121544Seschrock zio->io_error = EIO; 2013789Sahrens } 2014789Sahrens 20151544Seschrock /* 20162391Smaybee * Broadcast before we drop the hash_lock to avoid the possibility 20172391Smaybee * that the hdr (and hence the cv) might be freed before we get to 20182391Smaybee * the cv_broadcast(). 20191544Seschrock */ 20201544Seschrock cv_broadcast(&hdr->b_cv); 20211544Seschrock 20221589Smaybee if (hash_lock) { 2023789Sahrens /* 2024789Sahrens * Only call arc_access on anonymous buffers. This is because 2025789Sahrens * if we've issued an I/O for an evicted buffer, we've already 2026789Sahrens * called arc_access (to prevent any simultaneous readers from 2027789Sahrens * getting confused). 2028789Sahrens */ 20293403Sbmc if (zio->io_error == 0 && hdr->b_state == arc_anon) 20302688Smaybee arc_access(hdr, hash_lock); 20312688Smaybee mutex_exit(hash_lock); 2032789Sahrens } else { 2033789Sahrens /* 2034789Sahrens * This block was freed while we waited for the read to 2035789Sahrens * complete. It has been removed from the hash table and 2036789Sahrens * moved to the anonymous state (so that it won't show up 2037789Sahrens * in the cache). 
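 *
 * The freeable flag set just below records whether any references
 * remain; if none do, arc_read_done() destroys the header itself once
 * the callbacks have been dispatched.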
2038789Sahrens */ 20393403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2040789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 2041789Sahrens } 2042789Sahrens 2043789Sahrens /* execute each callback and free its structure */ 2044789Sahrens while ((acb = callback_list) != NULL) { 2045789Sahrens if (acb->acb_done) 2046789Sahrens acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2047789Sahrens 2048789Sahrens if (acb->acb_zio_dummy != NULL) { 2049789Sahrens acb->acb_zio_dummy->io_error = zio->io_error; 2050789Sahrens zio_nowait(acb->acb_zio_dummy); 2051789Sahrens } 2052789Sahrens 2053789Sahrens callback_list = acb->acb_next; 2054789Sahrens kmem_free(acb, sizeof (arc_callback_t)); 2055789Sahrens } 2056789Sahrens 2057789Sahrens if (freeable) 20581544Seschrock arc_hdr_destroy(hdr); 2059789Sahrens } 2060789Sahrens 2061789Sahrens /* 2062789Sahrens * "Read" the block at the specified DVA (in bp) via the 2063789Sahrens * cache. If the block is found in the cache, invoke the provided 2064789Sahrens * callback immediately and return. Note that the `zio' parameter 2065789Sahrens * in the callback will be NULL in this case, since no IO was 2066789Sahrens * required. If the block is not in the cache, pass the read request 2067789Sahrens * on to the spa with a substitute callback function, so that the 2068789Sahrens * requested block will be added to the cache. 2069789Sahrens * 2070789Sahrens * If a read request arrives for a block that has a read in-progress, 2071789Sahrens * either wait for the in-progress read to complete (and return the 2072789Sahrens * results); or, if this is a read with a "done" func, add a record 2073789Sahrens * to the read to invoke the "done" func when the read completes, 2074789Sahrens * and return; or just return. 2075789Sahrens * 2076789Sahrens * arc_read_done() will invoke all the requested "done" functions 2077789Sahrens * for readers of this block.
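 *
 * A minimal, hypothetical calling sketch (swap_func, priority,
 * zio_flags and zb are placeholders; ARC_WAIT and arc_getbuf_func are
 * the ones defined in this file):
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *	(void) arc_read(NULL, spa, bp, swap_func, arc_getbuf_func,
 *	    &abuf, priority, zio_flags, &aflags, &zb);
 *	if (abuf != NULL) {
 *		... abuf->b_data now holds the block contents;
 *		... drop the hold later with arc_buf_remove_ref().
 *	}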
2078789Sahrens */ 2079789Sahrens int 2080789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2081789Sahrens arc_done_func_t *done, void *private, int priority, int flags, 20822391Smaybee uint32_t *arc_flags, zbookmark_t *zb) 2083789Sahrens { 2084789Sahrens arc_buf_hdr_t *hdr; 2085789Sahrens arc_buf_t *buf; 2086789Sahrens kmutex_t *hash_lock; 2087789Sahrens zio_t *rzio; 2088789Sahrens 2089789Sahrens top: 2090789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 20911544Seschrock if (hdr && hdr->b_datacnt > 0) { 2092789Sahrens 20932391Smaybee *arc_flags |= ARC_CACHED; 20942391Smaybee 2095789Sahrens if (HDR_IO_IN_PROGRESS(hdr)) { 20962391Smaybee 20972391Smaybee if (*arc_flags & ARC_WAIT) { 20982391Smaybee cv_wait(&hdr->b_cv, hash_lock); 20992391Smaybee mutex_exit(hash_lock); 21002391Smaybee goto top; 21012391Smaybee } 21022391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 21032391Smaybee 21042391Smaybee if (done) { 2105789Sahrens arc_callback_t *acb = NULL; 2106789Sahrens 2107789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), 2108789Sahrens KM_SLEEP); 2109789Sahrens acb->acb_done = done; 2110789Sahrens acb->acb_private = private; 2111789Sahrens acb->acb_byteswap = swap; 2112789Sahrens if (pio != NULL) 2113789Sahrens acb->acb_zio_dummy = zio_null(pio, 2114789Sahrens spa, NULL, NULL, flags); 2115789Sahrens 2116789Sahrens ASSERT(acb->acb_done != NULL); 2117789Sahrens acb->acb_next = hdr->b_acb; 2118789Sahrens hdr->b_acb = acb; 2119789Sahrens add_reference(hdr, hash_lock, private); 2120789Sahrens mutex_exit(hash_lock); 2121789Sahrens return (0); 2122789Sahrens } 2123789Sahrens mutex_exit(hash_lock); 2124789Sahrens return (0); 2125789Sahrens } 2126789Sahrens 21273403Sbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2128789Sahrens 21291544Seschrock if (done) { 21302688Smaybee add_reference(hdr, hash_lock, private); 21311544Seschrock /* 21321544Seschrock * If this block is already in use, create a new 21331544Seschrock * copy of the data so that we will be guaranteed 21341544Seschrock * that arc_release() will always succeed. 
21351544Seschrock */ 21361544Seschrock buf = hdr->b_buf; 21371544Seschrock ASSERT(buf); 21381544Seschrock ASSERT(buf->b_data); 21392688Smaybee if (HDR_BUF_AVAILABLE(hdr)) { 21401544Seschrock ASSERT(buf->b_efunc == NULL); 21411544Seschrock hdr->b_flags &= ~ARC_BUF_AVAILABLE; 21422688Smaybee } else { 21432688Smaybee buf = arc_buf_clone(buf); 21441544Seschrock } 21452391Smaybee } else if (*arc_flags & ARC_PREFETCH && 21462391Smaybee refcount_count(&hdr->b_refcnt) == 0) { 21472391Smaybee hdr->b_flags |= ARC_PREFETCH; 2148789Sahrens } 2149789Sahrens DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 21502688Smaybee arc_access(hdr, hash_lock); 21512688Smaybee mutex_exit(hash_lock); 21523403Sbmc ARCSTAT_BUMP(arcstat_hits); 21533403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 21543403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 21553403Sbmc data, metadata, hits); 21563403Sbmc 2157789Sahrens if (done) 2158789Sahrens done(NULL, buf, private); 2159789Sahrens } else { 2160789Sahrens uint64_t size = BP_GET_LSIZE(bp); 2161789Sahrens arc_callback_t *acb; 2162789Sahrens 2163789Sahrens if (hdr == NULL) { 2164789Sahrens /* this block is not in the cache */ 2165789Sahrens arc_buf_hdr_t *exists; 21663290Sjohansen arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 21673290Sjohansen buf = arc_buf_alloc(spa, size, private, type); 2168789Sahrens hdr = buf->b_hdr; 2169789Sahrens hdr->b_dva = *BP_IDENTITY(bp); 2170789Sahrens hdr->b_birth = bp->blk_birth; 2171789Sahrens hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2172789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2173789Sahrens if (exists) { 2174789Sahrens /* somebody beat us to the hash insert */ 2175789Sahrens mutex_exit(hash_lock); 2176789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2177789Sahrens hdr->b_birth = 0; 2178789Sahrens hdr->b_cksum0 = 0; 21791544Seschrock (void) arc_buf_remove_ref(buf, private); 2180789Sahrens goto top; /* restart the IO request */ 2181789Sahrens } 21822391Smaybee /* if this is a prefetch, we don't have a reference */ 21832391Smaybee if (*arc_flags & ARC_PREFETCH) { 21842391Smaybee (void) remove_reference(hdr, hash_lock, 21852391Smaybee private); 21862391Smaybee hdr->b_flags |= ARC_PREFETCH; 21872391Smaybee } 21882391Smaybee if (BP_GET_LEVEL(bp) > 0) 21892391Smaybee hdr->b_flags |= ARC_INDIRECT; 2190789Sahrens } else { 2191789Sahrens /* this block is in the ghost cache */ 21921544Seschrock ASSERT(GHOST_STATE(hdr->b_state)); 21931544Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 21942391Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 21952391Smaybee ASSERT(hdr->b_buf == NULL); 2196789Sahrens 21972391Smaybee /* if this is a prefetch, we don't have a reference */ 21982391Smaybee if (*arc_flags & ARC_PREFETCH) 21992391Smaybee hdr->b_flags |= ARC_PREFETCH; 22002391Smaybee else 22012391Smaybee add_reference(hdr, hash_lock, private); 2202789Sahrens buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 22031544Seschrock buf->b_hdr = hdr; 22042688Smaybee buf->b_data = NULL; 22051544Seschrock buf->b_efunc = NULL; 22061544Seschrock buf->b_private = NULL; 22071544Seschrock buf->b_next = NULL; 22081544Seschrock hdr->b_buf = buf; 22092688Smaybee arc_get_data_buf(buf); 22101544Seschrock ASSERT(hdr->b_datacnt == 0); 22111544Seschrock hdr->b_datacnt = 1; 22122391Smaybee 2213789Sahrens } 2214789Sahrens 2215789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2216789Sahrens acb->acb_done = done; 2217789Sahrens acb->acb_private = private; 2218789Sahrens acb->acb_byteswap = swap; 2219789Sahrens 2220789Sahrens ASSERT(hdr->b_acb == 
NULL); 2221789Sahrens hdr->b_acb = acb; 2222789Sahrens hdr->b_flags |= ARC_IO_IN_PROGRESS; 2223789Sahrens 2224789Sahrens /* 2225789Sahrens * If the buffer has been evicted, migrate it to a present state 2226789Sahrens * before issuing the I/O. Once we drop the hash-table lock, 2227789Sahrens * the header will be marked as I/O in progress and have an 2228789Sahrens * attached buffer. At this point, anybody who finds this 2229789Sahrens * buffer ought to notice that it's legit but has a pending I/O. 2230789Sahrens */ 2231789Sahrens 22321544Seschrock if (GHOST_STATE(hdr->b_state)) 22332688Smaybee arc_access(hdr, hash_lock); 22342688Smaybee mutex_exit(hash_lock); 2235789Sahrens 2236789Sahrens ASSERT3U(hdr->b_size, ==, size); 22371596Sahrens DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 22381596Sahrens zbookmark_t *, zb); 22393403Sbmc ARCSTAT_BUMP(arcstat_misses); 22403403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 22413403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 22423403Sbmc data, metadata, misses); 22431544Seschrock 2244789Sahrens rzio = zio_read(pio, spa, bp, buf->b_data, size, 22451544Seschrock arc_read_done, buf, priority, flags, zb); 2246789Sahrens 22472391Smaybee if (*arc_flags & ARC_WAIT) 2248789Sahrens return (zio_wait(rzio)); 2249789Sahrens 22502391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 2251789Sahrens zio_nowait(rzio); 2252789Sahrens } 2253789Sahrens return (0); 2254789Sahrens } 2255789Sahrens 2256789Sahrens /* 2257789Sahrens * arc_read() variant to support pool traversal. If the block is already 2258789Sahrens * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2259789Sahrens * The idea is that we don't want pool traversal filling up memory, but 2260789Sahrens * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2261789Sahrens */ 2262789Sahrens int 2263789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2264789Sahrens { 2265789Sahrens arc_buf_hdr_t *hdr; 2266789Sahrens kmutex_t *hash_mtx; 2267789Sahrens int rc = 0; 2268789Sahrens 2269789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2270789Sahrens 22711544Seschrock if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 22721544Seschrock arc_buf_t *buf = hdr->b_buf; 22731544Seschrock 22741544Seschrock ASSERT(buf); 22751544Seschrock while (buf->b_data == NULL) { 22761544Seschrock buf = buf->b_next; 22771544Seschrock ASSERT(buf); 22781544Seschrock } 22791544Seschrock bcopy(buf->b_data, data, hdr->b_size); 22801544Seschrock } else { 2281789Sahrens rc = ENOENT; 22821544Seschrock } 2283789Sahrens 2284789Sahrens if (hash_mtx) 2285789Sahrens mutex_exit(hash_mtx); 2286789Sahrens 2287789Sahrens return (rc); 2288789Sahrens } 2289789Sahrens 22901544Seschrock void 22911544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 22921544Seschrock { 22931544Seschrock ASSERT(buf->b_hdr != NULL); 22943403Sbmc ASSERT(buf->b_hdr->b_state != arc_anon); 22951544Seschrock ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 22961544Seschrock buf->b_efunc = func; 22971544Seschrock buf->b_private = private; 22981544Seschrock } 22991544Seschrock 23001544Seschrock /* 23011544Seschrock * This is used by the DMU to let the ARC know that a buffer is 23021544Seschrock * being evicted, so the ARC should clean up. If this arc buf 23031544Seschrock * is not yet in the evicted state, it will be put there. 
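 *
 * As implemented below, it returns 1 when it runs the buffer's
 * b_efunc itself (moving the header to a ghost state if this was its
 * last remaining buffer), and 0 when the buffer is already being
 * handled by arc_do_user_evicts().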
23041544Seschrock */ 23051544Seschrock int 23061544Seschrock arc_buf_evict(arc_buf_t *buf) 23071544Seschrock { 23082887Smaybee arc_buf_hdr_t *hdr; 23091544Seschrock kmutex_t *hash_lock; 23101544Seschrock arc_buf_t **bufp; 23111544Seschrock 23122887Smaybee mutex_enter(&arc_eviction_mtx); 23132887Smaybee hdr = buf->b_hdr; 23141544Seschrock if (hdr == NULL) { 23151544Seschrock /* 23161544Seschrock * We are in arc_do_user_evicts(). 23171544Seschrock */ 23181544Seschrock ASSERT(buf->b_data == NULL); 23192887Smaybee mutex_exit(&arc_eviction_mtx); 23201544Seschrock return (0); 23211544Seschrock } 23222887Smaybee hash_lock = HDR_LOCK(hdr); 23232887Smaybee mutex_exit(&arc_eviction_mtx); 23241544Seschrock 23251544Seschrock mutex_enter(hash_lock); 23261544Seschrock 23272724Smaybee if (buf->b_data == NULL) { 23282724Smaybee /* 23292724Smaybee * We are on the eviction list. 23302724Smaybee */ 23312724Smaybee mutex_exit(hash_lock); 23322724Smaybee mutex_enter(&arc_eviction_mtx); 23332724Smaybee if (buf->b_hdr == NULL) { 23342724Smaybee /* 23352724Smaybee * We are already in arc_do_user_evicts(). 23362724Smaybee */ 23372724Smaybee mutex_exit(&arc_eviction_mtx); 23382724Smaybee return (0); 23392724Smaybee } else { 23402724Smaybee arc_buf_t copy = *buf; /* structure assignment */ 23412724Smaybee /* 23422724Smaybee * Process this buffer now 23432724Smaybee * but let arc_do_user_evicts() do the reaping. 23442724Smaybee */ 23452724Smaybee buf->b_efunc = NULL; 23462724Smaybee mutex_exit(&arc_eviction_mtx); 23472724Smaybee VERIFY(copy.b_efunc(©) == 0); 23482724Smaybee return (1); 23492724Smaybee } 23502724Smaybee } 23512724Smaybee 23522724Smaybee ASSERT(buf->b_hdr == hdr); 23532724Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 23543403Sbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 23551544Seschrock 23561544Seschrock /* 23571544Seschrock * Pull this buffer off of the hdr 23581544Seschrock */ 23591544Seschrock bufp = &hdr->b_buf; 23601544Seschrock while (*bufp != buf) 23611544Seschrock bufp = &(*bufp)->b_next; 23621544Seschrock *bufp = buf->b_next; 23631544Seschrock 23641544Seschrock ASSERT(buf->b_data != NULL); 23652688Smaybee arc_buf_destroy(buf, FALSE, FALSE); 23661544Seschrock 23671544Seschrock if (hdr->b_datacnt == 0) { 23681544Seschrock arc_state_t *old_state = hdr->b_state; 23691544Seschrock arc_state_t *evicted_state; 23701544Seschrock 23711544Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt)); 23721544Seschrock 23731544Seschrock evicted_state = 23743403Sbmc (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 23751544Seschrock 23763403Sbmc mutex_enter(&old_state->arcs_mtx); 23773403Sbmc mutex_enter(&evicted_state->arcs_mtx); 23781544Seschrock 23791544Seschrock arc_change_state(evicted_state, hdr, hash_lock); 23801544Seschrock ASSERT(HDR_IN_HASH_TABLE(hdr)); 23811544Seschrock hdr->b_flags = ARC_IN_HASH_TABLE; 23821544Seschrock 23833403Sbmc mutex_exit(&evicted_state->arcs_mtx); 23843403Sbmc mutex_exit(&old_state->arcs_mtx); 23851544Seschrock } 23861544Seschrock mutex_exit(hash_lock); 23871819Smaybee 23881544Seschrock VERIFY(buf->b_efunc(buf) == 0); 23891544Seschrock buf->b_efunc = NULL; 23901544Seschrock buf->b_private = NULL; 23911544Seschrock buf->b_hdr = NULL; 23921544Seschrock kmem_cache_free(buf_cache, buf); 23931544Seschrock return (1); 23941544Seschrock } 23951544Seschrock 2396789Sahrens /* 2397789Sahrens * Release this buffer from the cache. This must be done 2398789Sahrens * after a read and prior to modifying the buffer contents. 
2399789Sahrens * If the buffer has more than one reference, we must make 2400789Sahrens * make a new hdr for the buffer. 2401789Sahrens */ 2402789Sahrens void 2403789Sahrens arc_release(arc_buf_t *buf, void *tag) 2404789Sahrens { 2405789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 2406789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr); 2407789Sahrens 2408789Sahrens /* this buffer is not on any list */ 2409789Sahrens ASSERT(refcount_count(&hdr->b_refcnt) > 0); 2410789Sahrens 24113403Sbmc if (hdr->b_state == arc_anon) { 2412789Sahrens /* this buffer is already released */ 2413789Sahrens ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 2414789Sahrens ASSERT(BUF_EMPTY(hdr)); 24151544Seschrock ASSERT(buf->b_efunc == NULL); 24163093Sahrens arc_buf_thaw(buf); 2417789Sahrens return; 2418789Sahrens } 2419789Sahrens 2420789Sahrens mutex_enter(hash_lock); 2421789Sahrens 24221544Seschrock /* 24231544Seschrock * Do we have more than one buf? 24241544Seschrock */ 24251544Seschrock if (hdr->b_buf != buf || buf->b_next != NULL) { 2426789Sahrens arc_buf_hdr_t *nhdr; 2427789Sahrens arc_buf_t **bufp; 2428789Sahrens uint64_t blksz = hdr->b_size; 2429789Sahrens spa_t *spa = hdr->b_spa; 24303290Sjohansen arc_buf_contents_t type = hdr->b_type; 2431789Sahrens 24321544Seschrock ASSERT(hdr->b_datacnt > 1); 2433789Sahrens /* 2434789Sahrens * Pull the data off of this buf and attach it to 2435789Sahrens * a new anonymous buf. 2436789Sahrens */ 24371544Seschrock (void) remove_reference(hdr, hash_lock, tag); 2438789Sahrens bufp = &hdr->b_buf; 24391544Seschrock while (*bufp != buf) 2440789Sahrens bufp = &(*bufp)->b_next; 2441789Sahrens *bufp = (*bufp)->b_next; 24423897Smaybee buf->b_next = NULL; 24431544Seschrock 24443403Sbmc ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 24453403Sbmc atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 24461544Seschrock if (refcount_is_zero(&hdr->b_refcnt)) { 2447*4309Smaybee uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 2448*4309Smaybee ASSERT3U(*size, >=, hdr->b_size); 2449*4309Smaybee atomic_add_64(size, -hdr->b_size); 24501544Seschrock } 24511544Seschrock hdr->b_datacnt -= 1; 24523547Smaybee arc_cksum_verify(buf); 24531544Seschrock 2454789Sahrens mutex_exit(hash_lock); 2455789Sahrens 2456789Sahrens nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 2457789Sahrens nhdr->b_size = blksz; 2458789Sahrens nhdr->b_spa = spa; 24593290Sjohansen nhdr->b_type = type; 2460789Sahrens nhdr->b_buf = buf; 24613403Sbmc nhdr->b_state = arc_anon; 2462789Sahrens nhdr->b_arc_access = 0; 2463789Sahrens nhdr->b_flags = 0; 24641544Seschrock nhdr->b_datacnt = 1; 24653547Smaybee nhdr->b_freeze_cksum = NULL; 24663897Smaybee (void) refcount_add(&nhdr->b_refcnt, tag); 2467789Sahrens buf->b_hdr = nhdr; 24683403Sbmc atomic_add_64(&arc_anon->arcs_size, blksz); 2469789Sahrens 2470789Sahrens hdr = nhdr; 2471789Sahrens } else { 24721544Seschrock ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2473789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 2474789Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 24753403Sbmc arc_change_state(arc_anon, hdr, hash_lock); 2476789Sahrens hdr->b_arc_access = 0; 2477789Sahrens mutex_exit(hash_lock); 2478789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2479789Sahrens hdr->b_birth = 0; 2480789Sahrens hdr->b_cksum0 = 0; 24813547Smaybee arc_buf_thaw(buf); 2482789Sahrens } 24831544Seschrock buf->b_efunc = NULL; 24841544Seschrock buf->b_private = NULL; 2485789Sahrens } 2486789Sahrens 2487789Sahrens int 2488789Sahrens arc_released(arc_buf_t *buf) 2489789Sahrens { 24903403Sbmc return 
(buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 24911544Seschrock } 24921544Seschrock 24931544Seschrock int 24941544Seschrock arc_has_callback(arc_buf_t *buf) 24951544Seschrock { 24961544Seschrock return (buf->b_efunc != NULL); 2497789Sahrens } 2498789Sahrens 24991544Seschrock #ifdef ZFS_DEBUG 25001544Seschrock int 25011544Seschrock arc_referenced(arc_buf_t *buf) 25021544Seschrock { 25031544Seschrock return (refcount_count(&buf->b_hdr->b_refcnt)); 25041544Seschrock } 25051544Seschrock #endif 25061544Seschrock 2507789Sahrens static void 25083547Smaybee arc_write_ready(zio_t *zio) 25093547Smaybee { 25103547Smaybee arc_write_callback_t *callback = zio->io_private; 25113547Smaybee arc_buf_t *buf = callback->awcb_buf; 25123547Smaybee 25133547Smaybee if (callback->awcb_ready) { 25143547Smaybee ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 25153547Smaybee callback->awcb_ready(zio, buf, callback->awcb_private); 25163547Smaybee } 25173547Smaybee arc_cksum_compute(buf); 25183547Smaybee } 25193547Smaybee 25203547Smaybee static void 2521789Sahrens arc_write_done(zio_t *zio) 2522789Sahrens { 25233547Smaybee arc_write_callback_t *callback = zio->io_private; 25243547Smaybee arc_buf_t *buf = callback->awcb_buf; 25253547Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 2526789Sahrens 2527789Sahrens hdr->b_acb = NULL; 2528789Sahrens 2529789Sahrens /* this buffer is on no lists and is not in the hash table */ 25303403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2531789Sahrens 2532789Sahrens hdr->b_dva = *BP_IDENTITY(zio->io_bp); 2533789Sahrens hdr->b_birth = zio->io_bp->blk_birth; 2534789Sahrens hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 25351544Seschrock /* 25361544Seschrock * If the block to be written was all-zero, we may have 25371544Seschrock * compressed it away. In this case no write was performed 25381544Seschrock * so there will be no dva/birth-date/checksum. The buffer 25391544Seschrock * must therefor remain anonymous (and uncached). 25401544Seschrock */ 2541789Sahrens if (!BUF_EMPTY(hdr)) { 2542789Sahrens arc_buf_hdr_t *exists; 2543789Sahrens kmutex_t *hash_lock; 2544789Sahrens 25453093Sahrens arc_cksum_verify(buf); 25463093Sahrens 2547789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2548789Sahrens if (exists) { 2549789Sahrens /* 2550789Sahrens * This can only happen if we overwrite for 2551789Sahrens * sync-to-convergence, because we remove 2552789Sahrens * buffers from the hash table when we arc_free(). 2553789Sahrens */ 2554789Sahrens ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 2555789Sahrens BP_IDENTITY(zio->io_bp))); 2556789Sahrens ASSERT3U(zio->io_bp_orig.blk_birth, ==, 2557789Sahrens zio->io_bp->blk_birth); 2558789Sahrens 2559789Sahrens ASSERT(refcount_is_zero(&exists->b_refcnt)); 25603403Sbmc arc_change_state(arc_anon, exists, hash_lock); 2561789Sahrens mutex_exit(hash_lock); 25621544Seschrock arc_hdr_destroy(exists); 2563789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2564789Sahrens ASSERT3P(exists, ==, NULL); 2565789Sahrens } 25661544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 25672688Smaybee arc_access(hdr, hash_lock); 25682688Smaybee mutex_exit(hash_lock); 25693547Smaybee } else if (callback->awcb_done == NULL) { 25701544Seschrock int destroy_hdr; 25711544Seschrock /* 25721544Seschrock * This is an anonymous buffer with no user callback, 25731544Seschrock * destroy it if there are no active references. 
25741544Seschrock */ 25751544Seschrock mutex_enter(&arc_eviction_mtx); 25761544Seschrock destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 25771544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 25781544Seschrock mutex_exit(&arc_eviction_mtx); 25791544Seschrock if (destroy_hdr) 25801544Seschrock arc_hdr_destroy(hdr); 25811544Seschrock } else { 25821544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2583789Sahrens } 25841544Seschrock 25853547Smaybee if (callback->awcb_done) { 2586789Sahrens ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 25873547Smaybee callback->awcb_done(zio, buf, callback->awcb_private); 2588789Sahrens } 2589789Sahrens 25903547Smaybee kmem_free(callback, sizeof (arc_write_callback_t)); 2591789Sahrens } 2592789Sahrens 25933547Smaybee zio_t * 25941775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 2595789Sahrens uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 25963547Smaybee arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 25973547Smaybee int flags, zbookmark_t *zb) 2598789Sahrens { 2599789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 26003547Smaybee arc_write_callback_t *callback; 26013547Smaybee zio_t *zio; 2602789Sahrens 2603789Sahrens /* this is a private buffer - no locking required */ 26043403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2605789Sahrens ASSERT(BUF_EMPTY(hdr)); 2606789Sahrens ASSERT(!HDR_IO_ERROR(hdr)); 26072237Smaybee ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 26082237Smaybee ASSERT(hdr->b_acb == 0); 26093547Smaybee callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 26103547Smaybee callback->awcb_ready = ready; 26113547Smaybee callback->awcb_done = done; 26123547Smaybee callback->awcb_private = private; 26133547Smaybee callback->awcb_buf = buf; 26141544Seschrock hdr->b_flags |= ARC_IO_IN_PROGRESS; 26153547Smaybee zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 26163547Smaybee buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 26173547Smaybee priority, flags, zb); 2618789Sahrens 26193547Smaybee return (zio); 2620789Sahrens } 2621789Sahrens 2622789Sahrens int 2623789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 2624789Sahrens zio_done_func_t *done, void *private, uint32_t arc_flags) 2625789Sahrens { 2626789Sahrens arc_buf_hdr_t *ab; 2627789Sahrens kmutex_t *hash_lock; 2628789Sahrens zio_t *zio; 2629789Sahrens 2630789Sahrens /* 2631789Sahrens * If this buffer is in the cache, release it, so it 2632789Sahrens * can be re-used. 2633789Sahrens */ 2634789Sahrens ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2635789Sahrens if (ab != NULL) { 2636789Sahrens /* 2637789Sahrens * The checksum of blocks to free is not always 2638789Sahrens * preserved (eg. on the deadlist). However, if it is 2639789Sahrens * nonzero, it should match what we have in the cache. 2640789Sahrens */ 2641789Sahrens ASSERT(bp->blk_cksum.zc_word[0] == 0 || 2642789Sahrens ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 26433403Sbmc if (ab->b_state != arc_anon) 26443403Sbmc arc_change_state(arc_anon, ab, hash_lock); 26452391Smaybee if (HDR_IO_IN_PROGRESS(ab)) { 26462391Smaybee /* 26472391Smaybee * This should only happen when we prefetch. 
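 *
 * We cannot tear the header down while the prefetch I/O is still in
 * flight, so it is tagged ARC_FREED_IN_READ and unhashed here, and
 * arc_read_done() completes the teardown when the read finishes (its
 * hash lookup will no longer find this header).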
26482391Smaybee */ 26492391Smaybee ASSERT(ab->b_flags & ARC_PREFETCH); 26502391Smaybee ASSERT3U(ab->b_datacnt, ==, 1); 26512391Smaybee ab->b_flags |= ARC_FREED_IN_READ; 26522391Smaybee if (HDR_IN_HASH_TABLE(ab)) 26532391Smaybee buf_hash_remove(ab); 26542391Smaybee ab->b_arc_access = 0; 26552391Smaybee bzero(&ab->b_dva, sizeof (dva_t)); 26562391Smaybee ab->b_birth = 0; 26572391Smaybee ab->b_cksum0 = 0; 26582391Smaybee ab->b_buf->b_efunc = NULL; 26592391Smaybee ab->b_buf->b_private = NULL; 26602391Smaybee mutex_exit(hash_lock); 26612391Smaybee } else if (refcount_is_zero(&ab->b_refcnt)) { 2662789Sahrens mutex_exit(hash_lock); 26631544Seschrock arc_hdr_destroy(ab); 26643403Sbmc ARCSTAT_BUMP(arcstat_deleted); 2665789Sahrens } else { 26661589Smaybee /* 26672391Smaybee * We still have an active reference on this 26682391Smaybee * buffer. This can happen, e.g., from 26692391Smaybee * dbuf_unoverride(). 26701589Smaybee */ 26712391Smaybee ASSERT(!HDR_IN_HASH_TABLE(ab)); 2672789Sahrens ab->b_arc_access = 0; 2673789Sahrens bzero(&ab->b_dva, sizeof (dva_t)); 2674789Sahrens ab->b_birth = 0; 2675789Sahrens ab->b_cksum0 = 0; 26761544Seschrock ab->b_buf->b_efunc = NULL; 26771544Seschrock ab->b_buf->b_private = NULL; 2678789Sahrens mutex_exit(hash_lock); 2679789Sahrens } 2680789Sahrens } 2681789Sahrens 2682789Sahrens zio = zio_free(pio, spa, txg, bp, done, private); 2683789Sahrens 2684789Sahrens if (arc_flags & ARC_WAIT) 2685789Sahrens return (zio_wait(zio)); 2686789Sahrens 2687789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 2688789Sahrens zio_nowait(zio); 2689789Sahrens 2690789Sahrens return (0); 2691789Sahrens } 2692789Sahrens 2693789Sahrens void 2694789Sahrens arc_tempreserve_clear(uint64_t tempreserve) 2695789Sahrens { 2696789Sahrens atomic_add_64(&arc_tempreserve, -tempreserve); 2697789Sahrens ASSERT((int64_t)arc_tempreserve >= 0); 2698789Sahrens } 2699789Sahrens 2700789Sahrens int 2701789Sahrens arc_tempreserve_space(uint64_t tempreserve) 2702789Sahrens { 2703789Sahrens #ifdef ZFS_DEBUG 2704789Sahrens /* 2705789Sahrens * Once in a while, fail for no reason. Everything should cope. 2706789Sahrens */ 2707789Sahrens if (spa_get_random(10000) == 0) { 2708789Sahrens dprintf("forcing random failure\n"); 2709789Sahrens return (ERESTART); 2710789Sahrens } 2711789Sahrens #endif 27123403Sbmc if (tempreserve > arc_c/4 && !arc_no_grow) 27133403Sbmc arc_c = MIN(arc_c_max, tempreserve * 4); 27143403Sbmc if (tempreserve > arc_c) 2715982Smaybee return (ENOMEM); 2716982Smaybee 2717789Sahrens /* 2718982Smaybee * Throttle writes when the amount of dirty data in the cache 2719982Smaybee * gets too large. We try to keep the cache less than half full 2720982Smaybee * of dirty blocks so that our sync times don't grow too large. 2721982Smaybee * Note: if two requests come in concurrently, we might let them 2722982Smaybee * both succeed, when one of them should fail. Not a huge deal. 2723982Smaybee * 2724982Smaybee * XXX The limit should be adjusted dynamically to keep the time 2725982Smaybee * to sync a dataset fixed (around 1-5 seconds?). 
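 *
 * A hypothetical example of the check below: with arc_c = 1GB, a new
 * reservation gets ERESTART once (tempreserve + arc_tempreserve +
 * anonymous bytes) would exceed 512MB while (arc_tempreserve +
 * anonymous bytes) already exceeds 256MB, presumably so the caller
 * backs off and retries after some dirty data has been synced.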
2726789Sahrens */ 2727789Sahrens 27283403Sbmc if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 27293403Sbmc arc_tempreserve + arc_anon->arcs_size > arc_c / 4) { 2730*4309Smaybee dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 2731*4309Smaybee "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 2732*4309Smaybee arc_tempreserve>>10, 2733*4309Smaybee arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 2734*4309Smaybee arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 27353403Sbmc tempreserve>>10, arc_c>>10); 2736789Sahrens return (ERESTART); 2737789Sahrens } 2738789Sahrens atomic_add_64(&arc_tempreserve, tempreserve); 2739789Sahrens return (0); 2740789Sahrens } 2741789Sahrens 2742789Sahrens void 2743789Sahrens arc_init(void) 2744789Sahrens { 2745789Sahrens mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 2746789Sahrens cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 2747789Sahrens 27482391Smaybee /* Convert seconds to clock ticks */ 27492638Sperrin arc_min_prefetch_lifespan = 1 * hz; 27502391Smaybee 2751789Sahrens /* Start out with 1/8 of all memory */ 27523403Sbmc arc_c = physmem * PAGESIZE / 8; 2753789Sahrens 2754789Sahrens #ifdef _KERNEL 2755789Sahrens /* 2756789Sahrens * On architectures where the physical memory can be larger 2757789Sahrens * than the addressable space (intel in 32-bit mode), we may 2758789Sahrens * need to limit the cache to 1/8 of VM size. 2759789Sahrens */ 27603403Sbmc arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 2761789Sahrens #endif 2762789Sahrens 2763982Smaybee /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 27643403Sbmc arc_c_min = MAX(arc_c / 4, 64<<20); 2765982Smaybee /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 27663403Sbmc if (arc_c * 8 >= 1<<30) 27673403Sbmc arc_c_max = (arc_c * 8) - (1<<30); 2768789Sahrens else 27693403Sbmc arc_c_max = arc_c_min; 27703403Sbmc arc_c_max = MAX(arc_c * 6, arc_c_max); 27712885Sahrens 27722885Sahrens /* 27732885Sahrens * Allow the tunables to override our calculations if they are 27742885Sahrens * reasonable (ie. 
over 64MB) 27752885Sahrens */ 27762885Sahrens if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 27773403Sbmc arc_c_max = zfs_arc_max; 27783403Sbmc if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 27793403Sbmc arc_c_min = zfs_arc_min; 27802885Sahrens 27813403Sbmc arc_c = arc_c_max; 27823403Sbmc arc_p = (arc_c >> 1); 2783789Sahrens 2784*4309Smaybee /* limit meta-data to 1/4 of the arc capacity */ 2785*4309Smaybee arc_meta_limit = arc_c_max / 4; 2786*4309Smaybee if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 2787*4309Smaybee arc_c_min = arc_meta_limit / 2; 2788*4309Smaybee 2789789Sahrens /* if kmem_flags are set, lets try to use less memory */ 2790789Sahrens if (kmem_debugging()) 27913403Sbmc arc_c = arc_c / 2; 27923403Sbmc if (arc_c < arc_c_min) 27933403Sbmc arc_c = arc_c_min; 2794789Sahrens 27953403Sbmc arc_anon = &ARC_anon; 27963403Sbmc arc_mru = &ARC_mru; 27973403Sbmc arc_mru_ghost = &ARC_mru_ghost; 27983403Sbmc arc_mfu = &ARC_mfu; 27993403Sbmc arc_mfu_ghost = &ARC_mfu_ghost; 28003403Sbmc arc_size = 0; 2801789Sahrens 28023403Sbmc mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 28033403Sbmc mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 28043403Sbmc mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 28053403Sbmc mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 28063403Sbmc mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 28072688Smaybee 2808*4309Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 2809*4309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 2810*4309Smaybee list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 2811*4309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 2812*4309Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 2813*4309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 2814*4309Smaybee list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 2815*4309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 2816*4309Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 2817*4309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 2818*4309Smaybee list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 2819*4309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 2820*4309Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 2821*4309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 2822*4309Smaybee list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 2823*4309Smaybee sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 2824789Sahrens 2825789Sahrens buf_init(); 2826789Sahrens 2827789Sahrens arc_thread_exit = 0; 28281544Seschrock arc_eviction_list = NULL; 28291544Seschrock mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 28302887Smaybee bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 2831789Sahrens 28323403Sbmc arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 28333403Sbmc sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 28343403Sbmc 28353403Sbmc if (arc_ksp != NULL) { 28363403Sbmc arc_ksp->ks_data = &arc_stats; 28373403Sbmc kstat_install(arc_ksp); 28383403Sbmc } 28393403Sbmc 2840789Sahrens (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 2841789Sahrens TS_RUN, minclsyspri); 28423158Smaybee 28433158Smaybee arc_dead = FALSE; 2844789Sahrens } 2845789Sahrens 2846789Sahrens void 2847789Sahrens arc_fini(void) 2848789Sahrens { 2849789Sahrens 
mutex_enter(&arc_reclaim_thr_lock); 2850789Sahrens arc_thread_exit = 1; 2851789Sahrens while (arc_thread_exit != 0) 2852789Sahrens cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 2853789Sahrens mutex_exit(&arc_reclaim_thr_lock); 2854789Sahrens 2855789Sahrens arc_flush(); 2856789Sahrens 2857789Sahrens arc_dead = TRUE; 2858789Sahrens 28593403Sbmc if (arc_ksp != NULL) { 28603403Sbmc kstat_delete(arc_ksp); 28613403Sbmc arc_ksp = NULL; 28623403Sbmc } 28633403Sbmc 28641544Seschrock mutex_destroy(&arc_eviction_mtx); 2865789Sahrens mutex_destroy(&arc_reclaim_thr_lock); 2866789Sahrens cv_destroy(&arc_reclaim_thr_cv); 2867789Sahrens 2868*4309Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 2869*4309Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 2870*4309Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 2871*4309Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 2872*4309Smaybee list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 2873*4309Smaybee list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 2874*4309Smaybee list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 2875*4309Smaybee list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 2876789Sahrens 28773403Sbmc mutex_destroy(&arc_anon->arcs_mtx); 28783403Sbmc mutex_destroy(&arc_mru->arcs_mtx); 28793403Sbmc mutex_destroy(&arc_mru_ghost->arcs_mtx); 28803403Sbmc mutex_destroy(&arc_mfu->arcs_mtx); 28813403Sbmc mutex_destroy(&arc_mfu_ghost->arcs_mtx); 28822856Snd150628 2883789Sahrens buf_fini(); 2884789Sahrens } 2885