/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
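 *
 * For example, code that walks an arc list and then needs a buffer's
 * hash table lock uses the non-blocking form, as arc_evict() does:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	...
 *	if (mutex_tryenter(hash_lock)) {
 *		... operate on the buffer ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);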
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;

/*
 * Note that buffers can be on one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to the buffer, they
 * are linked onto one of the lists in arc.  These are the
 * only buffers that can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
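 * Once on the arc_mru list, a later hit will generally migrate a
 * buffer to the arc_mfu list (see arc_access()).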
 */

typedef struct arc_state {
	list_t	arcs_list;	/* linked list of evictable buffers in state */
	uint64_t arcs_lsize;	/* total size of buffers in the linked list */
	uint64_t arcs_size;	/* total size of all buffers in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

typedef struct arc_stats {
	kstat_named_t	arcstat_hits;
	kstat_named_t	arcstat_misses;
	kstat_named_t	arcstat_demand_data_hits;
	kstat_named_t	arcstat_demand_data_misses;
	kstat_named_t	arcstat_demand_metadata_hits;
	kstat_named_t	arcstat_demand_metadata_misses;
	kstat_named_t	arcstat_prefetch_data_hits;
	kstat_named_t	arcstat_prefetch_data_misses;
	kstat_named_t	arcstat_prefetch_metadata_hits;
	kstat_named_t	arcstat_prefetch_metadata_misses;
	kstat_named_t	arcstat_mru_hits;
	kstat_named_t	arcstat_mru_ghost_hits;
	kstat_named_t	arcstat_mfu_hits;
	kstat_named_t	arcstat_mfu_ghost_hits;
	kstat_named_t	arcstat_deleted;
	kstat_named_t	arcstat_recycle_miss;
	kstat_named_t	arcstat_mutex_miss;
	kstat_named_t	arcstat_evict_skip;
	kstat_named_t	arcstat_hash_elements;
	kstat_named_t	arcstat_hash_elements_max;
	kstat_named_t	arcstat_hash_collisions;
	kstat_named_t	arcstat_hash_chains;
	kstat_named_t	arcstat_hash_chain_max;
	kstat_named_t	arcstat_p;
	kstat_named_t	arcstat_c;
	kstat_named_t	arcstat_c_min;
	kstat_named_t	arcstat_c_max;
	kstat_named_t	arcstat_size;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

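/*
 * ARCSTAT_MAX() updates a maximum statistic without taking a lock: it
 * re-reads the current value and retries the compare-and-swap until
 * either the observed value is already >= val or the swap succeeds.
 */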
#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;

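/*
 * An arc_callback_t describes one waiter on a read that is still in
 * progress; callbacks for the same header are chained through acb_next
 * and invoked when the I/O completes.
 */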
typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

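/*
 * Hash a block's identity down to a table index: a CRC-64 of the DVA,
 * mixed with the spa pointer and the birth txg.  BUF_HASH_INDEX() masks
 * the result to the table size.
 */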
static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

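/*
 * Look up a header by (spa, dva, birth).  On success the header is
 * returned with its hash lock held and *lockp set to that lock;
 * otherwise NULL is returned and *lockp is cleared.
 */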
static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
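	 *
	 * For example, on a machine with 4GB of physical memory the loop
	 * below settles on hsize = 65536 buckets, i.e. a 512KB table of
	 * 8-byte pointers.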
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static void
arc_cksum_compute(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	if (buf->b_hdr->b_state != arc_anon)
		panic("modifying non-anon buffer!");
	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
		panic("modifying buffer while i/o in progress!");
	arc_cksum_verify(buf);
	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf);
}

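/*
 * add_reference() pulls a previously unreferenced buffer off its state's
 * evictable list (adjusting arcs_lsize) as soon as the first reference is
 * taken; remove_reference() puts it back once the last reference is dropped.
 */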
static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		int delta = ab->b_size * ab->b_datacnt;

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(&ab->b_state->arcs_list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(ab->b_state->arcs_lsize, >=, delta);
		atomic_add_64(&ab->b_state->arcs_lsize, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(&state->arcs_lsize, ab->b_size * ab->b_datacnt);
		ASSERT3U(state->arcs_size, >=, state->arcs_lsize);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int refcnt = refcount_count(&ab->b_refcnt);
	int from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(old_state->arcs_lsize, >=, from_delta);
			atomic_add_64(&old_state->arcs_lsize, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(&new_state->arcs_lsize, to_delta);
			ASSERT3U(new_state->arcs_size + to_delta, >=,
			    new_state->arcs_lsize);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && old_state != arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;
}

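/*
 * Allocate a new, anonymous buffer of `size' bytes: a fresh header in the
 * arc_anon state plus a single arc_buf_t whose data comes from
 * arc_get_data_buf(), with one reference held by `tag'.
 */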
arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

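/*
 * Clone an existing arc_buf_t: the new buf shares the same header but
 * gets its own copy of the data (bumping b_datacnt), so the caller can
 * use it independently of the original.
 */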
static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

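/*
 * Release the data held by one arc_buf_t (or hand the data block back to
 * the caller when `recycle' is set), fix up the owning state's size
 * accounting, and, when `all' is set, unlink the buf from its header and
 * free it.
 */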
static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				zio_buf_free(buf->b_data, size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				zio_data_buf_free(buf->b_data, size);
			}
			atomic_add_64(&arc_size, -size);
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);
			ASSERT3U(state->arcs_lsize, >=, size);
			atomic_add_64(&state->arcs_lsize, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

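/*
 * Destroy a header that is anonymous and unreferenced: every remaining
 * arc_buf_t is either destroyed or, if it has an eviction callback, moved
 * to the arc_eviction_list for arc_do_user_evicts(); any frozen checksum
 * is freed as well.
 */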
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 */
static void *
arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->arcs_list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags = ARC_IN_HASH_TABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	return (stolen);
}

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->arcs_list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			arc_change_state(arc_anon, ab, hash_lock);
			mutex_exit(hash_lock);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;
			arc_hdr_destroy(ab);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->arcs_mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

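/*
 * Shrink the lists back toward their targets: first trim arc_mru so that
 * anon + mru fits within arc_p, then trim the mru ghost list so that
 * anon + mru + mru ghost fits within arc_c, and finally trim arc_mfu back
 * toward arc_c and the mfu ghost list so that the total, ghosts included,
 * stays within 2 * arc_c.
 */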
static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over, todelete;

	top_sz = arc_anon->arcs_size + arc_mru->arcs_size;

	if (top_sz > arc_p && arc_mru->arcs_lsize > 0) {
		int64_t toevict = MIN(arc_mru->arcs_lsize, top_sz - arc_p);
		(void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_UNDEF);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;

	if (mru_over > 0) {
		if (arc_mru_ghost->arcs_lsize > 0) {
			todelete = MIN(arc_mru_ghost->arcs_lsize, mru_over);
			arc_evict_ghost(arc_mru_ghost, todelete);
		}
	}

	if ((arc_over = arc_size - arc_c) > 0) {
		int64_t tbl_over;

		if (arc_mfu->arcs_lsize > 0) {
			int64_t toevict = MIN(arc_mfu->arcs_lsize, arc_over);
			(void) arc_evict(arc_mfu, toevict, FALSE,
			    ARC_BUFC_UNDEF);
		}

		tbl_over = arc_size + arc_mru_ghost->arcs_lsize +
		    arc_mfu_ghost->arcs_lsize - arc_c*2;

		if (tbl_over > 0 && arc_mfu_ghost->arcs_lsize > 0) {
			todelete = MIN(arc_mfu_ghost->arcs_lsize, tbl_over);
			arc_evict_ghost(arc_mfu_ghost, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(void)
{
	while (list_head(&arc_mru->arcs_list))
		(void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_UNDEF);
	while (list_head(&arc_mfu->arcs_list))
		(void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_UNDEF);

	arc_evict_ghost(arc_mru_ghost, -1);
	arc_evict_ghost(arc_mfu_ghost, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(arc_eviction_list == NULL);
}

int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */

void
arc_shrink(void)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
#else
		to_free = arc_c >> arc_shrink_shift;
#endif
		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}

static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL

	if (needfree)
		return (1);

	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
1397789Sahrens */
1398789Sahrens if (freemem < lotsfree + needfree + extra)
1399789Sahrens return (1);
1400789Sahrens
1401789Sahrens /*
1402789Sahrens * check to make sure that swapfs has enough space so that anon
1403789Sahrens * reservations can still succeed. anon_resvmem() checks that the
1404789Sahrens * availrmem is greater than swapfs_minfree, and the number of reserved
1405789Sahrens * swap pages. We also add a bit of extra here just to prevent
1406789Sahrens * circumstances from getting really dire.
1407789Sahrens */
1408789Sahrens if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1409789Sahrens return (1);
1410789Sahrens
14113307Sjohansen /*
14123307Sjohansen * If zio data pages are being allocated out of a separate heap segment,
14133307Sjohansen * then check that the size of available vmem for this area remains
14143307Sjohansen * above 1/4th free. This needs to be done since the size of the
14153307Sjohansen * non-default segment is smaller than physical memory, so we could
14163307Sjohansen * conceivably run out of VA in that segment before running out of
14173307Sjohansen * physical memory.
14183307Sjohansen */
14193307Sjohansen if ((zio_arena != NULL) && (btop(vmem_size(zio_arena, VMEM_FREE)) <
14203307Sjohansen (btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)))
14213307Sjohansen return (1);
14223307Sjohansen
14231936Smaybee #if defined(__i386)
1424789Sahrens /*
1425789Sahrens * If we're on an i386 platform, it's possible that we'll exhaust the
1426789Sahrens * kernel heap space before we ever run out of available physical
1427789Sahrens * memory. Most checks of the size of the heap_area compare against
1428789Sahrens * tune.t_minarmem, which is the minimum available real memory that we
1429789Sahrens * can have in the system. However, this is generally fixed at 25 pages
1430789Sahrens * which is so low that it's useless. In this comparison, we seek to
1431789Sahrens * calculate the total heap-size, and reclaim if more than 3/4ths of the
1432789Sahrens * heap is allocated. (Or, in the calculation, if less than 1/4th is
1433789Sahrens * free)
1434789Sahrens */
1435789Sahrens if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1436789Sahrens (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1437789Sahrens return (1);
1438789Sahrens #endif
1439789Sahrens
1440789Sahrens #else
1441789Sahrens if (spa_get_random(100) == 0)
1442789Sahrens return (1);
1443789Sahrens #endif
1444789Sahrens return (0);
1445789Sahrens }
1446789Sahrens
1447789Sahrens static void
1448789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1449789Sahrens {
1450789Sahrens size_t i;
1451789Sahrens kmem_cache_t *prev_cache = NULL;
14523290Sjohansen kmem_cache_t *prev_data_cache = NULL;
1453789Sahrens extern kmem_cache_t *zio_buf_cache[];
14543290Sjohansen extern kmem_cache_t *zio_data_buf_cache[];
1455789Sahrens
14561484Sek110237 #ifdef _KERNEL
14571484Sek110237 /*
14581484Sek110237 * First purge some DNLC entries, in case the DNLC is using
14591484Sek110237 * up too much memory.
14601484Sek110237 */
14611505Sek110237 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
14621936Smaybee
14631936Smaybee #if defined(__i386)
14641936Smaybee /*
14651936Smaybee * Reclaim unused memory from all kmem caches.
14661936Smaybee */
14671936Smaybee kmem_reap();
14681936Smaybee #endif
14691484Sek110237 #endif
14701484Sek110237
1471789Sahrens /*
14721544Seschrock * An aggressive reclamation will shrink the cache size as well as
14731544Seschrock * reap free buffers from the arc kmem caches.
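 *
 * For example (illustrative figures only): with arc_shrink_shift at
 * its default of 5, an ARC_RECLAIM_AGGR pass on a 1GB cache calls
 * arc_shrink() and drops arc_c by max(1GB >> 5, ptob(needfree)),
 * i.e. by at least 32MB, before the kmem caches below are reaped.
 * An ARC_RECLAIM_CONS pass reaps the kmem caches but leaves arc_c
 * alone.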
1474789Sahrens */
1475789Sahrens if (strat == ARC_RECLAIM_AGGR)
14763158Smaybee arc_shrink();
1477789Sahrens
1478789Sahrens for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1479789Sahrens if (zio_buf_cache[i] != prev_cache) {
1480789Sahrens prev_cache = zio_buf_cache[i];
1481789Sahrens kmem_cache_reap_now(zio_buf_cache[i]);
1482789Sahrens }
14833290Sjohansen if (zio_data_buf_cache[i] != prev_data_cache) {
14843290Sjohansen prev_data_cache = zio_data_buf_cache[i];
14853290Sjohansen kmem_cache_reap_now(zio_data_buf_cache[i]);
14863290Sjohansen }
1487789Sahrens }
14881544Seschrock kmem_cache_reap_now(buf_cache);
14891544Seschrock kmem_cache_reap_now(hdr_cache);
1490789Sahrens }
1491789Sahrens
1492789Sahrens static void
1493789Sahrens arc_reclaim_thread(void)
1494789Sahrens {
1495789Sahrens clock_t growtime = 0;
1496789Sahrens arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
1497789Sahrens callb_cpr_t cpr;
1498789Sahrens
1499789Sahrens CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1500789Sahrens
1501789Sahrens mutex_enter(&arc_reclaim_thr_lock);
1502789Sahrens while (arc_thread_exit == 0) {
1503789Sahrens if (arc_reclaim_needed()) {
1504789Sahrens
15053403Sbmc if (arc_no_grow) {
1506789Sahrens if (last_reclaim == ARC_RECLAIM_CONS) {
1507789Sahrens last_reclaim = ARC_RECLAIM_AGGR;
1508789Sahrens } else {
1509789Sahrens last_reclaim = ARC_RECLAIM_CONS;
1510789Sahrens }
1511789Sahrens } else {
15123403Sbmc arc_no_grow = TRUE;
1513789Sahrens last_reclaim = ARC_RECLAIM_AGGR;
1514789Sahrens membar_producer();
1515789Sahrens }
1516789Sahrens
1517789Sahrens /* reset the growth delay for every reclaim */
1518789Sahrens growtime = lbolt + (arc_grow_retry * hz);
15192856Snd150628 ASSERT(growtime > 0);
1520789Sahrens
1521789Sahrens arc_kmem_reap_now(last_reclaim);
1522789Sahrens
1523789Sahrens } else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
15243403Sbmc arc_no_grow = FALSE;
1525789Sahrens }
1526789Sahrens
15273403Sbmc if (2 * arc_c < arc_size +
15283403Sbmc arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
15293298Smaybee arc_adjust();
15303298Smaybee
15311544Seschrock if (arc_eviction_list != NULL)
15321544Seschrock arc_do_user_evicts();
15331544Seschrock
1534789Sahrens /* block until needed, or one second, whichever is shorter */
1535789Sahrens CALLB_CPR_SAFE_BEGIN(&cpr);
1536789Sahrens (void) cv_timedwait(&arc_reclaim_thr_cv,
1537789Sahrens &arc_reclaim_thr_lock, (lbolt + hz));
1538789Sahrens CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1539789Sahrens }
1540789Sahrens
1541789Sahrens arc_thread_exit = 0;
1542789Sahrens cv_broadcast(&arc_reclaim_thr_cv);
1543789Sahrens CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
1544789Sahrens thread_exit();
1545789Sahrens }
1546789Sahrens
15471544Seschrock /*
15481544Seschrock * Adapt arc info given the number of bytes we are trying to add and
15491544Seschrock * the state that we are coming from. This function is only called
15501544Seschrock * when we are adding new content to the cache.
15511544Seschrock */
1552789Sahrens static void
15531544Seschrock arc_adapt(int bytes, arc_state_t *state)
1554789Sahrens {
15551544Seschrock int mult;
15561544Seschrock
15571544Seschrock ASSERT(bytes > 0);
1558789Sahrens /*
15591544Seschrock * Adapt the target size of the MRU list:
15601544Seschrock * - if we just hit in the MRU ghost list, then increase
15611544Seschrock * the target size of the MRU list.
15621544Seschrock * - if we just hit in the MFU ghost list, then increase 15631544Seschrock * the target size of the MFU list by decreasing the 15641544Seschrock * target size of the MRU list. 1565789Sahrens */ 15663403Sbmc if (state == arc_mru_ghost) { 15673403Sbmc mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 15683403Sbmc 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 15691544Seschrock 15703403Sbmc arc_p = MIN(arc_c, arc_p + bytes * mult); 15713403Sbmc } else if (state == arc_mfu_ghost) { 15723403Sbmc mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 15733403Sbmc 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 15741544Seschrock 15753403Sbmc arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 15761544Seschrock } 15773403Sbmc ASSERT((int64_t)arc_p >= 0); 1578789Sahrens 1579789Sahrens if (arc_reclaim_needed()) { 1580789Sahrens cv_signal(&arc_reclaim_thr_cv); 1581789Sahrens return; 1582789Sahrens } 1583789Sahrens 15843403Sbmc if (arc_no_grow) 1585789Sahrens return; 1586789Sahrens 15873403Sbmc if (arc_c >= arc_c_max) 15881544Seschrock return; 15891544Seschrock 1590789Sahrens /* 15911544Seschrock * If we're within (2 * maxblocksize) bytes of the target 15921544Seschrock * cache size, increment the target cache size 1593789Sahrens */ 15943403Sbmc if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 15953403Sbmc atomic_add_64(&arc_c, (int64_t)bytes); 15963403Sbmc if (arc_c > arc_c_max) 15973403Sbmc arc_c = arc_c_max; 15983403Sbmc else if (state == arc_anon) 15993403Sbmc atomic_add_64(&arc_p, (int64_t)bytes); 16003403Sbmc if (arc_p > arc_c) 16013403Sbmc arc_p = arc_c; 1602789Sahrens } 16033403Sbmc ASSERT((int64_t)arc_p >= 0); 1604789Sahrens } 1605789Sahrens 1606789Sahrens /* 16071544Seschrock * Check if the cache has reached its limits and eviction is required 16081544Seschrock * prior to insert. 1609789Sahrens */ 1610789Sahrens static int 1611789Sahrens arc_evict_needed() 1612789Sahrens { 1613789Sahrens if (arc_reclaim_needed()) 1614789Sahrens return (1); 1615789Sahrens 16163403Sbmc return (arc_size > arc_c); 1617789Sahrens } 1618789Sahrens 1619789Sahrens /* 16202688Smaybee * The buffer, supplied as the first argument, needs a data block. 16212688Smaybee * So, if we are at cache max, determine which cache should be victimized. 16222688Smaybee * We have the following cases: 1623789Sahrens * 16243403Sbmc * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 1625789Sahrens * In this situation if we're out of space, but the resident size of the MFU is 1626789Sahrens * under the limit, victimize the MFU cache to satisfy this insertion request. 1627789Sahrens * 16283403Sbmc * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 1629789Sahrens * Here, we've used up all of the available space for the MRU, so we need to 1630789Sahrens * evict from our own cache instead. Evict from the set of resident MRU 1631789Sahrens * entries. 1632789Sahrens * 16333403Sbmc * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 1634789Sahrens * c minus p represents the MFU space in the cache, since p is the size of the 1635789Sahrens * cache that is dedicated to the MRU. In this situation there's still space on 1636789Sahrens * the MFU side, so the MRU side needs to be victimized. 1637789Sahrens * 16383403Sbmc * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 1639789Sahrens * MFU's resident set is consuming more space than it has been allotted. In 1640789Sahrens * this situation, we must victimize our own cache, the MFU, for this insertion. 
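 *
 * A concrete (purely illustrative) example: suppose c = 1000MB and
 * p = 600MB. An insert destined for the MRU side when
 * anon + mru = 500MB falls under case 1 (500MB is below p), so the
 * MFU side is victimized; once anon + mru exceeds 600MB we are in
 * case 2 and the MRU side evicts from itself. Likewise an MFU insert
 * sees c - p = 400MB of MFU room: while the resident MFU is under
 * 400MB (case 3) the MRU side is victimized, otherwise (case 4) the
 * MFU evicts from itself. Note that p itself is moved by arc_adapt():
 * e.g. a 128K hit in the MRU ghost list while the MFU ghost list is
 * four times its size bumps p up by 4 * 128K = 512K (capped at c).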
1641789Sahrens */
1642789Sahrens static void
16432688Smaybee arc_get_data_buf(arc_buf_t *buf)
1644789Sahrens {
16453290Sjohansen arc_state_t *state = buf->b_hdr->b_state;
16463290Sjohansen uint64_t size = buf->b_hdr->b_size;
16473290Sjohansen arc_buf_contents_t type = buf->b_hdr->b_type;
16482688Smaybee
16482688Smaybee arc_adapt(size, state);
1650789Sahrens
16512688Smaybee /*
16522688Smaybee * If we have not yet reached cache maximum size,
16532688Smaybee * just allocate a new buffer.
16542688Smaybee */
16552688Smaybee if (!arc_evict_needed()) {
16563290Sjohansen if (type == ARC_BUFC_METADATA) {
16573290Sjohansen buf->b_data = zio_buf_alloc(size);
16583290Sjohansen } else {
16593290Sjohansen ASSERT(type == ARC_BUFC_DATA);
16603290Sjohansen buf->b_data = zio_data_buf_alloc(size);
16613290Sjohansen }
16623403Sbmc atomic_add_64(&arc_size, size);
16632688Smaybee goto out;
16642688Smaybee }
16652688Smaybee
16662688Smaybee /*
16672688Smaybee * If we are prefetching from the mfu ghost list, this buffer
16682688Smaybee * will end up on the mru list; so steal space from there.
16692688Smaybee */
16703403Sbmc if (state == arc_mfu_ghost)
16713403Sbmc state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
16723403Sbmc else if (state == arc_mru_ghost)
16733403Sbmc state = arc_mru;
1674789Sahrens
16753403Sbmc if (state == arc_mru || state == arc_anon) {
16763403Sbmc uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
16773403Sbmc state = (arc_p > mru_used) ? arc_mfu : arc_mru;
1678789Sahrens } else {
16792688Smaybee /* MFU cases */
16803403Sbmc uint64_t mfu_space = arc_c - arc_p;
16813403Sbmc state = (mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
16822688Smaybee }
16833290Sjohansen if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) {
16843290Sjohansen if (type == ARC_BUFC_METADATA) {
16853290Sjohansen buf->b_data = zio_buf_alloc(size);
16863290Sjohansen } else {
16873290Sjohansen ASSERT(type == ARC_BUFC_DATA);
16883290Sjohansen buf->b_data = zio_data_buf_alloc(size);
16893290Sjohansen }
16903403Sbmc atomic_add_64(&arc_size, size);
16913403Sbmc ARCSTAT_BUMP(arcstat_recycle_miss);
16922688Smaybee }
16932688Smaybee ASSERT(buf->b_data != NULL);
16942688Smaybee out:
16952688Smaybee /*
16962688Smaybee * Update the state size. Note that ghost states have a
16972688Smaybee * "ghost size" and so don't need to be updated.
16982688Smaybee */
16992688Smaybee if (!GHOST_STATE(buf->b_hdr->b_state)) {
17002688Smaybee arc_buf_hdr_t *hdr = buf->b_hdr;
17012688Smaybee
17023403Sbmc atomic_add_64(&hdr->b_state->arcs_size, size);
17032688Smaybee if (list_link_active(&hdr->b_arc_node)) {
17042688Smaybee ASSERT(refcount_is_zero(&hdr->b_refcnt));
17053403Sbmc atomic_add_64(&hdr->b_state->arcs_lsize, size);
1706789Sahrens }
17073298Smaybee /*
17083298Smaybee * If we are growing the cache, and we are adding anonymous
17093403Sbmc * data, and we have outgrown arc_p, update arc_p
17103298Smaybee */
17113403Sbmc if (arc_size < arc_c && hdr->b_state == arc_anon &&
17123403Sbmc arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
17133403Sbmc arc_p = MIN(arc_c, arc_p + size);
1714789Sahrens }
1715789Sahrens }
1716789Sahrens
1717789Sahrens /*
1718789Sahrens * This routine is called whenever a buffer is accessed.
17191544Seschrock * NOTE: the hash lock is not dropped here; the caller must release it.
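 *
 * In rough outline (a summary of the cases handled below, for
 * orientation only): an anonymous buffer moves to the MRU list; an
 * MRU buffer that is referenced again more than ARC_MINTIME (about
 * 125ms) after it was instantiated moves to the MFU list; a hit on
 * an MRU-ghost or MFU-ghost entry brings the buffer back in on the
 * MFU list (or the MRU list if the access was a prefetch); an MFU
 * buffer simply stays on the MFU list.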
1720789Sahrens */ 1721789Sahrens static void 17222688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 1723789Sahrens { 1724789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 1725789Sahrens 17263403Sbmc if (buf->b_state == arc_anon) { 1727789Sahrens /* 1728789Sahrens * This buffer is not in the cache, and does not 1729789Sahrens * appear in our "ghost" list. Add the new buffer 1730789Sahrens * to the MRU state. 1731789Sahrens */ 1732789Sahrens 1733789Sahrens ASSERT(buf->b_arc_access == 0); 1734789Sahrens buf->b_arc_access = lbolt; 17351544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 17363403Sbmc arc_change_state(arc_mru, buf, hash_lock); 1737789Sahrens 17383403Sbmc } else if (buf->b_state == arc_mru) { 1739789Sahrens /* 17402391Smaybee * If this buffer is here because of a prefetch, then either: 17412391Smaybee * - clear the flag if this is a "referencing" read 17422391Smaybee * (any subsequent access will bump this into the MFU state). 17432391Smaybee * or 17442391Smaybee * - move the buffer to the head of the list if this is 17452391Smaybee * another prefetch (to make it less likely to be evicted). 1746789Sahrens */ 1747789Sahrens if ((buf->b_flags & ARC_PREFETCH) != 0) { 17482391Smaybee if (refcount_count(&buf->b_refcnt) == 0) { 17492391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 17503403Sbmc mutex_enter(&arc_mru->arcs_mtx); 17513403Sbmc list_remove(&arc_mru->arcs_list, buf); 17523403Sbmc list_insert_head(&arc_mru->arcs_list, buf); 17533403Sbmc mutex_exit(&arc_mru->arcs_mtx); 17542391Smaybee } else { 17552391Smaybee buf->b_flags &= ~ARC_PREFETCH; 17563403Sbmc ARCSTAT_BUMP(arcstat_mru_hits); 17572391Smaybee } 17582391Smaybee buf->b_arc_access = lbolt; 1759789Sahrens return; 1760789Sahrens } 1761789Sahrens 1762789Sahrens /* 1763789Sahrens * This buffer has been "accessed" only once so far, 1764789Sahrens * but it is still in the cache. Move it to the MFU 1765789Sahrens * state. 1766789Sahrens */ 1767789Sahrens if (lbolt > buf->b_arc_access + ARC_MINTIME) { 1768789Sahrens /* 1769789Sahrens * More than 125ms have passed since we 1770789Sahrens * instantiated this buffer. Move it to the 1771789Sahrens * most frequently used state. 1772789Sahrens */ 1773789Sahrens buf->b_arc_access = lbolt; 17741544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 17753403Sbmc arc_change_state(arc_mfu, buf, hash_lock); 1776789Sahrens } 17773403Sbmc ARCSTAT_BUMP(arcstat_mru_hits); 17783403Sbmc } else if (buf->b_state == arc_mru_ghost) { 1779789Sahrens arc_state_t *new_state; 1780789Sahrens /* 1781789Sahrens * This buffer has been "accessed" recently, but 1782789Sahrens * was evicted from the cache. Move it to the 1783789Sahrens * MFU state. 1784789Sahrens */ 1785789Sahrens 1786789Sahrens if (buf->b_flags & ARC_PREFETCH) { 17873403Sbmc new_state = arc_mru; 17882391Smaybee if (refcount_count(&buf->b_refcnt) > 0) 17892391Smaybee buf->b_flags &= ~ARC_PREFETCH; 17901544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1791789Sahrens } else { 17923403Sbmc new_state = arc_mfu; 17931544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1794789Sahrens } 1795789Sahrens 1796789Sahrens buf->b_arc_access = lbolt; 1797789Sahrens arc_change_state(new_state, buf, hash_lock); 1798789Sahrens 17993403Sbmc ARCSTAT_BUMP(arcstat_mru_ghost_hits); 18003403Sbmc } else if (buf->b_state == arc_mfu) { 1801789Sahrens /* 1802789Sahrens * This buffer has been accessed more than once and is 1803789Sahrens * still in the cache. Keep it in the MFU state. 
1804789Sahrens * 18052391Smaybee * NOTE: an add_reference() that occurred when we did 18062391Smaybee * the arc_read() will have kicked this off the list. 18072391Smaybee * If it was a prefetch, we will explicitly move it to 18082391Smaybee * the head of the list now. 1809789Sahrens */ 18102391Smaybee if ((buf->b_flags & ARC_PREFETCH) != 0) { 18112391Smaybee ASSERT(refcount_count(&buf->b_refcnt) == 0); 18122391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 18133403Sbmc mutex_enter(&arc_mfu->arcs_mtx); 18143403Sbmc list_remove(&arc_mfu->arcs_list, buf); 18153403Sbmc list_insert_head(&arc_mfu->arcs_list, buf); 18163403Sbmc mutex_exit(&arc_mfu->arcs_mtx); 18172391Smaybee } 18183403Sbmc ARCSTAT_BUMP(arcstat_mfu_hits); 18192391Smaybee buf->b_arc_access = lbolt; 18203403Sbmc } else if (buf->b_state == arc_mfu_ghost) { 18213403Sbmc arc_state_t *new_state = arc_mfu; 1822789Sahrens /* 1823789Sahrens * This buffer has been accessed more than once but has 1824789Sahrens * been evicted from the cache. Move it back to the 1825789Sahrens * MFU state. 1826789Sahrens */ 1827789Sahrens 18282391Smaybee if (buf->b_flags & ARC_PREFETCH) { 18292391Smaybee /* 18302391Smaybee * This is a prefetch access... 18312391Smaybee * move this block back to the MRU state. 18322391Smaybee */ 18332391Smaybee ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 18343403Sbmc new_state = arc_mru; 18352391Smaybee } 18362391Smaybee 1837789Sahrens buf->b_arc_access = lbolt; 18381544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 18392391Smaybee arc_change_state(new_state, buf, hash_lock); 1840789Sahrens 18413403Sbmc ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 1842789Sahrens } else { 1843789Sahrens ASSERT(!"invalid arc state"); 1844789Sahrens } 1845789Sahrens } 1846789Sahrens 1847789Sahrens /* a generic arc_done_func_t which you can use */ 1848789Sahrens /* ARGSUSED */ 1849789Sahrens void 1850789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 1851789Sahrens { 1852789Sahrens bcopy(buf->b_data, arg, buf->b_hdr->b_size); 18531544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1854789Sahrens } 1855789Sahrens 1856789Sahrens /* a generic arc_done_func_t which you can use */ 1857789Sahrens void 1858789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 1859789Sahrens { 1860789Sahrens arc_buf_t **bufp = arg; 1861789Sahrens if (zio && zio->io_error) { 18621544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1863789Sahrens *bufp = NULL; 1864789Sahrens } else { 1865789Sahrens *bufp = buf; 1866789Sahrens } 1867789Sahrens } 1868789Sahrens 1869789Sahrens static void 1870789Sahrens arc_read_done(zio_t *zio) 1871789Sahrens { 18721589Smaybee arc_buf_hdr_t *hdr, *found; 1873789Sahrens arc_buf_t *buf; 1874789Sahrens arc_buf_t *abuf; /* buffer we're assigning to callback */ 1875789Sahrens kmutex_t *hash_lock; 1876789Sahrens arc_callback_t *callback_list, *acb; 1877789Sahrens int freeable = FALSE; 1878789Sahrens 1879789Sahrens buf = zio->io_private; 1880789Sahrens hdr = buf->b_hdr; 1881789Sahrens 18821589Smaybee /* 18831589Smaybee * The hdr was inserted into hash-table and removed from lists 18841589Smaybee * prior to starting I/O. We should find this header, since 18851589Smaybee * it's in the hash table, and it should be legit since it's 18861589Smaybee * not possible to evict it during the I/O. The only possible 18871589Smaybee * reason for it not to be found is if we were freed during the 18881589Smaybee * read. 
18891589Smaybee */ 18901589Smaybee found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 18913093Sahrens &hash_lock); 1892789Sahrens 18931589Smaybee ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 18941589Smaybee (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp)))); 1895789Sahrens 1896789Sahrens /* byteswap if necessary */ 1897789Sahrens callback_list = hdr->b_acb; 1898789Sahrens ASSERT(callback_list != NULL); 1899789Sahrens if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 1900789Sahrens callback_list->acb_byteswap(buf->b_data, hdr->b_size); 1901789Sahrens 19023093Sahrens arc_cksum_compute(buf); 19033093Sahrens 1904789Sahrens /* create copies of the data buffer for the callers */ 1905789Sahrens abuf = buf; 1906789Sahrens for (acb = callback_list; acb; acb = acb->acb_next) { 1907789Sahrens if (acb->acb_done) { 19082688Smaybee if (abuf == NULL) 19092688Smaybee abuf = arc_buf_clone(buf); 1910789Sahrens acb->acb_buf = abuf; 1911789Sahrens abuf = NULL; 1912789Sahrens } 1913789Sahrens } 1914789Sahrens hdr->b_acb = NULL; 1915789Sahrens hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 19161544Seschrock ASSERT(!HDR_BUF_AVAILABLE(hdr)); 19171544Seschrock if (abuf == buf) 19181544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 1919789Sahrens 1920789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 1921789Sahrens 1922789Sahrens if (zio->io_error != 0) { 1923789Sahrens hdr->b_flags |= ARC_IO_ERROR; 19243403Sbmc if (hdr->b_state != arc_anon) 19253403Sbmc arc_change_state(arc_anon, hdr, hash_lock); 19261544Seschrock if (HDR_IN_HASH_TABLE(hdr)) 19271544Seschrock buf_hash_remove(hdr); 1928789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 19292391Smaybee /* convert checksum errors into IO errors */ 19301544Seschrock if (zio->io_error == ECKSUM) 19311544Seschrock zio->io_error = EIO; 1932789Sahrens } 1933789Sahrens 19341544Seschrock /* 19352391Smaybee * Broadcast before we drop the hash_lock to avoid the possibility 19362391Smaybee * that the hdr (and hence the cv) might be freed before we get to 19372391Smaybee * the cv_broadcast(). 19381544Seschrock */ 19391544Seschrock cv_broadcast(&hdr->b_cv); 19401544Seschrock 19411589Smaybee if (hash_lock) { 1942789Sahrens /* 1943789Sahrens * Only call arc_access on anonymous buffers. This is because 1944789Sahrens * if we've issued an I/O for an evicted buffer, we've already 1945789Sahrens * called arc_access (to prevent any simultaneous readers from 1946789Sahrens * getting confused). 1947789Sahrens */ 19483403Sbmc if (zio->io_error == 0 && hdr->b_state == arc_anon) 19492688Smaybee arc_access(hdr, hash_lock); 19502688Smaybee mutex_exit(hash_lock); 1951789Sahrens } else { 1952789Sahrens /* 1953789Sahrens * This block was freed while we waited for the read to 1954789Sahrens * complete. It has been removed from the hash table and 1955789Sahrens * moved to the anonymous state (so that it won't show up 1956789Sahrens * in the cache). 
1957789Sahrens */
19583403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon);
1959789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt);
1960789Sahrens }
1961789Sahrens
1962789Sahrens /* execute each callback and free its structure */
1963789Sahrens while ((acb = callback_list) != NULL) {
1964789Sahrens if (acb->acb_done)
1965789Sahrens acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1966789Sahrens
1967789Sahrens if (acb->acb_zio_dummy != NULL) {
1968789Sahrens acb->acb_zio_dummy->io_error = zio->io_error;
1969789Sahrens zio_nowait(acb->acb_zio_dummy);
1970789Sahrens }
1971789Sahrens
1972789Sahrens callback_list = acb->acb_next;
1973789Sahrens kmem_free(acb, sizeof (arc_callback_t));
1974789Sahrens }
1975789Sahrens
1976789Sahrens if (freeable)
19771544Seschrock arc_hdr_destroy(hdr);
1978789Sahrens }
1979789Sahrens
1980789Sahrens /*
1981789Sahrens * "Read" the block at the specified DVA (in bp) via the
1982789Sahrens * cache. If the block is found in the cache, invoke the provided
1983789Sahrens * callback immediately and return. Note that the `zio' parameter
1984789Sahrens * in the callback will be NULL in this case, since no IO was
1985789Sahrens * required. If the block is not in the cache, pass the read request
1986789Sahrens * on to the spa with a substitute callback function, so that the
1987789Sahrens * requested block will be added to the cache.
1988789Sahrens *
1989789Sahrens * If a read request arrives for a block that has a read in-progress,
1990789Sahrens * either wait for the in-progress read to complete (and return the
1991789Sahrens * results); or, if this is a read with a "done" func, add a record
1992789Sahrens * to the read to invoke the "done" func when the read completes,
1993789Sahrens * and return; or just return.
1994789Sahrens *
1995789Sahrens * arc_read_done() will invoke all the requested "done" functions
1996789Sahrens * for readers of this block.
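 *
 * A caller might use this roughly as follows (an illustrative sketch
 * only; my_read_done, use_the_data, my_arg, swap_func and the
 * priority/flags values are hypothetical stand-ins, not taken from a
 * real consumer):
 *
 *	static void
 *	my_read_done(zio_t *zio, arc_buf_t *buf, void *private)
 *	{
 *		if (zio == NULL || zio->io_error == 0)
 *			use_the_data(buf->b_data, buf->b_hdr->b_size);
 *		VERIFY(arc_buf_remove_ref(buf, private) == 1);
 *	}
 *
 *	uint32_t aflags = ARC_NOWAIT;
 *
 *	(void) arc_read(pio, spa, bp, swap_func, my_read_done, my_arg,
 *	    priority, flags, &aflags, &zb);
 *
 * With ARC_WAIT instead of ARC_NOWAIT the call does not return until
 * the read has completed (arc_read() calls zio_wait() on the zio it
 * issues).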
1997789Sahrens */ 1998789Sahrens int 1999789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2000789Sahrens arc_done_func_t *done, void *private, int priority, int flags, 20012391Smaybee uint32_t *arc_flags, zbookmark_t *zb) 2002789Sahrens { 2003789Sahrens arc_buf_hdr_t *hdr; 2004789Sahrens arc_buf_t *buf; 2005789Sahrens kmutex_t *hash_lock; 2006789Sahrens zio_t *rzio; 2007789Sahrens 2008789Sahrens top: 2009789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 20101544Seschrock if (hdr && hdr->b_datacnt > 0) { 2011789Sahrens 20122391Smaybee *arc_flags |= ARC_CACHED; 20132391Smaybee 2014789Sahrens if (HDR_IO_IN_PROGRESS(hdr)) { 20152391Smaybee 20162391Smaybee if (*arc_flags & ARC_WAIT) { 20172391Smaybee cv_wait(&hdr->b_cv, hash_lock); 20182391Smaybee mutex_exit(hash_lock); 20192391Smaybee goto top; 20202391Smaybee } 20212391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 20222391Smaybee 20232391Smaybee if (done) { 2024789Sahrens arc_callback_t *acb = NULL; 2025789Sahrens 2026789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), 2027789Sahrens KM_SLEEP); 2028789Sahrens acb->acb_done = done; 2029789Sahrens acb->acb_private = private; 2030789Sahrens acb->acb_byteswap = swap; 2031789Sahrens if (pio != NULL) 2032789Sahrens acb->acb_zio_dummy = zio_null(pio, 2033789Sahrens spa, NULL, NULL, flags); 2034789Sahrens 2035789Sahrens ASSERT(acb->acb_done != NULL); 2036789Sahrens acb->acb_next = hdr->b_acb; 2037789Sahrens hdr->b_acb = acb; 2038789Sahrens add_reference(hdr, hash_lock, private); 2039789Sahrens mutex_exit(hash_lock); 2040789Sahrens return (0); 2041789Sahrens } 2042789Sahrens mutex_exit(hash_lock); 2043789Sahrens return (0); 2044789Sahrens } 2045789Sahrens 20463403Sbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2047789Sahrens 20481544Seschrock if (done) { 20492688Smaybee add_reference(hdr, hash_lock, private); 20501544Seschrock /* 20511544Seschrock * If this block is already in use, create a new 20521544Seschrock * copy of the data so that we will be guaranteed 20531544Seschrock * that arc_release() will always succeed. 
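 *
 * In other words (a summary of the code below, for orientation only):
 * if no other consumer currently holds this buf, the
 * HDR_BUF_AVAILABLE flag is still set and the existing buf is handed
 * out directly; otherwise arc_buf_clone() gives this caller its own
 * copy of the data, so that either consumer can later arc_release()
 * independently.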
20541544Seschrock */ 20551544Seschrock buf = hdr->b_buf; 20561544Seschrock ASSERT(buf); 20571544Seschrock ASSERT(buf->b_data); 20582688Smaybee if (HDR_BUF_AVAILABLE(hdr)) { 20591544Seschrock ASSERT(buf->b_efunc == NULL); 20601544Seschrock hdr->b_flags &= ~ARC_BUF_AVAILABLE; 20612688Smaybee } else { 20622688Smaybee buf = arc_buf_clone(buf); 20631544Seschrock } 20642391Smaybee } else if (*arc_flags & ARC_PREFETCH && 20652391Smaybee refcount_count(&hdr->b_refcnt) == 0) { 20662391Smaybee hdr->b_flags |= ARC_PREFETCH; 2067789Sahrens } 2068789Sahrens DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 20692688Smaybee arc_access(hdr, hash_lock); 20702688Smaybee mutex_exit(hash_lock); 20713403Sbmc ARCSTAT_BUMP(arcstat_hits); 20723403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 20733403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 20743403Sbmc data, metadata, hits); 20753403Sbmc 2076789Sahrens if (done) 2077789Sahrens done(NULL, buf, private); 2078789Sahrens } else { 2079789Sahrens uint64_t size = BP_GET_LSIZE(bp); 2080789Sahrens arc_callback_t *acb; 2081789Sahrens 2082789Sahrens if (hdr == NULL) { 2083789Sahrens /* this block is not in the cache */ 2084789Sahrens arc_buf_hdr_t *exists; 20853290Sjohansen arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 20863290Sjohansen buf = arc_buf_alloc(spa, size, private, type); 2087789Sahrens hdr = buf->b_hdr; 2088789Sahrens hdr->b_dva = *BP_IDENTITY(bp); 2089789Sahrens hdr->b_birth = bp->blk_birth; 2090789Sahrens hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2091789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2092789Sahrens if (exists) { 2093789Sahrens /* somebody beat us to the hash insert */ 2094789Sahrens mutex_exit(hash_lock); 2095789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2096789Sahrens hdr->b_birth = 0; 2097789Sahrens hdr->b_cksum0 = 0; 20981544Seschrock (void) arc_buf_remove_ref(buf, private); 2099789Sahrens goto top; /* restart the IO request */ 2100789Sahrens } 21012391Smaybee /* if this is a prefetch, we don't have a reference */ 21022391Smaybee if (*arc_flags & ARC_PREFETCH) { 21032391Smaybee (void) remove_reference(hdr, hash_lock, 21042391Smaybee private); 21052391Smaybee hdr->b_flags |= ARC_PREFETCH; 21062391Smaybee } 21072391Smaybee if (BP_GET_LEVEL(bp) > 0) 21082391Smaybee hdr->b_flags |= ARC_INDIRECT; 2109789Sahrens } else { 2110789Sahrens /* this block is in the ghost cache */ 21111544Seschrock ASSERT(GHOST_STATE(hdr->b_state)); 21121544Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 21132391Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 21142391Smaybee ASSERT(hdr->b_buf == NULL); 2115789Sahrens 21162391Smaybee /* if this is a prefetch, we don't have a reference */ 21172391Smaybee if (*arc_flags & ARC_PREFETCH) 21182391Smaybee hdr->b_flags |= ARC_PREFETCH; 21192391Smaybee else 21202391Smaybee add_reference(hdr, hash_lock, private); 2121789Sahrens buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 21221544Seschrock buf->b_hdr = hdr; 21232688Smaybee buf->b_data = NULL; 21241544Seschrock buf->b_efunc = NULL; 21251544Seschrock buf->b_private = NULL; 21261544Seschrock buf->b_next = NULL; 21271544Seschrock hdr->b_buf = buf; 21282688Smaybee arc_get_data_buf(buf); 21291544Seschrock ASSERT(hdr->b_datacnt == 0); 21301544Seschrock hdr->b_datacnt = 1; 21312391Smaybee 2132789Sahrens } 2133789Sahrens 2134789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2135789Sahrens acb->acb_done = done; 2136789Sahrens acb->acb_private = private; 2137789Sahrens acb->acb_byteswap = swap; 2138789Sahrens 2139789Sahrens ASSERT(hdr->b_acb == 
NULL); 2140789Sahrens hdr->b_acb = acb; 2141789Sahrens hdr->b_flags |= ARC_IO_IN_PROGRESS; 2142789Sahrens 2143789Sahrens /* 2144789Sahrens * If the buffer has been evicted, migrate it to a present state 2145789Sahrens * before issuing the I/O. Once we drop the hash-table lock, 2146789Sahrens * the header will be marked as I/O in progress and have an 2147789Sahrens * attached buffer. At this point, anybody who finds this 2148789Sahrens * buffer ought to notice that it's legit but has a pending I/O. 2149789Sahrens */ 2150789Sahrens 21511544Seschrock if (GHOST_STATE(hdr->b_state)) 21522688Smaybee arc_access(hdr, hash_lock); 21532688Smaybee mutex_exit(hash_lock); 2154789Sahrens 2155789Sahrens ASSERT3U(hdr->b_size, ==, size); 21561596Sahrens DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 21571596Sahrens zbookmark_t *, zb); 21583403Sbmc ARCSTAT_BUMP(arcstat_misses); 21593403Sbmc ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 21603403Sbmc demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 21613403Sbmc data, metadata, misses); 21621544Seschrock 2163789Sahrens rzio = zio_read(pio, spa, bp, buf->b_data, size, 21641544Seschrock arc_read_done, buf, priority, flags, zb); 2165789Sahrens 21662391Smaybee if (*arc_flags & ARC_WAIT) 2167789Sahrens return (zio_wait(rzio)); 2168789Sahrens 21692391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 2170789Sahrens zio_nowait(rzio); 2171789Sahrens } 2172789Sahrens return (0); 2173789Sahrens } 2174789Sahrens 2175789Sahrens /* 2176789Sahrens * arc_read() variant to support pool traversal. If the block is already 2177789Sahrens * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2178789Sahrens * The idea is that we don't want pool traversal filling up memory, but 2179789Sahrens * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2180789Sahrens */ 2181789Sahrens int 2182789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2183789Sahrens { 2184789Sahrens arc_buf_hdr_t *hdr; 2185789Sahrens kmutex_t *hash_mtx; 2186789Sahrens int rc = 0; 2187789Sahrens 2188789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2189789Sahrens 21901544Seschrock if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 21911544Seschrock arc_buf_t *buf = hdr->b_buf; 21921544Seschrock 21931544Seschrock ASSERT(buf); 21941544Seschrock while (buf->b_data == NULL) { 21951544Seschrock buf = buf->b_next; 21961544Seschrock ASSERT(buf); 21971544Seschrock } 21981544Seschrock bcopy(buf->b_data, data, hdr->b_size); 21991544Seschrock } else { 2200789Sahrens rc = ENOENT; 22011544Seschrock } 2202789Sahrens 2203789Sahrens if (hash_mtx) 2204789Sahrens mutex_exit(hash_mtx); 2205789Sahrens 2206789Sahrens return (rc); 2207789Sahrens } 2208789Sahrens 22091544Seschrock void 22101544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 22111544Seschrock { 22121544Seschrock ASSERT(buf->b_hdr != NULL); 22133403Sbmc ASSERT(buf->b_hdr->b_state != arc_anon); 22141544Seschrock ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 22151544Seschrock buf->b_efunc = func; 22161544Seschrock buf->b_private = private; 22171544Seschrock } 22181544Seschrock 22191544Seschrock /* 22201544Seschrock * This is used by the DMU to let the ARC know that a buffer is 22211544Seschrock * being evicted, so the ARC should clean up. If this arc buf 22221544Seschrock * is not yet in the evicted state, it will be put there. 
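 *
 * For example (an illustrative sketch; the names my_evict_func and
 * my_private are hypothetical, not taken from a real consumer): after
 *
 *	arc_set_callback(buf, my_evict_func, my_private);
 *
 * the ARC may later invoke my_evict_func(buf), either below (after
 * the hash lock has been dropped) or from arc_do_user_evicts(). The
 * function can retrieve my_private from buf->b_private, must tear
 * down whatever that refers to, and must return 0. Note that
 * buf->b_data may already be gone by the time the callback runs.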
22231544Seschrock */
22241544Seschrock int
22251544Seschrock arc_buf_evict(arc_buf_t *buf)
22261544Seschrock {
22272887Smaybee arc_buf_hdr_t *hdr;
22281544Seschrock kmutex_t *hash_lock;
22291544Seschrock arc_buf_t **bufp;
22301544Seschrock
22312887Smaybee mutex_enter(&arc_eviction_mtx);
22322887Smaybee hdr = buf->b_hdr;
22331544Seschrock if (hdr == NULL) {
22341544Seschrock /*
22351544Seschrock * We are in arc_do_user_evicts().
22361544Seschrock */
22371544Seschrock ASSERT(buf->b_data == NULL);
22382887Smaybee mutex_exit(&arc_eviction_mtx);
22391544Seschrock return (0);
22401544Seschrock }
22412887Smaybee hash_lock = HDR_LOCK(hdr);
22422887Smaybee mutex_exit(&arc_eviction_mtx);
22431544Seschrock
22441544Seschrock mutex_enter(hash_lock);
22451544Seschrock
22462724Smaybee if (buf->b_data == NULL) {
22472724Smaybee /*
22482724Smaybee * We are on the eviction list.
22492724Smaybee */
22502724Smaybee mutex_exit(hash_lock);
22512724Smaybee mutex_enter(&arc_eviction_mtx);
22522724Smaybee if (buf->b_hdr == NULL) {
22532724Smaybee /*
22542724Smaybee * We are already in arc_do_user_evicts().
22552724Smaybee */
22562724Smaybee mutex_exit(&arc_eviction_mtx);
22572724Smaybee return (0);
22582724Smaybee } else {
22592724Smaybee arc_buf_t copy = *buf; /* structure assignment */
22602724Smaybee /*
22612724Smaybee * Process this buffer now
22622724Smaybee * but let arc_do_user_evicts() do the reaping.
22632724Smaybee */
22642724Smaybee buf->b_efunc = NULL;
22652724Smaybee mutex_exit(&arc_eviction_mtx);
22662724Smaybee VERIFY(copy.b_efunc(&copy) == 0);
22672724Smaybee return (1);
22682724Smaybee }
22692724Smaybee }
22702724Smaybee
22712724Smaybee ASSERT(buf->b_hdr == hdr);
22722724Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
22733403Sbmc ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
22741544Seschrock
22751544Seschrock /*
22761544Seschrock * Pull this buffer off of the hdr
22771544Seschrock */
22781544Seschrock bufp = &hdr->b_buf;
22791544Seschrock while (*bufp != buf)
22801544Seschrock bufp = &(*bufp)->b_next;
22811544Seschrock *bufp = buf->b_next;
22821544Seschrock
22831544Seschrock ASSERT(buf->b_data != NULL);
22842688Smaybee arc_buf_destroy(buf, FALSE, FALSE);
22851544Seschrock
22861544Seschrock if (hdr->b_datacnt == 0) {
22871544Seschrock arc_state_t *old_state = hdr->b_state;
22881544Seschrock arc_state_t *evicted_state;
22891544Seschrock
22901544Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt));
22911544Seschrock
22921544Seschrock evicted_state =
22933403Sbmc (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
22941544Seschrock
22953403Sbmc mutex_enter(&old_state->arcs_mtx);
22963403Sbmc mutex_enter(&evicted_state->arcs_mtx);
22971544Seschrock
22981544Seschrock arc_change_state(evicted_state, hdr, hash_lock);
22991544Seschrock ASSERT(HDR_IN_HASH_TABLE(hdr));
23001544Seschrock hdr->b_flags = ARC_IN_HASH_TABLE;
23011544Seschrock
23023403Sbmc mutex_exit(&evicted_state->arcs_mtx);
23033403Sbmc mutex_exit(&old_state->arcs_mtx);
23041544Seschrock }
23051544Seschrock mutex_exit(hash_lock);
23061819Smaybee
23071544Seschrock VERIFY(buf->b_efunc(buf) == 0);
23081544Seschrock buf->b_efunc = NULL;
23091544Seschrock buf->b_private = NULL;
23101544Seschrock buf->b_hdr = NULL;
23111544Seschrock kmem_cache_free(buf_cache, buf);
23121544Seschrock return (1);
23131544Seschrock }
23141544Seschrock
2315789Sahrens /*
2316789Sahrens * Release this buffer from the cache. This must be done
2317789Sahrens * after a read and prior to modifying the buffer contents.
2318789Sahrens * If the buffer has more than one reference, we must
2319789Sahrens * make a new hdr for the buffer.
2320789Sahrens */
2321789Sahrens void
2322789Sahrens arc_release(arc_buf_t *buf, void *tag)
2323789Sahrens {
2324789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr;
2325789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr);
2326789Sahrens
2327789Sahrens /* this buffer is not on any list */
2328789Sahrens ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2329789Sahrens
23303403Sbmc if (hdr->b_state == arc_anon) {
2331789Sahrens /* this buffer is already released */
2332789Sahrens ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2333789Sahrens ASSERT(BUF_EMPTY(hdr));
23341544Seschrock ASSERT(buf->b_efunc == NULL);
23353093Sahrens arc_buf_thaw(buf);
2336789Sahrens return;
2337789Sahrens }
2338789Sahrens
2339789Sahrens mutex_enter(hash_lock);
2340789Sahrens
23411544Seschrock /*
23421544Seschrock * Do we have more than one buf?
23431544Seschrock */
23441544Seschrock if (hdr->b_buf != buf || buf->b_next != NULL) {
2345789Sahrens arc_buf_hdr_t *nhdr;
2346789Sahrens arc_buf_t **bufp;
2347789Sahrens uint64_t blksz = hdr->b_size;
2348789Sahrens spa_t *spa = hdr->b_spa;
23493290Sjohansen arc_buf_contents_t type = hdr->b_type;
2350789Sahrens
23511544Seschrock ASSERT(hdr->b_datacnt > 1);
2352789Sahrens /*
2353789Sahrens * Pull the data off of this buf and attach it to
2354789Sahrens * a new anonymous buf.
2355789Sahrens */
23561544Seschrock (void) remove_reference(hdr, hash_lock, tag);
2357789Sahrens bufp = &hdr->b_buf;
23581544Seschrock while (*bufp != buf)
2359789Sahrens bufp = &(*bufp)->b_next;
2360789Sahrens *bufp = (*bufp)->b_next;
23611544Seschrock
23623403Sbmc ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
23633403Sbmc atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
23641544Seschrock if (refcount_is_zero(&hdr->b_refcnt)) {
23653403Sbmc ASSERT3U(hdr->b_state->arcs_lsize, >=, hdr->b_size);
23663403Sbmc atomic_add_64(&hdr->b_state->arcs_lsize, -hdr->b_size);
23671544Seschrock }
23681544Seschrock hdr->b_datacnt -= 1;
2369*3547Smaybee arc_cksum_verify(buf);
23701544Seschrock
2371789Sahrens mutex_exit(hash_lock);
2372789Sahrens
2373789Sahrens nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2374789Sahrens nhdr->b_size = blksz;
2375789Sahrens nhdr->b_spa = spa;
23763290Sjohansen nhdr->b_type = type;
2377789Sahrens nhdr->b_buf = buf;
23783403Sbmc nhdr->b_state = arc_anon;
2379789Sahrens nhdr->b_arc_access = 0;
2380789Sahrens nhdr->b_flags = 0;
23811544Seschrock nhdr->b_datacnt = 1;
2382*3547Smaybee nhdr->b_freeze_cksum = NULL;
2383789Sahrens buf->b_hdr = nhdr;
2384789Sahrens buf->b_next = NULL;
2385789Sahrens (void) refcount_add(&nhdr->b_refcnt, tag);
23863403Sbmc atomic_add_64(&arc_anon->arcs_size, blksz);
2387789Sahrens
2388789Sahrens hdr = nhdr;
2389789Sahrens } else {
23901544Seschrock ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2391789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node));
2392789Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr));
23933403Sbmc arc_change_state(arc_anon, hdr, hash_lock);
2394789Sahrens hdr->b_arc_access = 0;
2395789Sahrens mutex_exit(hash_lock);
2396789Sahrens bzero(&hdr->b_dva, sizeof (dva_t));
2397789Sahrens hdr->b_birth = 0;
2398789Sahrens hdr->b_cksum0 = 0;
2399*3547Smaybee arc_buf_thaw(buf);
2400789Sahrens }
24011544Seschrock buf->b_efunc = NULL;
24021544Seschrock buf->b_private = NULL;
2403789Sahrens }
2404789Sahrens
2405789Sahrens int
2406789Sahrens arc_released(arc_buf_t *buf)
2407789Sahrens {
24083403Sbmc return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
24091544Seschrock }
24101544Seschrock
24111544Seschrock int
24121544Seschrock arc_has_callback(arc_buf_t *buf)
24131544Seschrock {
24141544Seschrock return (buf->b_efunc != NULL);
2415789Sahrens }
2416789Sahrens
24171544Seschrock #ifdef ZFS_DEBUG
24181544Seschrock int
24191544Seschrock arc_referenced(arc_buf_t *buf)
24201544Seschrock {
24211544Seschrock return (refcount_count(&buf->b_hdr->b_refcnt));
24221544Seschrock }
24231544Seschrock #endif
24241544Seschrock
2425789Sahrens static void
2426*3547Smaybee arc_write_ready(zio_t *zio)
2427*3547Smaybee {
2428*3547Smaybee arc_write_callback_t *callback = zio->io_private;
2429*3547Smaybee arc_buf_t *buf = callback->awcb_buf;
2430*3547Smaybee
2431*3547Smaybee if (callback->awcb_ready) {
2432*3547Smaybee ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
2433*3547Smaybee callback->awcb_ready(zio, buf, callback->awcb_private);
2434*3547Smaybee }
2435*3547Smaybee arc_cksum_compute(buf);
2436*3547Smaybee }
2437*3547Smaybee
2438*3547Smaybee static void
2439789Sahrens arc_write_done(zio_t *zio)
2440789Sahrens {
2441*3547Smaybee arc_write_callback_t *callback = zio->io_private;
2442*3547Smaybee arc_buf_t *buf = callback->awcb_buf;
2443*3547Smaybee arc_buf_hdr_t *hdr = buf->b_hdr;
2444789Sahrens
2445789Sahrens hdr->b_acb = NULL;
2446789Sahrens
2447789Sahrens /* this buffer is on no lists and is not in the hash table */
24483403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon);
2449789Sahrens
2450789Sahrens hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2451789Sahrens hdr->b_birth = zio->io_bp->blk_birth;
2452789Sahrens hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
24531544Seschrock /*
24541544Seschrock * If the block to be written was all-zero, we may have
24551544Seschrock * compressed it away. In this case no write was performed
24561544Seschrock * so there will be no dva/birth-date/checksum. The buffer
24571544Seschrock * must therefore remain anonymous (and uncached).
24581544Seschrock */
2459789Sahrens if (!BUF_EMPTY(hdr)) {
2460789Sahrens arc_buf_hdr_t *exists;
2461789Sahrens kmutex_t *hash_lock;
2462789Sahrens
24633093Sahrens arc_cksum_verify(buf);
24643093Sahrens
2465789Sahrens exists = buf_hash_insert(hdr, &hash_lock);
2466789Sahrens if (exists) {
2467789Sahrens /*
2468789Sahrens * This can only happen if we overwrite for
2469789Sahrens * sync-to-convergence, because we remove
2470789Sahrens * buffers from the hash table when we arc_free().
2471789Sahrens */
2472789Sahrens ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2473789Sahrens BP_IDENTITY(zio->io_bp)));
2474789Sahrens ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2475789Sahrens zio->io_bp->blk_birth);
2476789Sahrens
2477789Sahrens ASSERT(refcount_is_zero(&exists->b_refcnt));
24783403Sbmc arc_change_state(arc_anon, exists, hash_lock);
2479789Sahrens mutex_exit(hash_lock);
24801544Seschrock arc_hdr_destroy(exists);
2481789Sahrens exists = buf_hash_insert(hdr, &hash_lock);
2482789Sahrens ASSERT3P(exists, ==, NULL);
2483789Sahrens }
24841544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
24852688Smaybee arc_access(hdr, hash_lock);
24862688Smaybee mutex_exit(hash_lock);
2487*3547Smaybee } else if (callback->awcb_done == NULL) {
24881544Seschrock int destroy_hdr;
24891544Seschrock /*
24901544Seschrock * This is an anonymous buffer with no user callback,
24911544Seschrock * destroy it if there are no active references.
24921544Seschrock */ 24931544Seschrock mutex_enter(&arc_eviction_mtx); 24941544Seschrock destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 24951544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 24961544Seschrock mutex_exit(&arc_eviction_mtx); 24971544Seschrock if (destroy_hdr) 24981544Seschrock arc_hdr_destroy(hdr); 24991544Seschrock } else { 25001544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2501789Sahrens } 25021544Seschrock 2503*3547Smaybee if (callback->awcb_done) { 2504789Sahrens ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 2505*3547Smaybee callback->awcb_done(zio, buf, callback->awcb_private); 2506789Sahrens } 2507789Sahrens 2508*3547Smaybee kmem_free(callback, sizeof (arc_write_callback_t)); 2509789Sahrens } 2510789Sahrens 2511*3547Smaybee zio_t * 25121775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 2513789Sahrens uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 2514*3547Smaybee arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 2515*3547Smaybee int flags, zbookmark_t *zb) 2516789Sahrens { 2517789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 2518*3547Smaybee arc_write_callback_t *callback; 2519*3547Smaybee zio_t *zio; 2520789Sahrens 2521789Sahrens /* this is a private buffer - no locking required */ 25223403Sbmc ASSERT3P(hdr->b_state, ==, arc_anon); 2523789Sahrens ASSERT(BUF_EMPTY(hdr)); 2524789Sahrens ASSERT(!HDR_IO_ERROR(hdr)); 25252237Smaybee ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 25262237Smaybee ASSERT(hdr->b_acb == 0); 2527*3547Smaybee callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 2528*3547Smaybee callback->awcb_ready = ready; 2529*3547Smaybee callback->awcb_done = done; 2530*3547Smaybee callback->awcb_private = private; 2531*3547Smaybee callback->awcb_buf = buf; 25321544Seschrock hdr->b_flags |= ARC_IO_IN_PROGRESS; 2533*3547Smaybee zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 2534*3547Smaybee buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 2535*3547Smaybee priority, flags, zb); 2536789Sahrens 2537*3547Smaybee return (zio); 2538789Sahrens } 2539789Sahrens 2540789Sahrens int 2541789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 2542789Sahrens zio_done_func_t *done, void *private, uint32_t arc_flags) 2543789Sahrens { 2544789Sahrens arc_buf_hdr_t *ab; 2545789Sahrens kmutex_t *hash_lock; 2546789Sahrens zio_t *zio; 2547789Sahrens 2548789Sahrens /* 2549789Sahrens * If this buffer is in the cache, release it, so it 2550789Sahrens * can be re-used. 2551789Sahrens */ 2552789Sahrens ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2553789Sahrens if (ab != NULL) { 2554789Sahrens /* 2555789Sahrens * The checksum of blocks to free is not always 2556789Sahrens * preserved (eg. on the deadlist). However, if it is 2557789Sahrens * nonzero, it should match what we have in the cache. 2558789Sahrens */ 2559789Sahrens ASSERT(bp->blk_cksum.zc_word[0] == 0 || 2560789Sahrens ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 25613403Sbmc if (ab->b_state != arc_anon) 25623403Sbmc arc_change_state(arc_anon, ab, hash_lock); 25632391Smaybee if (HDR_IO_IN_PROGRESS(ab)) { 25642391Smaybee /* 25652391Smaybee * This should only happen when we prefetch. 
25662391Smaybee */ 25672391Smaybee ASSERT(ab->b_flags & ARC_PREFETCH); 25682391Smaybee ASSERT3U(ab->b_datacnt, ==, 1); 25692391Smaybee ab->b_flags |= ARC_FREED_IN_READ; 25702391Smaybee if (HDR_IN_HASH_TABLE(ab)) 25712391Smaybee buf_hash_remove(ab); 25722391Smaybee ab->b_arc_access = 0; 25732391Smaybee bzero(&ab->b_dva, sizeof (dva_t)); 25742391Smaybee ab->b_birth = 0; 25752391Smaybee ab->b_cksum0 = 0; 25762391Smaybee ab->b_buf->b_efunc = NULL; 25772391Smaybee ab->b_buf->b_private = NULL; 25782391Smaybee mutex_exit(hash_lock); 25792391Smaybee } else if (refcount_is_zero(&ab->b_refcnt)) { 2580789Sahrens mutex_exit(hash_lock); 25811544Seschrock arc_hdr_destroy(ab); 25823403Sbmc ARCSTAT_BUMP(arcstat_deleted); 2583789Sahrens } else { 25841589Smaybee /* 25852391Smaybee * We still have an active reference on this 25862391Smaybee * buffer. This can happen, e.g., from 25872391Smaybee * dbuf_unoverride(). 25881589Smaybee */ 25892391Smaybee ASSERT(!HDR_IN_HASH_TABLE(ab)); 2590789Sahrens ab->b_arc_access = 0; 2591789Sahrens bzero(&ab->b_dva, sizeof (dva_t)); 2592789Sahrens ab->b_birth = 0; 2593789Sahrens ab->b_cksum0 = 0; 25941544Seschrock ab->b_buf->b_efunc = NULL; 25951544Seschrock ab->b_buf->b_private = NULL; 2596789Sahrens mutex_exit(hash_lock); 2597789Sahrens } 2598789Sahrens } 2599789Sahrens 2600789Sahrens zio = zio_free(pio, spa, txg, bp, done, private); 2601789Sahrens 2602789Sahrens if (arc_flags & ARC_WAIT) 2603789Sahrens return (zio_wait(zio)); 2604789Sahrens 2605789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 2606789Sahrens zio_nowait(zio); 2607789Sahrens 2608789Sahrens return (0); 2609789Sahrens } 2610789Sahrens 2611789Sahrens void 2612789Sahrens arc_tempreserve_clear(uint64_t tempreserve) 2613789Sahrens { 2614789Sahrens atomic_add_64(&arc_tempreserve, -tempreserve); 2615789Sahrens ASSERT((int64_t)arc_tempreserve >= 0); 2616789Sahrens } 2617789Sahrens 2618789Sahrens int 2619789Sahrens arc_tempreserve_space(uint64_t tempreserve) 2620789Sahrens { 2621789Sahrens #ifdef ZFS_DEBUG 2622789Sahrens /* 2623789Sahrens * Once in a while, fail for no reason. Everything should cope. 2624789Sahrens */ 2625789Sahrens if (spa_get_random(10000) == 0) { 2626789Sahrens dprintf("forcing random failure\n"); 2627789Sahrens return (ERESTART); 2628789Sahrens } 2629789Sahrens #endif 26303403Sbmc if (tempreserve > arc_c/4 && !arc_no_grow) 26313403Sbmc arc_c = MIN(arc_c_max, tempreserve * 4); 26323403Sbmc if (tempreserve > arc_c) 2633982Smaybee return (ENOMEM); 2634982Smaybee 2635789Sahrens /* 2636982Smaybee * Throttle writes when the amount of dirty data in the cache 2637982Smaybee * gets too large. We try to keep the cache less than half full 2638982Smaybee * of dirty blocks so that our sync times don't grow too large. 2639982Smaybee * Note: if two requests come in concurrently, we might let them 2640982Smaybee * both succeed, when one of them should fail. Not a huge deal. 2641982Smaybee * 2642982Smaybee * XXX The limit should be adjusted dynamically to keep the time 2643982Smaybee * to sync a dataset fixed (around 1-5 seconds?). 
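 *
 * As a worked (illustrative) example: with arc_c = 1GB, a new
 * reservation of 100MB is refused with ERESTART if the anonymous
 * (dirty) data plus outstanding reservations already exceed 256MB
 * (arc_c / 4) and the new total would push past 512MB (arc_c / 2);
 * otherwise the 100MB is simply added to arc_tempreserve and the
 * caller may proceed.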
2644789Sahrens */ 2645789Sahrens 26463403Sbmc if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 26473403Sbmc arc_tempreserve + arc_anon->arcs_size > arc_c / 4) { 2648789Sahrens dprintf("failing, arc_tempreserve=%lluK anon=%lluK " 26493403Sbmc "tempreserve=%lluK arc_c=%lluK\n", 26503403Sbmc arc_tempreserve>>10, arc_anon->arcs_lsize>>10, 26513403Sbmc tempreserve>>10, arc_c>>10); 2652789Sahrens return (ERESTART); 2653789Sahrens } 2654789Sahrens atomic_add_64(&arc_tempreserve, tempreserve); 2655789Sahrens return (0); 2656789Sahrens } 2657789Sahrens 2658789Sahrens void 2659789Sahrens arc_init(void) 2660789Sahrens { 2661789Sahrens mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 2662789Sahrens cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 2663789Sahrens 26642391Smaybee /* Convert seconds to clock ticks */ 26652638Sperrin arc_min_prefetch_lifespan = 1 * hz; 26662391Smaybee 2667789Sahrens /* Start out with 1/8 of all memory */ 26683403Sbmc arc_c = physmem * PAGESIZE / 8; 2669789Sahrens 2670789Sahrens #ifdef _KERNEL 2671789Sahrens /* 2672789Sahrens * On architectures where the physical memory can be larger 2673789Sahrens * than the addressable space (intel in 32-bit mode), we may 2674789Sahrens * need to limit the cache to 1/8 of VM size. 2675789Sahrens */ 26763403Sbmc arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 2677789Sahrens #endif 2678789Sahrens 2679982Smaybee /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 26803403Sbmc arc_c_min = MAX(arc_c / 4, 64<<20); 2681982Smaybee /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 26823403Sbmc if (arc_c * 8 >= 1<<30) 26833403Sbmc arc_c_max = (arc_c * 8) - (1<<30); 2684789Sahrens else 26853403Sbmc arc_c_max = arc_c_min; 26863403Sbmc arc_c_max = MAX(arc_c * 6, arc_c_max); 26872885Sahrens 26882885Sahrens /* 26892885Sahrens * Allow the tunables to override our calculations if they are 26902885Sahrens * reasonable (ie. 
over 64MB) 26912885Sahrens */ 26922885Sahrens if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 26933403Sbmc arc_c_max = zfs_arc_max; 26943403Sbmc if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 26953403Sbmc arc_c_min = zfs_arc_min; 26962885Sahrens 26973403Sbmc arc_c = arc_c_max; 26983403Sbmc arc_p = (arc_c >> 1); 2699789Sahrens 2700789Sahrens /* if kmem_flags are set, lets try to use less memory */ 2701789Sahrens if (kmem_debugging()) 27023403Sbmc arc_c = arc_c / 2; 27033403Sbmc if (arc_c < arc_c_min) 27043403Sbmc arc_c = arc_c_min; 2705789Sahrens 27063403Sbmc arc_anon = &ARC_anon; 27073403Sbmc arc_mru = &ARC_mru; 27083403Sbmc arc_mru_ghost = &ARC_mru_ghost; 27093403Sbmc arc_mfu = &ARC_mfu; 27103403Sbmc arc_mfu_ghost = &ARC_mfu_ghost; 27113403Sbmc arc_size = 0; 2712789Sahrens 27133403Sbmc mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 27143403Sbmc mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 27153403Sbmc mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 27163403Sbmc mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 27173403Sbmc mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 27182688Smaybee 27193403Sbmc list_create(&arc_mru->arcs_list, sizeof (arc_buf_hdr_t), 27203403Sbmc offsetof(arc_buf_hdr_t, b_arc_node)); 27213403Sbmc list_create(&arc_mru_ghost->arcs_list, sizeof (arc_buf_hdr_t), 2722789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 27233403Sbmc list_create(&arc_mfu->arcs_list, sizeof (arc_buf_hdr_t), 2724789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 27253403Sbmc list_create(&arc_mfu_ghost->arcs_list, sizeof (arc_buf_hdr_t), 2726789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 2727789Sahrens 2728789Sahrens buf_init(); 2729789Sahrens 2730789Sahrens arc_thread_exit = 0; 27311544Seschrock arc_eviction_list = NULL; 27321544Seschrock mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 27332887Smaybee bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 2734789Sahrens 27353403Sbmc arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 27363403Sbmc sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 27373403Sbmc 27383403Sbmc if (arc_ksp != NULL) { 27393403Sbmc arc_ksp->ks_data = &arc_stats; 27403403Sbmc kstat_install(arc_ksp); 27413403Sbmc } 27423403Sbmc 2743789Sahrens (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 2744789Sahrens TS_RUN, minclsyspri); 27453158Smaybee 27463158Smaybee arc_dead = FALSE; 2747789Sahrens } 2748789Sahrens 2749789Sahrens void 2750789Sahrens arc_fini(void) 2751789Sahrens { 2752789Sahrens mutex_enter(&arc_reclaim_thr_lock); 2753789Sahrens arc_thread_exit = 1; 2754789Sahrens while (arc_thread_exit != 0) 2755789Sahrens cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 2756789Sahrens mutex_exit(&arc_reclaim_thr_lock); 2757789Sahrens 2758789Sahrens arc_flush(); 2759789Sahrens 2760789Sahrens arc_dead = TRUE; 2761789Sahrens 27623403Sbmc if (arc_ksp != NULL) { 27633403Sbmc kstat_delete(arc_ksp); 27643403Sbmc arc_ksp = NULL; 27653403Sbmc } 27663403Sbmc 27671544Seschrock mutex_destroy(&arc_eviction_mtx); 2768789Sahrens mutex_destroy(&arc_reclaim_thr_lock); 2769789Sahrens cv_destroy(&arc_reclaim_thr_cv); 2770789Sahrens 27713403Sbmc list_destroy(&arc_mru->arcs_list); 27723403Sbmc list_destroy(&arc_mru_ghost->arcs_list); 27733403Sbmc list_destroy(&arc_mfu->arcs_list); 27743403Sbmc list_destroy(&arc_mfu_ghost->arcs_list); 2775789Sahrens 27763403Sbmc mutex_destroy(&arc_anon->arcs_mtx); 27773403Sbmc mutex_destroy(&arc_mru->arcs_mtx); 
27783403Sbmc mutex_destroy(&arc_mru_ghost->arcs_mtx); 27793403Sbmc mutex_destroy(&arc_mfu->arcs_mtx); 27803403Sbmc mutex_destroy(&arc_mfu_ghost->arcs_mtx); 27812856Snd150628 2782789Sahrens buf_fini(); 2783789Sahrens } 2784