1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51484Sek110237  * Common Development and Distribution License (the "License").
61484Sek110237  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
221484Sek110237  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
26789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27789Sahrens 
28789Sahrens /*
29789Sahrens  * DVA-based Adjustable Replacement Cache
30789Sahrens  *
311544Seschrock  * While much of the theory of operation used here is
321544Seschrock  * based on the self-tuning, low overhead replacement cache
33789Sahrens  * presented by Megiddo and Modha at FAST 2003, there are some
34789Sahrens  * significant differences:
35789Sahrens  *
36789Sahrens  * 1. The Megiddo and Modha model assumes any page is evictable.
37789Sahrens  *    Pages in its cache cannot be "locked" into memory.  This makes
38789Sahrens  *    the eviction algorithm simple: evict the last page in the list.
39789Sahrens  *    This also makes the performance characteristics easy to reason
40789Sahrens  *    about.  Our cache is not so simple.  At any given moment, some
41789Sahrens  *    subset of the blocks in the cache are un-evictable because we
42789Sahrens  *    have handed out a reference to them.  Blocks are only evictable
43789Sahrens  *    when there are no external references active.  This makes
44789Sahrens  *    eviction far more problematic: we choose to evict the evictable
45789Sahrens  *    blocks that are the "lowest" in the list.
46789Sahrens  *
47789Sahrens  *    There are times when it is not possible to evict the requested
48789Sahrens  *    space.  In these circumstances we are unable to adjust the cache
49789Sahrens  *    size.  To prevent the cache growing unbounded at these times we
50789Sahrens  *    implement a "cache throttle" that slows the flow of new data
51789Sahrens  *    into the cache until we can make space available.
52789Sahrens  *
53789Sahrens  * 2. The Megiddo and Modha model assumes a fixed cache size.
54789Sahrens  *    Pages are evicted when the cache is full and there is a cache
55789Sahrens  *    miss.  Our model has a variable sized cache.  It grows with
56789Sahrens  *    high use, but also tries to react to memory pressure from the
57789Sahrens  *    operating system: decreasing its size when system memory is
58789Sahrens  *    tight.
59789Sahrens  *
60789Sahrens  * 3. The Megiddo and Modha model assumes a fixed page size. All
61789Sahrens  *    elements of the cache are therefore exactly the same size.  So
62789Sahrens  *    when adjusting the cache size following a cache miss, it's simply
63789Sahrens  *    a matter of choosing a single page to evict.  In our model, we
64789Sahrens  *    have variable sized cache blocks (ranging from 512 bytes to
65789Sahrens  *    128K bytes).  We therefore choose a set of blocks to evict to make
66789Sahrens  *    space for a cache miss that approximates as closely as possible
67789Sahrens  *    the space used by the new block.
68789Sahrens  *
69789Sahrens  * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70789Sahrens  * by N. Megiddo & D. Modha, FAST 2003
71789Sahrens  */
72789Sahrens 
73789Sahrens /*
74789Sahrens  * The locking model:
75789Sahrens  *
76789Sahrens  * A new reference to a cache buffer can be obtained in two
77789Sahrens  * ways: 1) via a hash table lookup using the DVA as a key,
78789Sahrens  * or 2) via one of the ARC lists.  The arc_read() interface
79789Sahrens  * uses method 1, while the internal arc algorithms for
80789Sahrens  * adjusting the cache use method 2.  We therefore provide two
81789Sahrens  * types of locks: 1) the hash table lock array, and 2) the
82789Sahrens  * arc list locks.
83789Sahrens  *
84789Sahrens  * Buffers do not have their own mutexes, rather they rely on the
85789Sahrens  * hash table mutexes for the bulk of their protection (i.e. most
86789Sahrens  * fields in the arc_buf_hdr_t are protected by these mutexes).
87789Sahrens  *
88789Sahrens  * buf_hash_find() returns the appropriate mutex (held) when it
89789Sahrens  * locates the requested buffer in the hash table.  It returns
90789Sahrens  * NULL for the mutex if the buffer was not in the table.
91789Sahrens  *
92789Sahrens  * buf_hash_remove() expects the appropriate hash mutex to be
93789Sahrens  * already held before it is invoked.
94789Sahrens  *
95789Sahrens  * Each arc state also has a mutex which is used to protect the
96789Sahrens  * buffer list associated with the state.  When attempting to
97789Sahrens  * obtain a hash table lock while holding an arc list lock you
98789Sahrens  * must use: mutex_tryenter() to avoid deadlock.  Also note that
992688Smaybee  * the active state mutex must be held before the ghost state mutex.
100789Sahrens  *
1011544Seschrock  * Arc buffers may have an associated eviction callback function.
1021544Seschrock  * This function will be invoked prior to removing the buffer (e.g.
1031544Seschrock  * in arc_do_user_evicts()).  Note however that the data associated
1041544Seschrock  * with the buffer may be evicted prior to the callback.  The callback
1051544Seschrock  * must be made with *no locks held* (to prevent deadlock).  Additionally,
1061544Seschrock  * the users of callbacks must ensure that their private data is
1071544Seschrock  * protected from simultaneous callbacks from arc_buf_evict()
1081544Seschrock  * and arc_do_user_evicts().
1091544Seschrock  *
110789Sahrens  * Note that the majority of the performance stats are manipulated
111789Sahrens  * with atomic operations.
112789Sahrens  */
113789Sahrens 
114789Sahrens #include <sys/spa.h>
115789Sahrens #include <sys/zio.h>
1163093Sahrens #include <sys/zio_checksum.h>
117789Sahrens #include <sys/zfs_context.h>
118789Sahrens #include <sys/arc.h>
119789Sahrens #include <sys/refcount.h>
120789Sahrens #ifdef _KERNEL
121789Sahrens #include <sys/vmsystm.h>
122789Sahrens #include <vm/anon.h>
123789Sahrens #include <sys/fs/swapnode.h>
1241484Sek110237 #include <sys/dnlc.h>
125789Sahrens #endif
126789Sahrens #include <sys/callb.h>
127789Sahrens 
128789Sahrens static kmutex_t		arc_reclaim_thr_lock;
129789Sahrens static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
130789Sahrens static uint8_t		arc_thread_exit;
131789Sahrens 
1321484Sek110237 #define	ARC_REDUCE_DNLC_PERCENT	3
1331484Sek110237 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
1341484Sek110237 
135789Sahrens typedef enum arc_reclaim_strategy {
136789Sahrens 	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
137789Sahrens 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
138789Sahrens } arc_reclaim_strategy_t;
139789Sahrens 
140789Sahrens /* number of seconds before growing cache again */
141789Sahrens static int arc_grow_retry = 60;
142789Sahrens 
1432391Smaybee /*
1442638Sperrin  * minimum lifespan of a prefetch block in clock ticks
1452638Sperrin  * (initialized in arc_init())
1462391Smaybee  */
1472638Sperrin static int arc_min_prefetch_lifespan;
1482391Smaybee 
149789Sahrens static int arc_dead;
150789Sahrens 
151789Sahrens /*
1522885Sahrens  * These tunables are for performance analysis.
1532885Sahrens  */
1542885Sahrens uint64_t zfs_arc_max;
1552885Sahrens uint64_t zfs_arc_min;
1562885Sahrens 
1572885Sahrens /*
158789Sahrens  * Note that buffers can be in one of 5 states:
159789Sahrens  *	ARC_anon	- anonymous (discussed below)
1601544Seschrock  *	ARC_mru		- recently used, currently cached
1611544Seschrock  *	ARC_mru_ghost	- recently used, no longer in cache
1621544Seschrock  *	ARC_mfu		- frequently used, currently cached
1631544Seschrock  *	ARC_mfu_ghost	- frequently used, no longer in cache
164789Sahrens  * When there are no active references to a buffer, it is linked
165789Sahrens  * onto one of the lists in arc.  These are the only buffers that
166789Sahrens  * can be evicted or deleted.
167789Sahrens  *
168789Sahrens  * Anonymous buffers are buffers that are not associated with
169789Sahrens  * a DVA.  These are buffers that hold dirty block copies
170789Sahrens  * before they are written to stable storage.  By definition,
1711544Seschrock  * they are "ref'd" and are considered part of arc_mru
172789Sahrens  * that cannot be freed.  Generally, they will acquire a DVA
1731544Seschrock  * as they are written and migrate onto the arc_mru list.
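 *
 * In short (a rough summary of arc_access() and arc_evict() below):
 * a block moves anon -> mru on its first access, mru -> mfu when it
 * is used again, mru -> mru_ghost or mfu -> mfu_ghost when its data
 * is evicted, and a hit on either ghost list brings it back into the
 * cache as part of arc_mfu.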
174789Sahrens */ 175789Sahrens 176789Sahrens typedef struct arc_state { 177789Sahrens list_t list; /* linked list of evictable buffer in state */ 178789Sahrens uint64_t lsize; /* total size of buffers in the linked list */ 179789Sahrens uint64_t size; /* total size of all buffers in this state */ 180789Sahrens uint64_t hits; 181789Sahrens kmutex_t mtx; 182789Sahrens } arc_state_t; 183789Sahrens 184789Sahrens /* The 5 states: */ 185789Sahrens static arc_state_t ARC_anon; 1861544Seschrock static arc_state_t ARC_mru; 1871544Seschrock static arc_state_t ARC_mru_ghost; 1881544Seschrock static arc_state_t ARC_mfu; 1891544Seschrock static arc_state_t ARC_mfu_ghost; 190789Sahrens 191789Sahrens static struct arc { 192789Sahrens arc_state_t *anon; 1931544Seschrock arc_state_t *mru; 1941544Seschrock arc_state_t *mru_ghost; 1951544Seschrock arc_state_t *mfu; 1961544Seschrock arc_state_t *mfu_ghost; 197789Sahrens uint64_t size; /* Actual total arc size */ 1981544Seschrock uint64_t p; /* Target size (in bytes) of mru */ 199789Sahrens uint64_t c; /* Target size of cache (in bytes) */ 200789Sahrens uint64_t c_min; /* Minimum target cache size */ 201789Sahrens uint64_t c_max; /* Maximum target cache size */ 202789Sahrens 203789Sahrens /* performance stats */ 204789Sahrens uint64_t hits; 205789Sahrens uint64_t misses; 206789Sahrens uint64_t deleted; 2072688Smaybee uint64_t recycle_miss; 2082688Smaybee uint64_t mutex_miss; 2092688Smaybee uint64_t evict_skip; 210789Sahrens uint64_t hash_elements; 211789Sahrens uint64_t hash_elements_max; 212789Sahrens uint64_t hash_collisions; 213789Sahrens uint64_t hash_chains; 214789Sahrens uint32_t hash_chain_max; 215789Sahrens 216789Sahrens int no_grow; /* Don't try to grow cache size */ 217789Sahrens } arc; 218789Sahrens 219789Sahrens static uint64_t arc_tempreserve; 220789Sahrens 221789Sahrens typedef struct arc_callback arc_callback_t; 222789Sahrens 223789Sahrens struct arc_callback { 224789Sahrens arc_done_func_t *acb_done; 225789Sahrens void *acb_private; 226789Sahrens arc_byteswap_func_t *acb_byteswap; 227789Sahrens arc_buf_t *acb_buf; 228789Sahrens zio_t *acb_zio_dummy; 229789Sahrens arc_callback_t *acb_next; 230789Sahrens }; 231789Sahrens 232789Sahrens struct arc_buf_hdr { 233789Sahrens /* protected by hash lock */ 234789Sahrens dva_t b_dva; 235789Sahrens uint64_t b_birth; 236789Sahrens uint64_t b_cksum0; 237789Sahrens 2383093Sahrens kmutex_t b_freeze_lock; 2393093Sahrens zio_cksum_t *b_freeze_cksum; 2403093Sahrens 241789Sahrens arc_buf_hdr_t *b_hash_next; 242789Sahrens arc_buf_t *b_buf; 243789Sahrens uint32_t b_flags; 2441544Seschrock uint32_t b_datacnt; 245789Sahrens 246*3290Sjohansen arc_callback_t *b_acb; 247789Sahrens kcondvar_t b_cv; 248*3290Sjohansen 249*3290Sjohansen /* immutable */ 250*3290Sjohansen arc_buf_contents_t b_type; 251*3290Sjohansen uint64_t b_size; 252*3290Sjohansen spa_t *b_spa; 253789Sahrens 254789Sahrens /* protected by arc state mutex */ 255789Sahrens arc_state_t *b_state; 256789Sahrens list_node_t b_arc_node; 257789Sahrens 258789Sahrens /* updated atomically */ 259789Sahrens clock_t b_arc_access; 260789Sahrens 261789Sahrens /* self protecting */ 262789Sahrens refcount_t b_refcnt; 263789Sahrens }; 264789Sahrens 2651544Seschrock static arc_buf_t *arc_eviction_list; 2661544Seschrock static kmutex_t arc_eviction_mtx; 2672887Smaybee static arc_buf_hdr_t arc_eviction_hdr; 2682688Smaybee static void arc_get_data_buf(arc_buf_t *buf); 2692688Smaybee static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); 2701544Seschrock 2711544Seschrock 
#define GHOST_STATE(state) \ 2721544Seschrock ((state) == arc.mru_ghost || (state) == arc.mfu_ghost) 2731544Seschrock 274789Sahrens /* 275789Sahrens * Private ARC flags. These flags are private ARC only flags that will show up 276789Sahrens * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 277789Sahrens * be passed in as arc_flags in things like arc_read. However, these flags 278789Sahrens * should never be passed and should only be set by ARC code. When adding new 279789Sahrens * public flags, make sure not to smash the private ones. 280789Sahrens */ 281789Sahrens 2821544Seschrock #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 283789Sahrens #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 284789Sahrens #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 285789Sahrens #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 2861544Seschrock #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 2872391Smaybee #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 288789Sahrens 2891544Seschrock #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 290789Sahrens #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 291789Sahrens #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 292789Sahrens #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 2931544Seschrock #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 294789Sahrens 295789Sahrens /* 296789Sahrens * Hash table routines 297789Sahrens */ 298789Sahrens 299789Sahrens #define HT_LOCK_PAD 64 300789Sahrens 301789Sahrens struct ht_lock { 302789Sahrens kmutex_t ht_lock; 303789Sahrens #ifdef _KERNEL 304789Sahrens unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 305789Sahrens #endif 306789Sahrens }; 307789Sahrens 308789Sahrens #define BUF_LOCKS 256 309789Sahrens typedef struct buf_hash_table { 310789Sahrens uint64_t ht_mask; 311789Sahrens arc_buf_hdr_t **ht_table; 312789Sahrens struct ht_lock ht_locks[BUF_LOCKS]; 313789Sahrens } buf_hash_table_t; 314789Sahrens 315789Sahrens static buf_hash_table_t buf_hash_table; 316789Sahrens 317789Sahrens #define BUF_HASH_INDEX(spa, dva, birth) \ 318789Sahrens (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 319789Sahrens #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 320789Sahrens #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 321789Sahrens #define HDR_LOCK(buf) \ 322789Sahrens (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 323789Sahrens 324789Sahrens uint64_t zfs_crc64_table[256]; 325789Sahrens 326789Sahrens static uint64_t 327789Sahrens buf_hash(spa_t *spa, dva_t *dva, uint64_t birth) 328789Sahrens { 329789Sahrens uintptr_t spav = (uintptr_t)spa; 330789Sahrens uint8_t *vdva = (uint8_t *)dva; 331789Sahrens uint64_t crc = -1ULL; 332789Sahrens int i; 333789Sahrens 334789Sahrens ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 335789Sahrens 336789Sahrens for (i = 0; i < sizeof (dva_t); i++) 337789Sahrens crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 338789Sahrens 339789Sahrens crc ^= (spav>>8) ^ birth; 340789Sahrens 341789Sahrens return (crc); 342789Sahrens } 343789Sahrens 344789Sahrens #define BUF_EMPTY(buf) \ 345789Sahrens ((buf)->b_dva.dva_word[0] == 0 && \ 346789Sahrens (buf)->b_dva.dva_word[1] == 0 && \ 347789Sahrens (buf)->b_birth == 0) 348789Sahrens 349789Sahrens #define BUF_EQUAL(spa, dva, birth, buf) \ 350789Sahrens ((buf)->b_dva.dva_word[0] == 
(dva)->dva_word[0]) && \ 351789Sahrens ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 352789Sahrens ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 353789Sahrens 354789Sahrens static arc_buf_hdr_t * 355789Sahrens buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp) 356789Sahrens { 357789Sahrens uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 358789Sahrens kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 359789Sahrens arc_buf_hdr_t *buf; 360789Sahrens 361789Sahrens mutex_enter(hash_lock); 362789Sahrens for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 363789Sahrens buf = buf->b_hash_next) { 364789Sahrens if (BUF_EQUAL(spa, dva, birth, buf)) { 365789Sahrens *lockp = hash_lock; 366789Sahrens return (buf); 367789Sahrens } 368789Sahrens } 369789Sahrens mutex_exit(hash_lock); 370789Sahrens *lockp = NULL; 371789Sahrens return (NULL); 372789Sahrens } 373789Sahrens 374789Sahrens /* 375789Sahrens * Insert an entry into the hash table. If there is already an element 376789Sahrens * equal to elem in the hash table, then the already existing element 377789Sahrens * will be returned and the new element will not be inserted. 378789Sahrens * Otherwise returns NULL. 379789Sahrens */ 380789Sahrens static arc_buf_hdr_t * 381789Sahrens buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 382789Sahrens { 383789Sahrens uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 384789Sahrens kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 385789Sahrens arc_buf_hdr_t *fbuf; 386789Sahrens uint32_t max, i; 387789Sahrens 3881544Seschrock ASSERT(!HDR_IN_HASH_TABLE(buf)); 389789Sahrens *lockp = hash_lock; 390789Sahrens mutex_enter(hash_lock); 391789Sahrens for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 392789Sahrens fbuf = fbuf->b_hash_next, i++) { 393789Sahrens if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 394789Sahrens return (fbuf); 395789Sahrens } 396789Sahrens 397789Sahrens buf->b_hash_next = buf_hash_table.ht_table[idx]; 398789Sahrens buf_hash_table.ht_table[idx] = buf; 3991544Seschrock buf->b_flags |= ARC_IN_HASH_TABLE; 400789Sahrens 401789Sahrens /* collect some hash table performance data */ 402789Sahrens if (i > 0) { 403789Sahrens atomic_add_64(&arc.hash_collisions, 1); 404789Sahrens if (i == 1) 405789Sahrens atomic_add_64(&arc.hash_chains, 1); 406789Sahrens } 407789Sahrens while (i > (max = arc.hash_chain_max) && 408789Sahrens max != atomic_cas_32(&arc.hash_chain_max, max, i)) { 409789Sahrens continue; 410789Sahrens } 411789Sahrens atomic_add_64(&arc.hash_elements, 1); 412789Sahrens if (arc.hash_elements > arc.hash_elements_max) 413789Sahrens atomic_add_64(&arc.hash_elements_max, 1); 414789Sahrens 415789Sahrens return (NULL); 416789Sahrens } 417789Sahrens 418789Sahrens static void 419789Sahrens buf_hash_remove(arc_buf_hdr_t *buf) 420789Sahrens { 421789Sahrens arc_buf_hdr_t *fbuf, **bufp; 422789Sahrens uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 423789Sahrens 424789Sahrens ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 4251544Seschrock ASSERT(HDR_IN_HASH_TABLE(buf)); 426789Sahrens 427789Sahrens bufp = &buf_hash_table.ht_table[idx]; 428789Sahrens while ((fbuf = *bufp) != buf) { 429789Sahrens ASSERT(fbuf != NULL); 430789Sahrens bufp = &fbuf->b_hash_next; 431789Sahrens } 432789Sahrens *bufp = buf->b_hash_next; 433789Sahrens buf->b_hash_next = NULL; 4341544Seschrock buf->b_flags &= ~ARC_IN_HASH_TABLE; 435789Sahrens 436789Sahrens /* collect some hash table performance data */ 437789Sahrens atomic_add_64(&arc.hash_elements, -1); 
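	/*
	 * If exactly one buffer remains on this chain, the chain no
	 * longer counts as a collision chain; this mirrors the
	 * accounting done in buf_hash_insert().
	 */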
438789Sahrens if (buf_hash_table.ht_table[idx] && 439789Sahrens buf_hash_table.ht_table[idx]->b_hash_next == NULL) 440789Sahrens atomic_add_64(&arc.hash_chains, -1); 441789Sahrens } 442789Sahrens 443789Sahrens /* 444789Sahrens * Global data structures and functions for the buf kmem cache. 445789Sahrens */ 446789Sahrens static kmem_cache_t *hdr_cache; 447789Sahrens static kmem_cache_t *buf_cache; 448789Sahrens 449789Sahrens static void 450789Sahrens buf_fini(void) 451789Sahrens { 452789Sahrens int i; 453789Sahrens 454789Sahrens kmem_free(buf_hash_table.ht_table, 455789Sahrens (buf_hash_table.ht_mask + 1) * sizeof (void *)); 456789Sahrens for (i = 0; i < BUF_LOCKS; i++) 457789Sahrens mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 458789Sahrens kmem_cache_destroy(hdr_cache); 459789Sahrens kmem_cache_destroy(buf_cache); 460789Sahrens } 461789Sahrens 462789Sahrens /* 463789Sahrens * Constructor callback - called when the cache is empty 464789Sahrens * and a new buf is requested. 465789Sahrens */ 466789Sahrens /* ARGSUSED */ 467789Sahrens static int 468789Sahrens hdr_cons(void *vbuf, void *unused, int kmflag) 469789Sahrens { 470789Sahrens arc_buf_hdr_t *buf = vbuf; 471789Sahrens 472789Sahrens bzero(buf, sizeof (arc_buf_hdr_t)); 473789Sahrens refcount_create(&buf->b_refcnt); 474789Sahrens cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 475789Sahrens return (0); 476789Sahrens } 477789Sahrens 478789Sahrens /* 479789Sahrens * Destructor callback - called when a cached buf is 480789Sahrens * no longer required. 481789Sahrens */ 482789Sahrens /* ARGSUSED */ 483789Sahrens static void 484789Sahrens hdr_dest(void *vbuf, void *unused) 485789Sahrens { 486789Sahrens arc_buf_hdr_t *buf = vbuf; 487789Sahrens 488789Sahrens refcount_destroy(&buf->b_refcnt); 489789Sahrens cv_destroy(&buf->b_cv); 490789Sahrens } 491789Sahrens 492789Sahrens /* 493789Sahrens * Reclaim callback -- invoked when memory is low. 494789Sahrens */ 495789Sahrens /* ARGSUSED */ 496789Sahrens static void 497789Sahrens hdr_recl(void *unused) 498789Sahrens { 499789Sahrens dprintf("hdr_recl called\n"); 5003158Smaybee /* 5013158Smaybee * umem calls the reclaim func when we destroy the buf cache, 5023158Smaybee * which is after we do arc_fini(). 5033158Smaybee */ 5043158Smaybee if (!arc_dead) 5053158Smaybee cv_signal(&arc_reclaim_thr_cv); 506789Sahrens } 507789Sahrens 508789Sahrens static void 509789Sahrens buf_init(void) 510789Sahrens { 511789Sahrens uint64_t *ct; 5121544Seschrock uint64_t hsize = 1ULL << 12; 513789Sahrens int i, j; 514789Sahrens 515789Sahrens /* 516789Sahrens * The hash table is big enough to fill all of physical memory 5171544Seschrock * with an average 64K block size. The table will take up 5181544Seschrock * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 
519789Sahrens  */
5201544Seschrock 	while (hsize * 65536 < physmem * PAGESIZE)
521789Sahrens 		hsize <<= 1;
5221544Seschrock retry:
523789Sahrens 	buf_hash_table.ht_mask = hsize - 1;
5241544Seschrock 	buf_hash_table.ht_table =
5251544Seschrock 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
5261544Seschrock 	if (buf_hash_table.ht_table == NULL) {
5271544Seschrock 		ASSERT(hsize > (1ULL << 8));
5281544Seschrock 		hsize >>= 1;
5291544Seschrock 		goto retry;
5301544Seschrock 	}
531789Sahrens 
532789Sahrens 	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
533789Sahrens 	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
534789Sahrens 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
535789Sahrens 	    0, NULL, NULL, NULL, NULL, NULL, 0);
536789Sahrens 
537789Sahrens 	for (i = 0; i < 256; i++)
538789Sahrens 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
539789Sahrens 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
540789Sahrens 
541789Sahrens 	for (i = 0; i < BUF_LOCKS; i++) {
542789Sahrens 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
543789Sahrens 		    NULL, MUTEX_DEFAULT, NULL);
544789Sahrens 	}
545789Sahrens }
546789Sahrens 
547789Sahrens #define	ARC_MINTIME	(hz>>4) /* 62 ms */
548789Sahrens 
549789Sahrens static void
5503093Sahrens arc_cksum_verify(arc_buf_t *buf)
5513093Sahrens {
5523093Sahrens 	zio_cksum_t zc;
5533093Sahrens 
5543093Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5553093Sahrens 		return;
5563093Sahrens 
5573093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5583265Sahrens 	if (buf->b_hdr->b_freeze_cksum == NULL ||
5593265Sahrens 	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
5603093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
5613093Sahrens 		return;
5623093Sahrens 	}
5633093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
5643093Sahrens 	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
5653093Sahrens 		panic("buffer modified while frozen!");
5663093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
5673093Sahrens }
5683093Sahrens 
5693093Sahrens static void
5703093Sahrens arc_cksum_compute(arc_buf_t *buf)
5713093Sahrens {
5723093Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5733093Sahrens 		return;
5743093Sahrens 
5753093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5763093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
5773093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
5783093Sahrens 		return;
5793093Sahrens 	}
5803093Sahrens 	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
5813093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
5823093Sahrens 	    buf->b_hdr->b_freeze_cksum);
5833093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
5843093Sahrens }
5853093Sahrens 
5863093Sahrens void
5873093Sahrens arc_buf_thaw(arc_buf_t *buf)
5883093Sahrens {
5893093Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5903093Sahrens 		return;
5913093Sahrens 
5923093Sahrens 	if (buf->b_hdr->b_state != arc.anon)
5933093Sahrens 		panic("modifying non-anon buffer!");
5943093Sahrens 	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
5953093Sahrens 		panic("modifying buffer while i/o in progress!");
5963093Sahrens 	arc_cksum_verify(buf);
5973093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5983093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
5993093Sahrens 		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
6003093Sahrens 		buf->b_hdr->b_freeze_cksum = NULL;
6013093Sahrens 	}
6023093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
6033093Sahrens }
6043093Sahrens 
6053093Sahrens void
6063093Sahrens arc_buf_freeze(arc_buf_t *buf)
6073093Sahrens {
ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 6093093Sahrens buf->b_hdr->b_state == arc.anon); 6103093Sahrens arc_cksum_compute(buf); 6113093Sahrens } 6123093Sahrens 6133093Sahrens static void 614789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 615789Sahrens { 616789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 617789Sahrens 618789Sahrens if ((refcount_add(&ab->b_refcnt, tag) == 1) && 619789Sahrens (ab->b_state != arc.anon)) { 6201544Seschrock int delta = ab->b_size * ab->b_datacnt; 621789Sahrens 622789Sahrens ASSERT(!MUTEX_HELD(&ab->b_state->mtx)); 623789Sahrens mutex_enter(&ab->b_state->mtx); 624789Sahrens ASSERT(list_link_active(&ab->b_arc_node)); 625789Sahrens list_remove(&ab->b_state->list, ab); 6261544Seschrock if (GHOST_STATE(ab->b_state)) { 6271544Seschrock ASSERT3U(ab->b_datacnt, ==, 0); 6281544Seschrock ASSERT3P(ab->b_buf, ==, NULL); 6291544Seschrock delta = ab->b_size; 6301544Seschrock } 6311544Seschrock ASSERT(delta > 0); 6321544Seschrock ASSERT3U(ab->b_state->lsize, >=, delta); 6331544Seschrock atomic_add_64(&ab->b_state->lsize, -delta); 634789Sahrens mutex_exit(&ab->b_state->mtx); 6352391Smaybee /* remove the prefetch flag is we get a reference */ 6362391Smaybee if (ab->b_flags & ARC_PREFETCH) 6372391Smaybee ab->b_flags &= ~ARC_PREFETCH; 638789Sahrens } 639789Sahrens } 640789Sahrens 641789Sahrens static int 642789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 643789Sahrens { 644789Sahrens int cnt; 645789Sahrens 6461544Seschrock ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock)); 6471544Seschrock ASSERT(!GHOST_STATE(ab->b_state)); 648789Sahrens 649789Sahrens if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 650789Sahrens (ab->b_state != arc.anon)) { 651789Sahrens 652789Sahrens ASSERT(!MUTEX_HELD(&ab->b_state->mtx)); 653789Sahrens mutex_enter(&ab->b_state->mtx); 654789Sahrens ASSERT(!list_link_active(&ab->b_arc_node)); 655789Sahrens list_insert_head(&ab->b_state->list, ab); 6561544Seschrock ASSERT(ab->b_datacnt > 0); 6571544Seschrock atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt); 6581544Seschrock ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize); 659789Sahrens mutex_exit(&ab->b_state->mtx); 660789Sahrens } 661789Sahrens return (cnt); 662789Sahrens } 663789Sahrens 664789Sahrens /* 665789Sahrens * Move the supplied buffer to the indicated state. The mutex 666789Sahrens * for the buffer must be held by the caller. 667789Sahrens */ 668789Sahrens static void 6691544Seschrock arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 670789Sahrens { 6711544Seschrock arc_state_t *old_state = ab->b_state; 6721544Seschrock int refcnt = refcount_count(&ab->b_refcnt); 6731544Seschrock int from_delta, to_delta; 674789Sahrens 675789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 6761544Seschrock ASSERT(new_state != old_state); 6771544Seschrock ASSERT(refcnt == 0 || ab->b_datacnt > 0); 6781544Seschrock ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 6791544Seschrock 6801544Seschrock from_delta = to_delta = ab->b_datacnt * ab->b_size; 681789Sahrens 682789Sahrens /* 683789Sahrens * If this buffer is evictable, transfer it from the 684789Sahrens * old state list to the new state list. 
685789Sahrens */ 6861544Seschrock if (refcnt == 0) { 6871544Seschrock if (old_state != arc.anon) { 6881544Seschrock int use_mutex = !MUTEX_HELD(&old_state->mtx); 6891544Seschrock 6901544Seschrock if (use_mutex) 6911544Seschrock mutex_enter(&old_state->mtx); 6921544Seschrock 6931544Seschrock ASSERT(list_link_active(&ab->b_arc_node)); 6941544Seschrock list_remove(&old_state->list, ab); 695789Sahrens 6962391Smaybee /* 6972391Smaybee * If prefetching out of the ghost cache, 6982391Smaybee * we will have a non-null datacnt. 6992391Smaybee */ 7002391Smaybee if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 7012391Smaybee /* ghost elements have a ghost size */ 7021544Seschrock ASSERT(ab->b_buf == NULL); 7031544Seschrock from_delta = ab->b_size; 704789Sahrens } 7051544Seschrock ASSERT3U(old_state->lsize, >=, from_delta); 7061544Seschrock atomic_add_64(&old_state->lsize, -from_delta); 7071544Seschrock 7081544Seschrock if (use_mutex) 7091544Seschrock mutex_exit(&old_state->mtx); 710789Sahrens } 711789Sahrens if (new_state != arc.anon) { 7121544Seschrock int use_mutex = !MUTEX_HELD(&new_state->mtx); 713789Sahrens 7141544Seschrock if (use_mutex) 715789Sahrens mutex_enter(&new_state->mtx); 7161544Seschrock 717789Sahrens list_insert_head(&new_state->list, ab); 7181544Seschrock 7191544Seschrock /* ghost elements have a ghost size */ 7201544Seschrock if (GHOST_STATE(new_state)) { 7211544Seschrock ASSERT(ab->b_datacnt == 0); 7221544Seschrock ASSERT(ab->b_buf == NULL); 7231544Seschrock to_delta = ab->b_size; 7241544Seschrock } 7251544Seschrock atomic_add_64(&new_state->lsize, to_delta); 7261544Seschrock ASSERT3U(new_state->size + to_delta, >=, 7271544Seschrock new_state->lsize); 7281544Seschrock 7291544Seschrock if (use_mutex) 730789Sahrens mutex_exit(&new_state->mtx); 731789Sahrens } 732789Sahrens } 733789Sahrens 734789Sahrens ASSERT(!BUF_EMPTY(ab)); 7351544Seschrock if (new_state == arc.anon && old_state != arc.anon) { 736789Sahrens buf_hash_remove(ab); 737789Sahrens } 738789Sahrens 7391544Seschrock /* adjust state sizes */ 7401544Seschrock if (to_delta) 7411544Seschrock atomic_add_64(&new_state->size, to_delta); 7421544Seschrock if (from_delta) { 7431544Seschrock ASSERT3U(old_state->size, >=, from_delta); 7441544Seschrock atomic_add_64(&old_state->size, -from_delta); 745789Sahrens } 746789Sahrens ab->b_state = new_state; 747789Sahrens } 748789Sahrens 749789Sahrens arc_buf_t * 750*3290Sjohansen arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 751789Sahrens { 752789Sahrens arc_buf_hdr_t *hdr; 753789Sahrens arc_buf_t *buf; 754789Sahrens 755789Sahrens ASSERT3U(size, >, 0); 756789Sahrens hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 757789Sahrens ASSERT(BUF_EMPTY(hdr)); 758789Sahrens hdr->b_size = size; 759*3290Sjohansen hdr->b_type = type; 760789Sahrens hdr->b_spa = spa; 761789Sahrens hdr->b_state = arc.anon; 762789Sahrens hdr->b_arc_access = 0; 763789Sahrens buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 764789Sahrens buf->b_hdr = hdr; 7652688Smaybee buf->b_data = NULL; 7661544Seschrock buf->b_efunc = NULL; 7671544Seschrock buf->b_private = NULL; 768789Sahrens buf->b_next = NULL; 769789Sahrens hdr->b_buf = buf; 7702688Smaybee arc_get_data_buf(buf); 7711544Seschrock hdr->b_datacnt = 1; 772789Sahrens hdr->b_flags = 0; 773789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 774789Sahrens (void) refcount_add(&hdr->b_refcnt, tag); 775789Sahrens 776789Sahrens return (buf); 777789Sahrens } 778789Sahrens 7792688Smaybee static arc_buf_t * 7802688Smaybee arc_buf_clone(arc_buf_t *from) 
7811544Seschrock { 7822688Smaybee arc_buf_t *buf; 7832688Smaybee arc_buf_hdr_t *hdr = from->b_hdr; 7842688Smaybee uint64_t size = hdr->b_size; 7851544Seschrock 7862688Smaybee buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 7872688Smaybee buf->b_hdr = hdr; 7882688Smaybee buf->b_data = NULL; 7892688Smaybee buf->b_efunc = NULL; 7902688Smaybee buf->b_private = NULL; 7912688Smaybee buf->b_next = hdr->b_buf; 7922688Smaybee hdr->b_buf = buf; 7932688Smaybee arc_get_data_buf(buf); 7942688Smaybee bcopy(from->b_data, buf->b_data, size); 7952688Smaybee hdr->b_datacnt += 1; 7962688Smaybee return (buf); 7971544Seschrock } 7981544Seschrock 7991544Seschrock void 8001544Seschrock arc_buf_add_ref(arc_buf_t *buf, void* tag) 8011544Seschrock { 8022887Smaybee arc_buf_hdr_t *hdr; 8031544Seschrock kmutex_t *hash_lock; 8041544Seschrock 8052724Smaybee /* 8062724Smaybee * Check to see if this buffer is currently being evicted via 8072887Smaybee * arc_do_user_evicts(). 8082724Smaybee */ 8092887Smaybee mutex_enter(&arc_eviction_mtx); 8102887Smaybee hdr = buf->b_hdr; 8112887Smaybee if (hdr == NULL) { 8122887Smaybee mutex_exit(&arc_eviction_mtx); 8132724Smaybee return; 8142887Smaybee } 8152887Smaybee hash_lock = HDR_LOCK(hdr); 8162887Smaybee mutex_exit(&arc_eviction_mtx); 8172724Smaybee 8182724Smaybee mutex_enter(hash_lock); 8191544Seschrock if (buf->b_data == NULL) { 8201544Seschrock /* 8211544Seschrock * This buffer is evicted. 8221544Seschrock */ 8232724Smaybee mutex_exit(hash_lock); 8241544Seschrock return; 8251544Seschrock } 8261544Seschrock 8272724Smaybee ASSERT(buf->b_hdr == hdr); 8282724Smaybee ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu); 8291544Seschrock add_reference(hdr, hash_lock, tag); 8302688Smaybee arc_access(hdr, hash_lock); 8312688Smaybee mutex_exit(hash_lock); 8321544Seschrock atomic_add_64(&arc.hits, 1); 8331544Seschrock } 8341544Seschrock 835789Sahrens static void 8362688Smaybee arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 8371544Seschrock { 8381544Seschrock arc_buf_t **bufp; 8391544Seschrock 8401544Seschrock /* free up data associated with the buf */ 8411544Seschrock if (buf->b_data) { 8421544Seschrock arc_state_t *state = buf->b_hdr->b_state; 8431544Seschrock uint64_t size = buf->b_hdr->b_size; 844*3290Sjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 8451544Seschrock 8463093Sahrens arc_cksum_verify(buf); 8472688Smaybee if (!recycle) { 848*3290Sjohansen if (type == ARC_BUFC_METADATA) { 849*3290Sjohansen zio_buf_free(buf->b_data, size); 850*3290Sjohansen } else { 851*3290Sjohansen ASSERT(type == ARC_BUFC_DATA); 852*3290Sjohansen zio_data_buf_free(buf->b_data, size); 853*3290Sjohansen } 8542688Smaybee atomic_add_64(&arc.size, -size); 8552688Smaybee } 8561544Seschrock if (list_link_active(&buf->b_hdr->b_arc_node)) { 8571544Seschrock ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 8581544Seschrock ASSERT(state != arc.anon); 8591544Seschrock ASSERT3U(state->lsize, >=, size); 8601544Seschrock atomic_add_64(&state->lsize, -size); 8611544Seschrock } 8621544Seschrock ASSERT3U(state->size, >=, size); 8631544Seschrock atomic_add_64(&state->size, -size); 8641544Seschrock buf->b_data = NULL; 8651544Seschrock ASSERT(buf->b_hdr->b_datacnt > 0); 8661544Seschrock buf->b_hdr->b_datacnt -= 1; 8671544Seschrock } 8681544Seschrock 8691544Seschrock /* only remove the buf if requested */ 8701544Seschrock if (!all) 8711544Seschrock return; 8721544Seschrock 8731544Seschrock /* remove the buf from the hdr list */ 8741544Seschrock for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = 
&(*bufp)->b_next) 8751544Seschrock continue; 8761544Seschrock *bufp = buf->b_next; 8771544Seschrock 8781544Seschrock ASSERT(buf->b_efunc == NULL); 8791544Seschrock 8801544Seschrock /* clean up the buf */ 8811544Seschrock buf->b_hdr = NULL; 8821544Seschrock kmem_cache_free(buf_cache, buf); 8831544Seschrock } 8841544Seschrock 8851544Seschrock static void 8861544Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr) 887789Sahrens { 888789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 889789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 8901544Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 891789Sahrens 892789Sahrens if (!BUF_EMPTY(hdr)) { 8931544Seschrock ASSERT(!HDR_IN_HASH_TABLE(hdr)); 894789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 895789Sahrens hdr->b_birth = 0; 896789Sahrens hdr->b_cksum0 = 0; 897789Sahrens } 8981544Seschrock while (hdr->b_buf) { 899789Sahrens arc_buf_t *buf = hdr->b_buf; 900789Sahrens 9011544Seschrock if (buf->b_efunc) { 9021544Seschrock mutex_enter(&arc_eviction_mtx); 9031544Seschrock ASSERT(buf->b_hdr != NULL); 9042688Smaybee arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 9051544Seschrock hdr->b_buf = buf->b_next; 9062887Smaybee buf->b_hdr = &arc_eviction_hdr; 9071544Seschrock buf->b_next = arc_eviction_list; 9081544Seschrock arc_eviction_list = buf; 9091544Seschrock mutex_exit(&arc_eviction_mtx); 9101544Seschrock } else { 9112688Smaybee arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 9121544Seschrock } 913789Sahrens } 9143093Sahrens if (hdr->b_freeze_cksum != NULL) { 9153093Sahrens kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 9163093Sahrens hdr->b_freeze_cksum = NULL; 9173093Sahrens } 9181544Seschrock 919789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 920789Sahrens ASSERT3P(hdr->b_hash_next, ==, NULL); 921789Sahrens ASSERT3P(hdr->b_acb, ==, NULL); 922789Sahrens kmem_cache_free(hdr_cache, hdr); 923789Sahrens } 924789Sahrens 925789Sahrens void 926789Sahrens arc_buf_free(arc_buf_t *buf, void *tag) 927789Sahrens { 928789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 9291544Seschrock int hashed = hdr->b_state != arc.anon; 9301544Seschrock 9311544Seschrock ASSERT(buf->b_efunc == NULL); 9321544Seschrock ASSERT(buf->b_data != NULL); 9331544Seschrock 9341544Seschrock if (hashed) { 9351544Seschrock kmutex_t *hash_lock = HDR_LOCK(hdr); 9361544Seschrock 9371544Seschrock mutex_enter(hash_lock); 9381544Seschrock (void) remove_reference(hdr, hash_lock, tag); 9391544Seschrock if (hdr->b_datacnt > 1) 9402688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 9411544Seschrock else 9421544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 9431544Seschrock mutex_exit(hash_lock); 9441544Seschrock } else if (HDR_IO_IN_PROGRESS(hdr)) { 9451544Seschrock int destroy_hdr; 9461544Seschrock /* 9471544Seschrock * We are in the middle of an async write. Don't destroy 9481544Seschrock * this buffer unless the write completes before we finish 9491544Seschrock * decrementing the reference count. 
9501544Seschrock */ 9511544Seschrock mutex_enter(&arc_eviction_mtx); 9521544Seschrock (void) remove_reference(hdr, NULL, tag); 9531544Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt)); 9541544Seschrock destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 9551544Seschrock mutex_exit(&arc_eviction_mtx); 9561544Seschrock if (destroy_hdr) 9571544Seschrock arc_hdr_destroy(hdr); 9581544Seschrock } else { 9591544Seschrock if (remove_reference(hdr, NULL, tag) > 0) { 9601544Seschrock ASSERT(HDR_IO_ERROR(hdr)); 9612688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 9621544Seschrock } else { 9631544Seschrock arc_hdr_destroy(hdr); 9641544Seschrock } 9651544Seschrock } 9661544Seschrock } 9671544Seschrock 9681544Seschrock int 9691544Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag) 9701544Seschrock { 9711544Seschrock arc_buf_hdr_t *hdr = buf->b_hdr; 972789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr); 9731544Seschrock int no_callback = (buf->b_efunc == NULL); 9741544Seschrock 9751544Seschrock if (hdr->b_state == arc.anon) { 9761544Seschrock arc_buf_free(buf, tag); 9771544Seschrock return (no_callback); 9781544Seschrock } 979789Sahrens 980789Sahrens mutex_enter(hash_lock); 9811544Seschrock ASSERT(hdr->b_state != arc.anon); 9821544Seschrock ASSERT(buf->b_data != NULL); 983789Sahrens 9841544Seschrock (void) remove_reference(hdr, hash_lock, tag); 9851544Seschrock if (hdr->b_datacnt > 1) { 9861544Seschrock if (no_callback) 9872688Smaybee arc_buf_destroy(buf, FALSE, TRUE); 9881544Seschrock } else if (no_callback) { 9891544Seschrock ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 9901544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 991789Sahrens } 9921544Seschrock ASSERT(no_callback || hdr->b_datacnt > 1 || 9931544Seschrock refcount_is_zero(&hdr->b_refcnt)); 994789Sahrens mutex_exit(hash_lock); 9951544Seschrock return (no_callback); 996789Sahrens } 997789Sahrens 998789Sahrens int 999789Sahrens arc_buf_size(arc_buf_t *buf) 1000789Sahrens { 1001789Sahrens return (buf->b_hdr->b_size); 1002789Sahrens } 1003789Sahrens 1004789Sahrens /* 1005789Sahrens * Evict buffers from list until we've removed the specified number of 1006789Sahrens * bytes. Move the removed buffers to the appropriate evict state. 10072688Smaybee * If the recycle flag is set, then attempt to "recycle" a buffer: 10082688Smaybee * - look for a buffer to evict that is `bytes' long. 10092688Smaybee * - return the data block from this buffer rather than freeing it. 10102688Smaybee * This flag is used by callers that are trying to make space for a 10112688Smaybee * new buffer in a full arc cache. 1012789Sahrens */ 10132688Smaybee static void * 1014*3290Sjohansen arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle, 1015*3290Sjohansen arc_buf_contents_t type) 1016789Sahrens { 1017789Sahrens arc_state_t *evicted_state; 10182688Smaybee uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 10192918Smaybee arc_buf_hdr_t *ab, *ab_prev = NULL; 1020789Sahrens kmutex_t *hash_lock; 10212688Smaybee boolean_t have_lock; 10222918Smaybee void *stolen = NULL; 1023789Sahrens 10241544Seschrock ASSERT(state == arc.mru || state == arc.mfu); 1025789Sahrens 10261544Seschrock evicted_state = (state == arc.mru) ? 
arc.mru_ghost : arc.mfu_ghost; 1027789Sahrens 1028789Sahrens mutex_enter(&state->mtx); 1029789Sahrens mutex_enter(&evicted_state->mtx); 1030789Sahrens 1031789Sahrens for (ab = list_tail(&state->list); ab; ab = ab_prev) { 1032789Sahrens ab_prev = list_prev(&state->list, ab); 10332391Smaybee /* prefetch buffers have a minimum lifespan */ 10342688Smaybee if (HDR_IO_IN_PROGRESS(ab) || 10352688Smaybee (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 10362688Smaybee lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { 10372391Smaybee skipped++; 10382391Smaybee continue; 10392391Smaybee } 10402918Smaybee /* "lookahead" for better eviction candidate */ 10412918Smaybee if (recycle && ab->b_size != bytes && 10422918Smaybee ab_prev && ab_prev->b_size == bytes) 10432688Smaybee continue; 1044789Sahrens hash_lock = HDR_LOCK(ab); 10452688Smaybee have_lock = MUTEX_HELD(hash_lock); 10462688Smaybee if (have_lock || mutex_tryenter(hash_lock)) { 1047789Sahrens ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 10481544Seschrock ASSERT(ab->b_datacnt > 0); 10491544Seschrock while (ab->b_buf) { 10501544Seschrock arc_buf_t *buf = ab->b_buf; 10512688Smaybee if (buf->b_data) { 10521544Seschrock bytes_evicted += ab->b_size; 1053*3290Sjohansen if (recycle && ab->b_type == type && 1054*3290Sjohansen ab->b_size == bytes) { 10552918Smaybee stolen = buf->b_data; 10562918Smaybee recycle = FALSE; 10572918Smaybee } 10582688Smaybee } 10591544Seschrock if (buf->b_efunc) { 10601544Seschrock mutex_enter(&arc_eviction_mtx); 10612918Smaybee arc_buf_destroy(buf, 10622918Smaybee buf->b_data == stolen, FALSE); 10631544Seschrock ab->b_buf = buf->b_next; 10642887Smaybee buf->b_hdr = &arc_eviction_hdr; 10651544Seschrock buf->b_next = arc_eviction_list; 10661544Seschrock arc_eviction_list = buf; 10671544Seschrock mutex_exit(&arc_eviction_mtx); 10681544Seschrock } else { 10692918Smaybee arc_buf_destroy(buf, 10702918Smaybee buf->b_data == stolen, TRUE); 10711544Seschrock } 10721544Seschrock } 10731544Seschrock ASSERT(ab->b_datacnt == 0); 1074789Sahrens arc_change_state(evicted_state, ab, hash_lock); 10751544Seschrock ASSERT(HDR_IN_HASH_TABLE(ab)); 10761544Seschrock ab->b_flags = ARC_IN_HASH_TABLE; 1077789Sahrens DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 10782688Smaybee if (!have_lock) 10792688Smaybee mutex_exit(hash_lock); 10801544Seschrock if (bytes >= 0 && bytes_evicted >= bytes) 1081789Sahrens break; 1082789Sahrens } else { 10832688Smaybee missed += 1; 1084789Sahrens } 1085789Sahrens } 1086789Sahrens mutex_exit(&evicted_state->mtx); 1087789Sahrens mutex_exit(&state->mtx); 1088789Sahrens 1089789Sahrens if (bytes_evicted < bytes) 1090789Sahrens dprintf("only evicted %lld bytes from %x", 1091789Sahrens (longlong_t)bytes_evicted, state); 1092789Sahrens 10932688Smaybee if (skipped) 10942688Smaybee atomic_add_64(&arc.evict_skip, skipped); 10952688Smaybee if (missed) 10962688Smaybee atomic_add_64(&arc.mutex_miss, missed); 10972918Smaybee return (stolen); 1098789Sahrens } 1099789Sahrens 1100789Sahrens /* 1101789Sahrens * Remove buffers from list until we've removed the specified number of 1102789Sahrens * bytes. Destroy the buffers that are removed. 
1103789Sahrens */ 1104789Sahrens static void 11051544Seschrock arc_evict_ghost(arc_state_t *state, int64_t bytes) 1106789Sahrens { 1107789Sahrens arc_buf_hdr_t *ab, *ab_prev; 1108789Sahrens kmutex_t *hash_lock; 11091544Seschrock uint64_t bytes_deleted = 0; 11101544Seschrock uint_t bufs_skipped = 0; 1111789Sahrens 11121544Seschrock ASSERT(GHOST_STATE(state)); 1113789Sahrens top: 1114789Sahrens mutex_enter(&state->mtx); 1115789Sahrens for (ab = list_tail(&state->list); ab; ab = ab_prev) { 1116789Sahrens ab_prev = list_prev(&state->list, ab); 1117789Sahrens hash_lock = HDR_LOCK(ab); 1118789Sahrens if (mutex_tryenter(hash_lock)) { 11192391Smaybee ASSERT(!HDR_IO_IN_PROGRESS(ab)); 11201544Seschrock ASSERT(ab->b_buf == NULL); 1121789Sahrens arc_change_state(arc.anon, ab, hash_lock); 1122789Sahrens mutex_exit(hash_lock); 1123789Sahrens atomic_add_64(&arc.deleted, 1); 11241544Seschrock bytes_deleted += ab->b_size; 11251544Seschrock arc_hdr_destroy(ab); 1126789Sahrens DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1127789Sahrens if (bytes >= 0 && bytes_deleted >= bytes) 1128789Sahrens break; 1129789Sahrens } else { 1130789Sahrens if (bytes < 0) { 1131789Sahrens mutex_exit(&state->mtx); 1132789Sahrens mutex_enter(hash_lock); 1133789Sahrens mutex_exit(hash_lock); 1134789Sahrens goto top; 1135789Sahrens } 1136789Sahrens bufs_skipped += 1; 1137789Sahrens } 1138789Sahrens } 1139789Sahrens mutex_exit(&state->mtx); 1140789Sahrens 1141789Sahrens if (bufs_skipped) { 11422688Smaybee atomic_add_64(&arc.mutex_miss, bufs_skipped); 1143789Sahrens ASSERT(bytes >= 0); 1144789Sahrens } 1145789Sahrens 1146789Sahrens if (bytes_deleted < bytes) 1147789Sahrens dprintf("only deleted %lld bytes from %p", 1148789Sahrens (longlong_t)bytes_deleted, state); 1149789Sahrens } 1150789Sahrens 1151789Sahrens static void 1152789Sahrens arc_adjust(void) 1153789Sahrens { 1154789Sahrens int64_t top_sz, mru_over, arc_over; 1155789Sahrens 11561544Seschrock top_sz = arc.anon->size + arc.mru->size; 1157789Sahrens 11581544Seschrock if (top_sz > arc.p && arc.mru->lsize > 0) { 11591544Seschrock int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p); 1160*3290Sjohansen (void) arc_evict(arc.mru, toevict, FALSE, ARC_BUFC_UNDEF); 11611544Seschrock top_sz = arc.anon->size + arc.mru->size; 1162789Sahrens } 1163789Sahrens 11641544Seschrock mru_over = top_sz + arc.mru_ghost->size - arc.c; 1165789Sahrens 1166789Sahrens if (mru_over > 0) { 11671544Seschrock if (arc.mru_ghost->lsize > 0) { 11681544Seschrock int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over); 11691544Seschrock arc_evict_ghost(arc.mru_ghost, todelete); 1170789Sahrens } 1171789Sahrens } 1172789Sahrens 1173789Sahrens if ((arc_over = arc.size - arc.c) > 0) { 11741544Seschrock int64_t tbl_over; 1175789Sahrens 11761544Seschrock if (arc.mfu->lsize > 0) { 11771544Seschrock int64_t toevict = MIN(arc.mfu->lsize, arc_over); 1178*3290Sjohansen (void) arc_evict(arc.mfu, toevict, FALSE, 1179*3290Sjohansen ARC_BUFC_UNDEF); 1180789Sahrens } 1181789Sahrens 11821544Seschrock tbl_over = arc.size + arc.mru_ghost->lsize + 11831544Seschrock arc.mfu_ghost->lsize - arc.c*2; 1184789Sahrens 11851544Seschrock if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) { 11861544Seschrock int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over); 11871544Seschrock arc_evict_ghost(arc.mfu_ghost, todelete); 1188789Sahrens } 1189789Sahrens } 1190789Sahrens } 1191789Sahrens 11921544Seschrock static void 11931544Seschrock arc_do_user_evicts(void) 11941544Seschrock { 11951544Seschrock mutex_enter(&arc_eviction_mtx); 11961544Seschrock 
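	/*
	 * The eviction mutex is dropped around each callback below so
	 * that b_efunc runs with no locks held, as required by the
	 * locking comment at the top of this file.
	 */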
while (arc_eviction_list != NULL) { 11971544Seschrock arc_buf_t *buf = arc_eviction_list; 11981544Seschrock arc_eviction_list = buf->b_next; 11991544Seschrock buf->b_hdr = NULL; 12001544Seschrock mutex_exit(&arc_eviction_mtx); 12011544Seschrock 12021819Smaybee if (buf->b_efunc != NULL) 12031819Smaybee VERIFY(buf->b_efunc(buf) == 0); 12041544Seschrock 12051544Seschrock buf->b_efunc = NULL; 12061544Seschrock buf->b_private = NULL; 12071544Seschrock kmem_cache_free(buf_cache, buf); 12081544Seschrock mutex_enter(&arc_eviction_mtx); 12091544Seschrock } 12101544Seschrock mutex_exit(&arc_eviction_mtx); 12111544Seschrock } 12121544Seschrock 1213789Sahrens /* 1214789Sahrens * Flush all *evictable* data from the cache. 1215789Sahrens * NOTE: this will not touch "active" (i.e. referenced) data. 1216789Sahrens */ 1217789Sahrens void 1218789Sahrens arc_flush(void) 1219789Sahrens { 12202688Smaybee while (list_head(&arc.mru->list)) 1221*3290Sjohansen (void) arc_evict(arc.mru, -1, FALSE, ARC_BUFC_UNDEF); 12222688Smaybee while (list_head(&arc.mfu->list)) 1223*3290Sjohansen (void) arc_evict(arc.mfu, -1, FALSE, ARC_BUFC_UNDEF); 1224789Sahrens 12251544Seschrock arc_evict_ghost(arc.mru_ghost, -1); 12261544Seschrock arc_evict_ghost(arc.mfu_ghost, -1); 12271544Seschrock 12281544Seschrock mutex_enter(&arc_reclaim_thr_lock); 12291544Seschrock arc_do_user_evicts(); 12301544Seschrock mutex_exit(&arc_reclaim_thr_lock); 12311544Seschrock ASSERT(arc_eviction_list == NULL); 1232789Sahrens } 1233789Sahrens 12343158Smaybee int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 12352391Smaybee 1236789Sahrens void 12373158Smaybee arc_shrink(void) 1238789Sahrens { 12393158Smaybee if (arc.c > arc.c_min) { 12403158Smaybee uint64_t to_free; 1241789Sahrens 12422048Sstans #ifdef _KERNEL 12433158Smaybee to_free = MAX(arc.c >> arc_shrink_shift, ptob(needfree)); 12442048Sstans #else 12453158Smaybee to_free = arc.c >> arc_shrink_shift; 12462048Sstans #endif 12473158Smaybee if (arc.c > arc.c_min + to_free) 12483158Smaybee atomic_add_64(&arc.c, -to_free); 12493158Smaybee else 12503158Smaybee arc.c = arc.c_min; 12512048Sstans 12523158Smaybee atomic_add_64(&arc.p, -(arc.p >> arc_shrink_shift)); 12533158Smaybee if (arc.c > arc.size) 12543158Smaybee arc.c = MAX(arc.size, arc.c_min); 12553158Smaybee if (arc.p > arc.c) 12563158Smaybee arc.p = (arc.c >> 1); 12573158Smaybee ASSERT(arc.c >= arc.c_min); 12583158Smaybee ASSERT((int64_t)arc.p >= 0); 12593158Smaybee } 1260789Sahrens 12613158Smaybee if (arc.size > arc.c) 12623158Smaybee arc_adjust(); 1263789Sahrens } 1264789Sahrens 1265789Sahrens static int 1266789Sahrens arc_reclaim_needed(void) 1267789Sahrens { 1268789Sahrens uint64_t extra; 1269789Sahrens 1270789Sahrens #ifdef _KERNEL 12712048Sstans 12722048Sstans if (needfree) 12732048Sstans return (1); 12742048Sstans 1275789Sahrens /* 1276789Sahrens * take 'desfree' extra pages, so we reclaim sooner, rather than later 1277789Sahrens */ 1278789Sahrens extra = desfree; 1279789Sahrens 1280789Sahrens /* 1281789Sahrens * check that we're out of range of the pageout scanner. It starts to 1282789Sahrens * schedule paging if freemem is less than lotsfree and needfree. 1283789Sahrens * lotsfree is the high-water mark for pageout, and needfree is the 1284789Sahrens * number of needed free pages. We add extra pages here to make sure 1285789Sahrens * the scanner doesn't start up while we're freeing memory. 
1286789Sahrens  */
1287789Sahrens 	if (freemem < lotsfree + needfree + extra)
1288789Sahrens 		return (1);
1289789Sahrens 
1290789Sahrens 	/*
1291789Sahrens 	 * check to make sure that swapfs has enough space so that anon
1292789Sahrens 	 * reservations can still succeed.  anon_resvmem() checks that the
1293789Sahrens 	 * availrmem is greater than swapfs_minfree, and the number of reserved
1294789Sahrens 	 * swap pages.  We also add a bit of extra here just to prevent
1295789Sahrens 	 * circumstances from getting really dire.
1296789Sahrens 	 */
1297789Sahrens 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1298789Sahrens 		return (1);
1299789Sahrens 
13001936Smaybee #if defined(__i386)
1301789Sahrens 	/*
1302789Sahrens 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1303789Sahrens 	 * kernel heap space before we ever run out of available physical
1304789Sahrens 	 * memory.  Most checks of the size of the heap_area compare against
1305789Sahrens 	 * tune.t_minarmem, which is the minimum available real memory that we
1306789Sahrens 	 * can have in the system.  However, this is generally fixed at 25 pages
1307789Sahrens 	 * which is so low that it's useless.  In this comparison, we seek to
1308789Sahrens 	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1309789Sahrens 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
1310789Sahrens 	 * free)
1311789Sahrens 	 */
1312789Sahrens 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1313789Sahrens 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1314789Sahrens 		return (1);
1315789Sahrens #endif
1316789Sahrens 
1317789Sahrens #else
1318789Sahrens 	if (spa_get_random(100) == 0)
1319789Sahrens 		return (1);
1320789Sahrens #endif
1321789Sahrens 	return (0);
1322789Sahrens }
1323789Sahrens 
1324789Sahrens static void
1325789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1326789Sahrens {
1327789Sahrens 	size_t			i;
1328789Sahrens 	kmem_cache_t		*prev_cache = NULL;
1329*3290Sjohansen 	kmem_cache_t		*prev_data_cache = NULL;
1330789Sahrens 	extern kmem_cache_t	*zio_buf_cache[];
1331*3290Sjohansen 	extern kmem_cache_t	*zio_data_buf_cache[];
1332789Sahrens 
13331484Sek110237 #ifdef _KERNEL
13341484Sek110237 	/*
13351484Sek110237 	 * First purge some DNLC entries, in case the DNLC is using
13361484Sek110237 	 * up too much memory.
13371484Sek110237 	 */
13381505Sek110237 	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
13391936Smaybee 
13401936Smaybee #if defined(__i386)
13411936Smaybee 	/*
13421936Smaybee 	 * Reclaim unused memory from all kmem caches.
13431936Smaybee 	 */
13441936Smaybee 	kmem_reap();
13451936Smaybee #endif
13461484Sek110237 #endif
13471484Sek110237 
1348789Sahrens 	/*
13491544Seschrock 	 * An aggressive reclamation will shrink the cache size as well as
13501544Seschrock 	 * reap free buffers from the arc kmem caches.
1351789Sahrens */ 1352789Sahrens if (strat == ARC_RECLAIM_AGGR) 13533158Smaybee arc_shrink(); 1354789Sahrens 1355789Sahrens for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1356789Sahrens if (zio_buf_cache[i] != prev_cache) { 1357789Sahrens prev_cache = zio_buf_cache[i]; 1358789Sahrens kmem_cache_reap_now(zio_buf_cache[i]); 1359789Sahrens } 1360*3290Sjohansen if (zio_data_buf_cache[i] != prev_data_cache) { 1361*3290Sjohansen prev_data_cache = zio_data_buf_cache[i]; 1362*3290Sjohansen kmem_cache_reap_now(zio_data_buf_cache[i]); 1363*3290Sjohansen } 1364789Sahrens } 13651544Seschrock kmem_cache_reap_now(buf_cache); 13661544Seschrock kmem_cache_reap_now(hdr_cache); 1367789Sahrens } 1368789Sahrens 1369789Sahrens static void 1370789Sahrens arc_reclaim_thread(void) 1371789Sahrens { 1372789Sahrens clock_t growtime = 0; 1373789Sahrens arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1374789Sahrens callb_cpr_t cpr; 1375789Sahrens 1376789Sahrens CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1377789Sahrens 1378789Sahrens mutex_enter(&arc_reclaim_thr_lock); 1379789Sahrens while (arc_thread_exit == 0) { 1380789Sahrens if (arc_reclaim_needed()) { 1381789Sahrens 1382789Sahrens if (arc.no_grow) { 1383789Sahrens if (last_reclaim == ARC_RECLAIM_CONS) { 1384789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1385789Sahrens } else { 1386789Sahrens last_reclaim = ARC_RECLAIM_CONS; 1387789Sahrens } 1388789Sahrens } else { 1389789Sahrens arc.no_grow = TRUE; 1390789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1391789Sahrens membar_producer(); 1392789Sahrens } 1393789Sahrens 1394789Sahrens /* reset the growth delay for every reclaim */ 1395789Sahrens growtime = lbolt + (arc_grow_retry * hz); 13962856Snd150628 ASSERT(growtime > 0); 1397789Sahrens 1398789Sahrens arc_kmem_reap_now(last_reclaim); 1399789Sahrens 1400789Sahrens } else if ((growtime > 0) && ((growtime - lbolt) <= 0)) { 1401789Sahrens arc.no_grow = FALSE; 1402789Sahrens } 1403789Sahrens 14041544Seschrock if (arc_eviction_list != NULL) 14051544Seschrock arc_do_user_evicts(); 14061544Seschrock 1407789Sahrens /* block until needed, or one second, whichever is shorter */ 1408789Sahrens CALLB_CPR_SAFE_BEGIN(&cpr); 1409789Sahrens (void) cv_timedwait(&arc_reclaim_thr_cv, 1410789Sahrens &arc_reclaim_thr_lock, (lbolt + hz)); 1411789Sahrens CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1412789Sahrens } 1413789Sahrens 1414789Sahrens arc_thread_exit = 0; 1415789Sahrens cv_broadcast(&arc_reclaim_thr_cv); 1416789Sahrens CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1417789Sahrens thread_exit(); 1418789Sahrens } 1419789Sahrens 14201544Seschrock /* 14211544Seschrock * Adapt arc info given the number of bytes we are trying to add and 14221544Seschrock * the state that we are comming from. This function is only called 14231544Seschrock * when we are adding new content to the cache. 14241544Seschrock */ 1425789Sahrens static void 14261544Seschrock arc_adapt(int bytes, arc_state_t *state) 1427789Sahrens { 14281544Seschrock int mult; 14291544Seschrock 14301544Seschrock ASSERT(bytes > 0); 1431789Sahrens /* 14321544Seschrock * Adapt the target size of the MRU list: 14331544Seschrock * - if we just hit in the MRU ghost list, then increase 14341544Seschrock * the target size of the MRU list. 14351544Seschrock * - if we just hit in the MFU ghost list, then increase 14361544Seschrock * the target size of the MFU list by decreasing the 14371544Seschrock * target size of the MRU list. 
1438789Sahrens */ 14391544Seschrock if (state == arc.mru_ghost) { 14401544Seschrock mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ? 14411544Seschrock 1 : (arc.mfu_ghost->size/arc.mru_ghost->size)); 14421544Seschrock 14431544Seschrock arc.p = MIN(arc.c, arc.p + bytes * mult); 14441544Seschrock } else if (state == arc.mfu_ghost) { 14451544Seschrock mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ? 14461544Seschrock 1 : (arc.mru_ghost->size/arc.mfu_ghost->size)); 14471544Seschrock 14481544Seschrock arc.p = MAX(0, (int64_t)arc.p - bytes * mult); 14491544Seschrock } 14501544Seschrock ASSERT((int64_t)arc.p >= 0); 1451789Sahrens 1452789Sahrens if (arc_reclaim_needed()) { 1453789Sahrens cv_signal(&arc_reclaim_thr_cv); 1454789Sahrens return; 1455789Sahrens } 1456789Sahrens 1457789Sahrens if (arc.no_grow) 1458789Sahrens return; 1459789Sahrens 14601544Seschrock if (arc.c >= arc.c_max) 14611544Seschrock return; 14621544Seschrock 1463789Sahrens /* 14641544Seschrock * If we're within (2 * maxblocksize) bytes of the target 14651544Seschrock * cache size, increment the target cache size 1466789Sahrens */ 14671544Seschrock if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) { 14681544Seschrock atomic_add_64(&arc.c, (int64_t)bytes); 1469789Sahrens if (arc.c > arc.c_max) 1470789Sahrens arc.c = arc.c_max; 14711544Seschrock else if (state == arc.anon) 14721544Seschrock atomic_add_64(&arc.p, (int64_t)bytes); 14731544Seschrock if (arc.p > arc.c) 14741544Seschrock arc.p = arc.c; 1475789Sahrens } 14761544Seschrock ASSERT((int64_t)arc.p >= 0); 1477789Sahrens } 1478789Sahrens 1479789Sahrens /* 14801544Seschrock * Check if the cache has reached its limits and eviction is required 14811544Seschrock * prior to insert. 1482789Sahrens */ 1483789Sahrens static int 1484789Sahrens arc_evict_needed() 1485789Sahrens { 1486789Sahrens if (arc_reclaim_needed()) 1487789Sahrens return (1); 1488789Sahrens 14891544Seschrock return (arc.size > arc.c); 1490789Sahrens } 1491789Sahrens 1492789Sahrens /* 14932688Smaybee * The buffer, supplied as the first argument, needs a data block. 14942688Smaybee * So, if we are at cache max, determine which cache should be victimized. 14952688Smaybee * We have the following cases: 1496789Sahrens * 14971544Seschrock * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) -> 1498789Sahrens * In this situation if we're out of space, but the resident size of the MFU is 1499789Sahrens * under the limit, victimize the MFU cache to satisfy this insertion request. 1500789Sahrens * 15011544Seschrock * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) -> 1502789Sahrens * Here, we've used up all of the available space for the MRU, so we need to 1503789Sahrens * evict from our own cache instead. Evict from the set of resident MRU 1504789Sahrens * entries. 1505789Sahrens * 15061544Seschrock * 3. Insert for MFU (c - p) > sizeof(arc.mfu) -> 1507789Sahrens * c minus p represents the MFU space in the cache, since p is the size of the 1508789Sahrens * cache that is dedicated to the MRU. In this situation there's still space on 1509789Sahrens * the MFU side, so the MRU side needs to be victimized. 1510789Sahrens * 15111544Seschrock * 4. Insert for MFU (c - p) < sizeof(arc.mfu) -> 1512789Sahrens * MFU's resident set is consuming more space than it has been allotted. In 1513789Sahrens * this situation, we must victimize our own cache, the MFU, for this insertion. 
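 *
 * [Editor's note: illustrative sketch, not part of the original source.]
 * The four cases collapse into two comparisons, which arc_get_data_buf()
 * below implements.  A standalone rendering of that decision (sizes in
 * bytes; insert_for_mru is a hypothetical flag meaning the new buffer is
 * headed for the MRU side, i.e. cases 1 and 2) might look like:
 *
 *	typedef enum { VICTIM_MRU, VICTIM_MFU } victim_t;
 *
 *	static victim_t
 *	choose_victim(int insert_for_mru, uint64_t p, uint64_t c,
 *	    uint64_t anon_size, uint64_t mru_size, uint64_t mfu_size)
 *	{
 *		if (insert_for_mru)
 *			return (p > anon_size + mru_size ?
 *			    VICTIM_MFU : VICTIM_MRU);
 *		return (c - p > mfu_size ? VICTIM_MRU : VICTIM_MFU);
 *	}
 *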
1514789Sahrens */ 1515789Sahrens static void 15162688Smaybee arc_get_data_buf(arc_buf_t *buf) 1517789Sahrens { 1518*3290Sjohansen arc_state_t *state = buf->b_hdr->b_state; 1519*3290Sjohansen uint64_t size = buf->b_hdr->b_size; 1520*3290Sjohansen arc_buf_contents_t type = buf->b_hdr->b_type; 15212688Smaybee 15222688Smaybee arc_adapt(size, state); 1523789Sahrens 15242688Smaybee /* 15252688Smaybee * We have not yet reached cache maximum size, 15262688Smaybee * just allocate a new buffer. 15272688Smaybee */ 15282688Smaybee if (!arc_evict_needed()) { 1529*3290Sjohansen if (type == ARC_BUFC_METADATA) { 1530*3290Sjohansen buf->b_data = zio_buf_alloc(size); 1531*3290Sjohansen } else { 1532*3290Sjohansen ASSERT(type == ARC_BUFC_DATA); 1533*3290Sjohansen buf->b_data = zio_data_buf_alloc(size); 1534*3290Sjohansen } 15352688Smaybee atomic_add_64(&arc.size, size); 15362688Smaybee goto out; 15372688Smaybee } 15382688Smaybee 15392688Smaybee /* 15402688Smaybee * If we are prefetching from the mfu ghost list, this buffer 15412688Smaybee * will end up on the mru list; so steal space from there. 15422688Smaybee */ 15432688Smaybee if (state == arc.mfu_ghost) 15442688Smaybee state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu; 15452688Smaybee else if (state == arc.mru_ghost) 15462688Smaybee state = arc.mru; 1547789Sahrens 15482688Smaybee if (state == arc.mru || state == arc.anon) { 15492688Smaybee uint64_t mru_used = arc.anon->size + arc.mru->size; 15502688Smaybee state = (arc.p > mru_used) ? arc.mfu : arc.mru; 1551789Sahrens } else { 15522688Smaybee /* MFU cases */ 15532688Smaybee uint64_t mfu_space = arc.c - arc.p; 15542688Smaybee state = (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu; 15552688Smaybee } 1556*3290Sjohansen if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) { 1557*3290Sjohansen if (type == ARC_BUFC_METADATA) { 1558*3290Sjohansen buf->b_data = zio_buf_alloc(size); 1559*3290Sjohansen } else { 1560*3290Sjohansen ASSERT(type == ARC_BUFC_DATA); 1561*3290Sjohansen buf->b_data = zio_data_buf_alloc(size); 1562*3290Sjohansen } 15632688Smaybee atomic_add_64(&arc.size, size); 15642688Smaybee atomic_add_64(&arc.recycle_miss, 1); 15652688Smaybee } 15662688Smaybee ASSERT(buf->b_data != NULL); 15672688Smaybee out: 15682688Smaybee /* 15692688Smaybee * Update the state size. Note that ghost states have a 15702688Smaybee * "ghost size" and so don't need to be updated. 15712688Smaybee */ 15722688Smaybee if (!GHOST_STATE(buf->b_hdr->b_state)) { 15732688Smaybee arc_buf_hdr_t *hdr = buf->b_hdr; 15742688Smaybee 15752688Smaybee atomic_add_64(&hdr->b_state->size, size); 15762688Smaybee if (list_link_active(&hdr->b_arc_node)) { 15772688Smaybee ASSERT(refcount_is_zero(&hdr->b_refcnt)); 15782688Smaybee atomic_add_64(&hdr->b_state->lsize, size); 1579789Sahrens } 1580789Sahrens } 1581789Sahrens } 1582789Sahrens 1583789Sahrens /* 1584789Sahrens * This routine is called whenever a buffer is accessed. 15851544Seschrock * NOTE: the hash lock is dropped in this function. 1586789Sahrens */ 1587789Sahrens static void 15882688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 1589789Sahrens { 1590789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 1591789Sahrens 1592789Sahrens if (buf->b_state == arc.anon) { 1593789Sahrens /* 1594789Sahrens * This buffer is not in the cache, and does not 1595789Sahrens * appear in our "ghost" list. Add the new buffer 1596789Sahrens * to the MRU state. 
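 *
 * [Editor's note: illustrative sketch, not part of the original source.]
 * Taken together, the branches of arc_access() implement a small state
 * machine; a standalone summary of the transitions (prefetch and
 * aged_past_mintime are hypothetical flags for "access came from a
 * prefetch" and "more than ARC_MINTIME has passed since the last access")
 * might look like:
 *
 *	typedef enum { S_ANON, S_MRU, S_MRU_GHOST, S_MFU, S_MFU_GHOST } st_t;
 *
 *	static st_t
 *	arc_next_state(st_t cur, int prefetch, int aged_past_mintime)
 *	{
 *		switch (cur) {
 *		case S_ANON:		return (S_MRU);
 *		case S_MRU:		return (aged_past_mintime && !prefetch ?
 *					    S_MFU : S_MRU);
 *		case S_MRU_GHOST:	return (prefetch ? S_MRU : S_MFU);
 *		case S_MFU:		return (S_MFU);
 *		case S_MFU_GHOST:	return (prefetch ? S_MRU : S_MFU);
 *		}
 *		return (cur);
 *	}
 *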
1597789Sahrens */ 1598789Sahrens 1599789Sahrens ASSERT(buf->b_arc_access == 0); 1600789Sahrens buf->b_arc_access = lbolt; 16011544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 16021544Seschrock arc_change_state(arc.mru, buf, hash_lock); 1603789Sahrens 16041544Seschrock } else if (buf->b_state == arc.mru) { 1605789Sahrens /* 16062391Smaybee * If this buffer is here because of a prefetch, then either: 16072391Smaybee * - clear the flag if this is a "referencing" read 16082391Smaybee * (any subsequent access will bump this into the MFU state). 16092391Smaybee * or 16102391Smaybee * - move the buffer to the head of the list if this is 16112391Smaybee * another prefetch (to make it less likely to be evicted). 1612789Sahrens */ 1613789Sahrens if ((buf->b_flags & ARC_PREFETCH) != 0) { 16142391Smaybee if (refcount_count(&buf->b_refcnt) == 0) { 16152391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 16162391Smaybee mutex_enter(&arc.mru->mtx); 16172391Smaybee list_remove(&arc.mru->list, buf); 16182391Smaybee list_insert_head(&arc.mru->list, buf); 16192391Smaybee mutex_exit(&arc.mru->mtx); 16202391Smaybee } else { 16212391Smaybee buf->b_flags &= ~ARC_PREFETCH; 16222391Smaybee atomic_add_64(&arc.mru->hits, 1); 16232391Smaybee } 16242391Smaybee buf->b_arc_access = lbolt; 1625789Sahrens return; 1626789Sahrens } 1627789Sahrens 1628789Sahrens /* 1629789Sahrens * This buffer has been "accessed" only once so far, 1630789Sahrens * but it is still in the cache. Move it to the MFU 1631789Sahrens * state. 1632789Sahrens */ 1633789Sahrens if (lbolt > buf->b_arc_access + ARC_MINTIME) { 1634789Sahrens /* 1635789Sahrens * More than 125ms have passed since we 1636789Sahrens * instantiated this buffer. Move it to the 1637789Sahrens * most frequently used state. 1638789Sahrens */ 1639789Sahrens buf->b_arc_access = lbolt; 16401544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 16411544Seschrock arc_change_state(arc.mfu, buf, hash_lock); 1642789Sahrens } 16431544Seschrock atomic_add_64(&arc.mru->hits, 1); 16441544Seschrock } else if (buf->b_state == arc.mru_ghost) { 1645789Sahrens arc_state_t *new_state; 1646789Sahrens /* 1647789Sahrens * This buffer has been "accessed" recently, but 1648789Sahrens * was evicted from the cache. Move it to the 1649789Sahrens * MFU state. 1650789Sahrens */ 1651789Sahrens 1652789Sahrens if (buf->b_flags & ARC_PREFETCH) { 16531544Seschrock new_state = arc.mru; 16542391Smaybee if (refcount_count(&buf->b_refcnt) > 0) 16552391Smaybee buf->b_flags &= ~ARC_PREFETCH; 16561544Seschrock DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1657789Sahrens } else { 16581544Seschrock new_state = arc.mfu; 16591544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1660789Sahrens } 1661789Sahrens 1662789Sahrens buf->b_arc_access = lbolt; 1663789Sahrens arc_change_state(new_state, buf, hash_lock); 1664789Sahrens 16651544Seschrock atomic_add_64(&arc.mru_ghost->hits, 1); 16661544Seschrock } else if (buf->b_state == arc.mfu) { 1667789Sahrens /* 1668789Sahrens * This buffer has been accessed more than once and is 1669789Sahrens * still in the cache. Keep it in the MFU state. 1670789Sahrens * 16712391Smaybee * NOTE: an add_reference() that occurred when we did 16722391Smaybee * the arc_read() will have kicked this off the list. 16732391Smaybee * If it was a prefetch, we will explicitly move it to 16742391Smaybee * the head of the list now. 
1675789Sahrens */ 16762391Smaybee if ((buf->b_flags & ARC_PREFETCH) != 0) { 16772391Smaybee ASSERT(refcount_count(&buf->b_refcnt) == 0); 16782391Smaybee ASSERT(list_link_active(&buf->b_arc_node)); 16792391Smaybee mutex_enter(&arc.mfu->mtx); 16802391Smaybee list_remove(&arc.mfu->list, buf); 16812391Smaybee list_insert_head(&arc.mfu->list, buf); 16822391Smaybee mutex_exit(&arc.mfu->mtx); 16832391Smaybee } 16841544Seschrock atomic_add_64(&arc.mfu->hits, 1); 16852391Smaybee buf->b_arc_access = lbolt; 16861544Seschrock } else if (buf->b_state == arc.mfu_ghost) { 16872391Smaybee arc_state_t *new_state = arc.mfu; 1688789Sahrens /* 1689789Sahrens * This buffer has been accessed more than once but has 1690789Sahrens * been evicted from the cache. Move it back to the 1691789Sahrens * MFU state. 1692789Sahrens */ 1693789Sahrens 16942391Smaybee if (buf->b_flags & ARC_PREFETCH) { 16952391Smaybee /* 16962391Smaybee * This is a prefetch access... 16972391Smaybee * move this block back to the MRU state. 16982391Smaybee */ 16992391Smaybee ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 17002391Smaybee new_state = arc.mru; 17012391Smaybee } 17022391Smaybee 1703789Sahrens buf->b_arc_access = lbolt; 17041544Seschrock DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 17052391Smaybee arc_change_state(new_state, buf, hash_lock); 1706789Sahrens 17071544Seschrock atomic_add_64(&arc.mfu_ghost->hits, 1); 1708789Sahrens } else { 1709789Sahrens ASSERT(!"invalid arc state"); 1710789Sahrens } 1711789Sahrens } 1712789Sahrens 1713789Sahrens /* a generic arc_done_func_t which you can use */ 1714789Sahrens /* ARGSUSED */ 1715789Sahrens void 1716789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 1717789Sahrens { 1718789Sahrens bcopy(buf->b_data, arg, buf->b_hdr->b_size); 17191544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1720789Sahrens } 1721789Sahrens 1722789Sahrens /* a generic arc_done_func_t which you can use */ 1723789Sahrens void 1724789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 1725789Sahrens { 1726789Sahrens arc_buf_t **bufp = arg; 1727789Sahrens if (zio && zio->io_error) { 17281544Seschrock VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1729789Sahrens *bufp = NULL; 1730789Sahrens } else { 1731789Sahrens *bufp = buf; 1732789Sahrens } 1733789Sahrens } 1734789Sahrens 1735789Sahrens static void 1736789Sahrens arc_read_done(zio_t *zio) 1737789Sahrens { 17381589Smaybee arc_buf_hdr_t *hdr, *found; 1739789Sahrens arc_buf_t *buf; 1740789Sahrens arc_buf_t *abuf; /* buffer we're assigning to callback */ 1741789Sahrens kmutex_t *hash_lock; 1742789Sahrens arc_callback_t *callback_list, *acb; 1743789Sahrens int freeable = FALSE; 1744789Sahrens 1745789Sahrens buf = zio->io_private; 1746789Sahrens hdr = buf->b_hdr; 1747789Sahrens 17481589Smaybee /* 17491589Smaybee * The hdr was inserted into hash-table and removed from lists 17501589Smaybee * prior to starting I/O. We should find this header, since 17511589Smaybee * it's in the hash table, and it should be legit since it's 17521589Smaybee * not possible to evict it during the I/O. The only possible 17531589Smaybee * reason for it not to be found is if we were freed during the 17541589Smaybee * read. 
17551589Smaybee */ 17561589Smaybee found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 17573093Sahrens &hash_lock); 1758789Sahrens 17591589Smaybee ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 17601589Smaybee (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp)))); 1761789Sahrens 1762789Sahrens /* byteswap if necessary */ 1763789Sahrens callback_list = hdr->b_acb; 1764789Sahrens ASSERT(callback_list != NULL); 1765789Sahrens if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 1766789Sahrens callback_list->acb_byteswap(buf->b_data, hdr->b_size); 1767789Sahrens 17683093Sahrens arc_cksum_compute(buf); 17693093Sahrens 1770789Sahrens /* create copies of the data buffer for the callers */ 1771789Sahrens abuf = buf; 1772789Sahrens for (acb = callback_list; acb; acb = acb->acb_next) { 1773789Sahrens if (acb->acb_done) { 17742688Smaybee if (abuf == NULL) 17752688Smaybee abuf = arc_buf_clone(buf); 1776789Sahrens acb->acb_buf = abuf; 1777789Sahrens abuf = NULL; 1778789Sahrens } 1779789Sahrens } 1780789Sahrens hdr->b_acb = NULL; 1781789Sahrens hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 17821544Seschrock ASSERT(!HDR_BUF_AVAILABLE(hdr)); 17831544Seschrock if (abuf == buf) 17841544Seschrock hdr->b_flags |= ARC_BUF_AVAILABLE; 1785789Sahrens 1786789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 1787789Sahrens 1788789Sahrens if (zio->io_error != 0) { 1789789Sahrens hdr->b_flags |= ARC_IO_ERROR; 1790789Sahrens if (hdr->b_state != arc.anon) 1791789Sahrens arc_change_state(arc.anon, hdr, hash_lock); 17921544Seschrock if (HDR_IN_HASH_TABLE(hdr)) 17931544Seschrock buf_hash_remove(hdr); 1794789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 17952391Smaybee /* convert checksum errors into IO errors */ 17961544Seschrock if (zio->io_error == ECKSUM) 17971544Seschrock zio->io_error = EIO; 1798789Sahrens } 1799789Sahrens 18001544Seschrock /* 18012391Smaybee * Broadcast before we drop the hash_lock to avoid the possibility 18022391Smaybee * that the hdr (and hence the cv) might be freed before we get to 18032391Smaybee * the cv_broadcast(). 18041544Seschrock */ 18051544Seschrock cv_broadcast(&hdr->b_cv); 18061544Seschrock 18071589Smaybee if (hash_lock) { 1808789Sahrens /* 1809789Sahrens * Only call arc_access on anonymous buffers. This is because 1810789Sahrens * if we've issued an I/O for an evicted buffer, we've already 1811789Sahrens * called arc_access (to prevent any simultaneous readers from 1812789Sahrens * getting confused). 1813789Sahrens */ 1814789Sahrens if (zio->io_error == 0 && hdr->b_state == arc.anon) 18152688Smaybee arc_access(hdr, hash_lock); 18162688Smaybee mutex_exit(hash_lock); 1817789Sahrens } else { 1818789Sahrens /* 1819789Sahrens * This block was freed while we waited for the read to 1820789Sahrens * complete. It has been removed from the hash table and 1821789Sahrens * moved to the anonymous state (so that it won't show up 1822789Sahrens * in the cache). 
1823789Sahrens */ 1824789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 1825789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 1826789Sahrens } 1827789Sahrens 1828789Sahrens /* execute each callback and free its structure */ 1829789Sahrens while ((acb = callback_list) != NULL) { 1830789Sahrens if (acb->acb_done) 1831789Sahrens acb->acb_done(zio, acb->acb_buf, acb->acb_private); 1832789Sahrens 1833789Sahrens if (acb->acb_zio_dummy != NULL) { 1834789Sahrens acb->acb_zio_dummy->io_error = zio->io_error; 1835789Sahrens zio_nowait(acb->acb_zio_dummy); 1836789Sahrens } 1837789Sahrens 1838789Sahrens callback_list = acb->acb_next; 1839789Sahrens kmem_free(acb, sizeof (arc_callback_t)); 1840789Sahrens } 1841789Sahrens 1842789Sahrens if (freeable) 18431544Seschrock arc_hdr_destroy(hdr); 1844789Sahrens } 1845789Sahrens 1846789Sahrens /* 1847789Sahrens * "Read" the block at the specified DVA (in bp) via the 1848789Sahrens * cache. If the block is found in the cache, invoke the provided 1849789Sahrens * callback immediately and return. Note that the `zio' parameter 1850789Sahrens * in the callback will be NULL in this case, since no IO was 1851789Sahrens * required. If the block is not in the cache pass the read request 1852789Sahrens * on to the spa with a substitute callback function, so that the 1853789Sahrens * requested block will be added to the cache. 1854789Sahrens * 1855789Sahrens * If a read request arrives for a block that has a read in-progress, 1856789Sahrens * either wait for the in-progress read to complete (and return the 1857789Sahrens * results); or, if this is a read with a "done" func, add a record 1858789Sahrens * to the read to invoke the "done" func when the read completes, 1859789Sahrens * and return; or just return. 1860789Sahrens * 1861789Sahrens * arc_read_done() will invoke all the requested "done" functions 1862789Sahrens * for readers of this block.
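 *
 * [Editor's note: hypothetical usage sketch, not taken from an actual
 * caller; my_bp, my_zb and my_swap are placeholders and error handling is
 * omitted.]  A synchronous caller might drive this interface roughly as
 * follows, using the generic arc_getbuf_func() callback defined above:
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	(void) arc_read(NULL, spa, my_bp, my_swap, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &my_zb);
 *	if (abuf != NULL) {
 *		... use abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 *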
1863789Sahrens */ 1864789Sahrens int 1865789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 1866789Sahrens arc_done_func_t *done, void *private, int priority, int flags, 18672391Smaybee uint32_t *arc_flags, zbookmark_t *zb) 1868789Sahrens { 1869789Sahrens arc_buf_hdr_t *hdr; 1870789Sahrens arc_buf_t *buf; 1871789Sahrens kmutex_t *hash_lock; 1872789Sahrens zio_t *rzio; 1873789Sahrens 1874789Sahrens top: 1875789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 18761544Seschrock if (hdr && hdr->b_datacnt > 0) { 1877789Sahrens 18782391Smaybee *arc_flags |= ARC_CACHED; 18792391Smaybee 1880789Sahrens if (HDR_IO_IN_PROGRESS(hdr)) { 18812391Smaybee 18822391Smaybee if (*arc_flags & ARC_WAIT) { 18832391Smaybee cv_wait(&hdr->b_cv, hash_lock); 18842391Smaybee mutex_exit(hash_lock); 18852391Smaybee goto top; 18862391Smaybee } 18872391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 18882391Smaybee 18892391Smaybee if (done) { 1890789Sahrens arc_callback_t *acb = NULL; 1891789Sahrens 1892789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), 1893789Sahrens KM_SLEEP); 1894789Sahrens acb->acb_done = done; 1895789Sahrens acb->acb_private = private; 1896789Sahrens acb->acb_byteswap = swap; 1897789Sahrens if (pio != NULL) 1898789Sahrens acb->acb_zio_dummy = zio_null(pio, 1899789Sahrens spa, NULL, NULL, flags); 1900789Sahrens 1901789Sahrens ASSERT(acb->acb_done != NULL); 1902789Sahrens acb->acb_next = hdr->b_acb; 1903789Sahrens hdr->b_acb = acb; 1904789Sahrens add_reference(hdr, hash_lock, private); 1905789Sahrens mutex_exit(hash_lock); 1906789Sahrens return (0); 1907789Sahrens } 1908789Sahrens mutex_exit(hash_lock); 1909789Sahrens return (0); 1910789Sahrens } 1911789Sahrens 19121544Seschrock ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu); 1913789Sahrens 19141544Seschrock if (done) { 19152688Smaybee add_reference(hdr, hash_lock, private); 19161544Seschrock /* 19171544Seschrock * If this block is already in use, create a new 19181544Seschrock * copy of the data so that we will be guaranteed 19191544Seschrock * that arc_release() will always succeed. 
19201544Seschrock */ 19211544Seschrock buf = hdr->b_buf; 19221544Seschrock ASSERT(buf); 19231544Seschrock ASSERT(buf->b_data); 19242688Smaybee if (HDR_BUF_AVAILABLE(hdr)) { 19251544Seschrock ASSERT(buf->b_efunc == NULL); 19261544Seschrock hdr->b_flags &= ~ARC_BUF_AVAILABLE; 19272688Smaybee } else { 19282688Smaybee buf = arc_buf_clone(buf); 19291544Seschrock } 19302391Smaybee } else if (*arc_flags & ARC_PREFETCH && 19312391Smaybee refcount_count(&hdr->b_refcnt) == 0) { 19322391Smaybee hdr->b_flags |= ARC_PREFETCH; 1933789Sahrens } 1934789Sahrens DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 19352688Smaybee arc_access(hdr, hash_lock); 19362688Smaybee mutex_exit(hash_lock); 1937789Sahrens atomic_add_64(&arc.hits, 1); 1938789Sahrens if (done) 1939789Sahrens done(NULL, buf, private); 1940789Sahrens } else { 1941789Sahrens uint64_t size = BP_GET_LSIZE(bp); 1942789Sahrens arc_callback_t *acb; 1943789Sahrens 1944789Sahrens if (hdr == NULL) { 1945789Sahrens /* this block is not in the cache */ 1946789Sahrens arc_buf_hdr_t *exists; 1947*3290Sjohansen arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 1948*3290Sjohansen buf = arc_buf_alloc(spa, size, private, type); 1949789Sahrens hdr = buf->b_hdr; 1950789Sahrens hdr->b_dva = *BP_IDENTITY(bp); 1951789Sahrens hdr->b_birth = bp->blk_birth; 1952789Sahrens hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 1953789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 1954789Sahrens if (exists) { 1955789Sahrens /* somebody beat us to the hash insert */ 1956789Sahrens mutex_exit(hash_lock); 1957789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 1958789Sahrens hdr->b_birth = 0; 1959789Sahrens hdr->b_cksum0 = 0; 19601544Seschrock (void) arc_buf_remove_ref(buf, private); 1961789Sahrens goto top; /* restart the IO request */ 1962789Sahrens } 19632391Smaybee /* if this is a prefetch, we don't have a reference */ 19642391Smaybee if (*arc_flags & ARC_PREFETCH) { 19652391Smaybee (void) remove_reference(hdr, hash_lock, 19662391Smaybee private); 19672391Smaybee hdr->b_flags |= ARC_PREFETCH; 19682391Smaybee } 19692391Smaybee if (BP_GET_LEVEL(bp) > 0) 19702391Smaybee hdr->b_flags |= ARC_INDIRECT; 1971789Sahrens } else { 1972789Sahrens /* this block is in the ghost cache */ 19731544Seschrock ASSERT(GHOST_STATE(hdr->b_state)); 19741544Seschrock ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 19752391Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 19762391Smaybee ASSERT(hdr->b_buf == NULL); 1977789Sahrens 19782391Smaybee /* if this is a prefetch, we don't have a reference */ 19792391Smaybee if (*arc_flags & ARC_PREFETCH) 19802391Smaybee hdr->b_flags |= ARC_PREFETCH; 19812391Smaybee else 19822391Smaybee add_reference(hdr, hash_lock, private); 1983789Sahrens buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 19841544Seschrock buf->b_hdr = hdr; 19852688Smaybee buf->b_data = NULL; 19861544Seschrock buf->b_efunc = NULL; 19871544Seschrock buf->b_private = NULL; 19881544Seschrock buf->b_next = NULL; 19891544Seschrock hdr->b_buf = buf; 19902688Smaybee arc_get_data_buf(buf); 19911544Seschrock ASSERT(hdr->b_datacnt == 0); 19921544Seschrock hdr->b_datacnt = 1; 19932391Smaybee 1994789Sahrens } 1995789Sahrens 1996789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 1997789Sahrens acb->acb_done = done; 1998789Sahrens acb->acb_private = private; 1999789Sahrens acb->acb_byteswap = swap; 2000789Sahrens 2001789Sahrens ASSERT(hdr->b_acb == NULL); 2002789Sahrens hdr->b_acb = acb; 2003789Sahrens hdr->b_flags |= ARC_IO_IN_PROGRESS; 2004789Sahrens 2005789Sahrens /* 2006789Sahrens * If the buffer has been 
evicted, migrate it to a present state 2007789Sahrens * before issuing the I/O. Once we drop the hash-table lock, 2008789Sahrens * the header will be marked as I/O in progress and have an 2009789Sahrens * attached buffer. At this point, anybody who finds this 2010789Sahrens * buffer ought to notice that it's legit but has a pending I/O. 2011789Sahrens */ 2012789Sahrens 20131544Seschrock if (GHOST_STATE(hdr->b_state)) 20142688Smaybee arc_access(hdr, hash_lock); 20152688Smaybee mutex_exit(hash_lock); 2016789Sahrens 2017789Sahrens ASSERT3U(hdr->b_size, ==, size); 20181596Sahrens DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 20191596Sahrens zbookmark_t *, zb); 2020789Sahrens atomic_add_64(&arc.misses, 1); 20211544Seschrock 2022789Sahrens rzio = zio_read(pio, spa, bp, buf->b_data, size, 20231544Seschrock arc_read_done, buf, priority, flags, zb); 2024789Sahrens 20252391Smaybee if (*arc_flags & ARC_WAIT) 2026789Sahrens return (zio_wait(rzio)); 2027789Sahrens 20282391Smaybee ASSERT(*arc_flags & ARC_NOWAIT); 2029789Sahrens zio_nowait(rzio); 2030789Sahrens } 2031789Sahrens return (0); 2032789Sahrens } 2033789Sahrens 2034789Sahrens /* 2035789Sahrens * arc_read() variant to support pool traversal. If the block is already 2036789Sahrens * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2037789Sahrens * The idea is that we don't want pool traversal filling up memory, but 2038789Sahrens * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2039789Sahrens */ 2040789Sahrens int 2041789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2042789Sahrens { 2043789Sahrens arc_buf_hdr_t *hdr; 2044789Sahrens kmutex_t *hash_mtx; 2045789Sahrens int rc = 0; 2046789Sahrens 2047789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2048789Sahrens 20491544Seschrock if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 20501544Seschrock arc_buf_t *buf = hdr->b_buf; 20511544Seschrock 20521544Seschrock ASSERT(buf); 20531544Seschrock while (buf->b_data == NULL) { 20541544Seschrock buf = buf->b_next; 20551544Seschrock ASSERT(buf); 20561544Seschrock } 20571544Seschrock bcopy(buf->b_data, data, hdr->b_size); 20581544Seschrock } else { 2059789Sahrens rc = ENOENT; 20601544Seschrock } 2061789Sahrens 2062789Sahrens if (hash_mtx) 2063789Sahrens mutex_exit(hash_mtx); 2064789Sahrens 2065789Sahrens return (rc); 2066789Sahrens } 2067789Sahrens 20681544Seschrock void 20691544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 20701544Seschrock { 20711544Seschrock ASSERT(buf->b_hdr != NULL); 20721544Seschrock ASSERT(buf->b_hdr->b_state != arc.anon); 20731544Seschrock ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 20741544Seschrock buf->b_efunc = func; 20751544Seschrock buf->b_private = private; 20761544Seschrock } 20771544Seschrock 20781544Seschrock /* 20791544Seschrock * This is used by the DMU to let the ARC know that a buffer is 20801544Seschrock * being evicted, so the ARC should clean up. If this arc buf 20811544Seschrock * is not yet in the evicted state, it will be put there. 
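 *
 * [Editor's note: hypothetical sketch of the callback contract; the
 * callback and its private structure are invented for illustration, and
 * an eviction function of the form int (*)(arc_buf_t *) is assumed, as
 * the calls in arc_buf_evict() below suggest.]  A consumer registers its
 * eviction callback with arc_set_callback() above; the callback may run
 * with no locks held and must rely only on its own private state, e.g.:
 *
 *	typedef struct my_ref {
 *		kmutex_t	mr_lock;
 *		arc_buf_t	*mr_buf;
 *	} my_ref_t;
 *
 *	static int
 *	my_evict_cb(arc_buf_t *buf)
 *	{
 *		my_ref_t *mr = buf->b_private;
 *
 *		mutex_enter(&mr->mr_lock);
 *		mr->mr_buf = NULL;
 *		mutex_exit(&mr->mr_lock);
 *		return (0);
 *	}
 *
 *	arc_set_callback(buf, my_evict_cb, mr);
 *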
20821544Seschrock */ 20831544Seschrock int 20841544Seschrock arc_buf_evict(arc_buf_t *buf) 20851544Seschrock { 20862887Smaybee arc_buf_hdr_t *hdr; 20871544Seschrock kmutex_t *hash_lock; 20881544Seschrock arc_buf_t **bufp; 20891544Seschrock 20902887Smaybee mutex_enter(&arc_eviction_mtx); 20912887Smaybee hdr = buf->b_hdr; 20921544Seschrock if (hdr == NULL) { 20931544Seschrock /* 20941544Seschrock * We are in arc_do_user_evicts(). 20951544Seschrock */ 20961544Seschrock ASSERT(buf->b_data == NULL); 20972887Smaybee mutex_exit(&arc_eviction_mtx); 20981544Seschrock return (0); 20991544Seschrock } 21002887Smaybee hash_lock = HDR_LOCK(hdr); 21012887Smaybee mutex_exit(&arc_eviction_mtx); 21021544Seschrock 21031544Seschrock mutex_enter(hash_lock); 21041544Seschrock 21052724Smaybee if (buf->b_data == NULL) { 21062724Smaybee /* 21072724Smaybee * We are on the eviction list. 21082724Smaybee */ 21092724Smaybee mutex_exit(hash_lock); 21102724Smaybee mutex_enter(&arc_eviction_mtx); 21112724Smaybee if (buf->b_hdr == NULL) { 21122724Smaybee /* 21132724Smaybee * We are already in arc_do_user_evicts(). 21142724Smaybee */ 21152724Smaybee mutex_exit(&arc_eviction_mtx); 21162724Smaybee return (0); 21172724Smaybee } else { 21182724Smaybee arc_buf_t copy = *buf; /* structure assignment */ 21192724Smaybee /* 21202724Smaybee * Process this buffer now 21212724Smaybee * but let arc_do_user_evicts() do the reaping. 21222724Smaybee */ 21232724Smaybee buf->b_efunc = NULL; 21242724Smaybee mutex_exit(&arc_eviction_mtx); 21252724Smaybee VERIFY(copy.b_efunc(&copy) == 0); 21262724Smaybee return (1); 21272724Smaybee } 21282724Smaybee } 21292724Smaybee 21302724Smaybee ASSERT(buf->b_hdr == hdr); 21312724Smaybee ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 21321544Seschrock ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu); 21331544Seschrock 21341544Seschrock /* 21351544Seschrock * Pull this buffer off of the hdr 21361544Seschrock */ 21371544Seschrock bufp = &hdr->b_buf; 21381544Seschrock while (*bufp != buf) 21391544Seschrock bufp = &(*bufp)->b_next; 21401544Seschrock *bufp = buf->b_next; 21411544Seschrock 21421544Seschrock ASSERT(buf->b_data != NULL); 21432688Smaybee arc_buf_destroy(buf, FALSE, FALSE); 21441544Seschrock 21451544Seschrock if (hdr->b_datacnt == 0) { 21461544Seschrock arc_state_t *old_state = hdr->b_state; 21471544Seschrock arc_state_t *evicted_state; 21481544Seschrock 21491544Seschrock ASSERT(refcount_is_zero(&hdr->b_refcnt)); 21501544Seschrock 21511544Seschrock evicted_state = 21521544Seschrock (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost; 21531544Seschrock 21541544Seschrock mutex_enter(&old_state->mtx); 21551544Seschrock mutex_enter(&evicted_state->mtx); 21561544Seschrock 21571544Seschrock arc_change_state(evicted_state, hdr, hash_lock); 21581544Seschrock ASSERT(HDR_IN_HASH_TABLE(hdr)); 21591544Seschrock hdr->b_flags = ARC_IN_HASH_TABLE; 21601544Seschrock 21611544Seschrock mutex_exit(&evicted_state->mtx); 21621544Seschrock mutex_exit(&old_state->mtx); 21631544Seschrock } 21641544Seschrock mutex_exit(hash_lock); 21651819Smaybee 21661544Seschrock VERIFY(buf->b_efunc(buf) == 0); 21671544Seschrock buf->b_efunc = NULL; 21681544Seschrock buf->b_private = NULL; 21691544Seschrock buf->b_hdr = NULL; 21701544Seschrock kmem_cache_free(buf_cache, buf); 21711544Seschrock return (1); 21721544Seschrock } 21731544Seschrock 2174789Sahrens /* 2175789Sahrens * Release this buffer from the cache. This must be done 2176789Sahrens * after a read and prior to modifying the buffer contents.
2177789Sahrens * If the buffer has more than one reference, we must make 2178789Sahrens * make a new hdr for the buffer. 2179789Sahrens */ 2180789Sahrens void 2181789Sahrens arc_release(arc_buf_t *buf, void *tag) 2182789Sahrens { 2183789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 2184789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr); 2185789Sahrens 2186789Sahrens /* this buffer is not on any list */ 2187789Sahrens ASSERT(refcount_count(&hdr->b_refcnt) > 0); 2188789Sahrens 2189789Sahrens if (hdr->b_state == arc.anon) { 2190789Sahrens /* this buffer is already released */ 2191789Sahrens ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 2192789Sahrens ASSERT(BUF_EMPTY(hdr)); 21931544Seschrock ASSERT(buf->b_efunc == NULL); 21943093Sahrens arc_buf_thaw(buf); 2195789Sahrens return; 2196789Sahrens } 2197789Sahrens 2198789Sahrens mutex_enter(hash_lock); 2199789Sahrens 22001544Seschrock /* 22011544Seschrock * Do we have more than one buf? 22021544Seschrock */ 22031544Seschrock if (hdr->b_buf != buf || buf->b_next != NULL) { 2204789Sahrens arc_buf_hdr_t *nhdr; 2205789Sahrens arc_buf_t **bufp; 2206789Sahrens uint64_t blksz = hdr->b_size; 2207789Sahrens spa_t *spa = hdr->b_spa; 2208*3290Sjohansen arc_buf_contents_t type = hdr->b_type; 2209789Sahrens 22101544Seschrock ASSERT(hdr->b_datacnt > 1); 2211789Sahrens /* 2212789Sahrens * Pull the data off of this buf and attach it to 2213789Sahrens * a new anonymous buf. 2214789Sahrens */ 22151544Seschrock (void) remove_reference(hdr, hash_lock, tag); 2216789Sahrens bufp = &hdr->b_buf; 22171544Seschrock while (*bufp != buf) 2218789Sahrens bufp = &(*bufp)->b_next; 2219789Sahrens *bufp = (*bufp)->b_next; 22201544Seschrock 2221789Sahrens ASSERT3U(hdr->b_state->size, >=, hdr->b_size); 2222789Sahrens atomic_add_64(&hdr->b_state->size, -hdr->b_size); 22231544Seschrock if (refcount_is_zero(&hdr->b_refcnt)) { 22241544Seschrock ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size); 22251544Seschrock atomic_add_64(&hdr->b_state->lsize, -hdr->b_size); 22261544Seschrock } 22271544Seschrock hdr->b_datacnt -= 1; 22281544Seschrock 2229789Sahrens mutex_exit(hash_lock); 2230789Sahrens 2231789Sahrens nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 2232789Sahrens nhdr->b_size = blksz; 2233789Sahrens nhdr->b_spa = spa; 2234*3290Sjohansen nhdr->b_type = type; 2235789Sahrens nhdr->b_buf = buf; 2236789Sahrens nhdr->b_state = arc.anon; 2237789Sahrens nhdr->b_arc_access = 0; 2238789Sahrens nhdr->b_flags = 0; 22391544Seschrock nhdr->b_datacnt = 1; 22403093Sahrens nhdr->b_freeze_cksum = 22413093Sahrens kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 22423093Sahrens *nhdr->b_freeze_cksum = *hdr->b_freeze_cksum; /* struct copy */ 2243789Sahrens buf->b_hdr = nhdr; 2244789Sahrens buf->b_next = NULL; 2245789Sahrens (void) refcount_add(&nhdr->b_refcnt, tag); 2246789Sahrens atomic_add_64(&arc.anon->size, blksz); 2247789Sahrens 2248789Sahrens hdr = nhdr; 2249789Sahrens } else { 22501544Seschrock ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2251789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 2252789Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2253789Sahrens arc_change_state(arc.anon, hdr, hash_lock); 2254789Sahrens hdr->b_arc_access = 0; 2255789Sahrens mutex_exit(hash_lock); 2256789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 2257789Sahrens hdr->b_birth = 0; 2258789Sahrens hdr->b_cksum0 = 0; 2259789Sahrens } 22601544Seschrock buf->b_efunc = NULL; 22611544Seschrock buf->b_private = NULL; 22623093Sahrens arc_buf_thaw(buf); 2263789Sahrens } 2264789Sahrens 2265789Sahrens int 2266789Sahrens arc_released(arc_buf_t 
*buf) 2267789Sahrens { 22681544Seschrock return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon); 22691544Seschrock } 22701544Seschrock 22711544Seschrock int 22721544Seschrock arc_has_callback(arc_buf_t *buf) 22731544Seschrock { 22741544Seschrock return (buf->b_efunc != NULL); 2275789Sahrens } 2276789Sahrens 22771544Seschrock #ifdef ZFS_DEBUG 22781544Seschrock int 22791544Seschrock arc_referenced(arc_buf_t *buf) 22801544Seschrock { 22811544Seschrock return (refcount_count(&buf->b_hdr->b_refcnt)); 22821544Seschrock } 22831544Seschrock #endif 22841544Seschrock 2285789Sahrens static void 2286789Sahrens arc_write_done(zio_t *zio) 2287789Sahrens { 2288789Sahrens arc_buf_t *buf; 2289789Sahrens arc_buf_hdr_t *hdr; 2290789Sahrens arc_callback_t *acb; 2291789Sahrens 2292789Sahrens buf = zio->io_private; 2293789Sahrens hdr = buf->b_hdr; 2294789Sahrens acb = hdr->b_acb; 2295789Sahrens hdr->b_acb = NULL; 22961544Seschrock ASSERT(acb != NULL); 2297789Sahrens 2298789Sahrens /* this buffer is on no lists and is not in the hash table */ 2299789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 2300789Sahrens 2301789Sahrens hdr->b_dva = *BP_IDENTITY(zio->io_bp); 2302789Sahrens hdr->b_birth = zio->io_bp->blk_birth; 2303789Sahrens hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 23041544Seschrock /* 23051544Seschrock * If the block to be written was all-zero, we may have 23061544Seschrock * compressed it away. In this case no write was performed 23071544Seschrock * so there will be no dva/birth-date/checksum. The buffer 23081544Seschrock * must therefor remain anonymous (and uncached). 23091544Seschrock */ 2310789Sahrens if (!BUF_EMPTY(hdr)) { 2311789Sahrens arc_buf_hdr_t *exists; 2312789Sahrens kmutex_t *hash_lock; 2313789Sahrens 23143093Sahrens arc_cksum_verify(buf); 23153093Sahrens 2316789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2317789Sahrens if (exists) { 2318789Sahrens /* 2319789Sahrens * This can only happen if we overwrite for 2320789Sahrens * sync-to-convergence, because we remove 2321789Sahrens * buffers from the hash table when we arc_free(). 2322789Sahrens */ 2323789Sahrens ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 2324789Sahrens BP_IDENTITY(zio->io_bp))); 2325789Sahrens ASSERT3U(zio->io_bp_orig.blk_birth, ==, 2326789Sahrens zio->io_bp->blk_birth); 2327789Sahrens 2328789Sahrens ASSERT(refcount_is_zero(&exists->b_refcnt)); 2329789Sahrens arc_change_state(arc.anon, exists, hash_lock); 2330789Sahrens mutex_exit(hash_lock); 23311544Seschrock arc_hdr_destroy(exists); 2332789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 2333789Sahrens ASSERT3P(exists, ==, NULL); 2334789Sahrens } 23351544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 23362688Smaybee arc_access(hdr, hash_lock); 23372688Smaybee mutex_exit(hash_lock); 23381544Seschrock } else if (acb->acb_done == NULL) { 23391544Seschrock int destroy_hdr; 23401544Seschrock /* 23411544Seschrock * This is an anonymous buffer with no user callback, 23421544Seschrock * destroy it if there are no active references. 
23431544Seschrock */ 23441544Seschrock mutex_enter(&arc_eviction_mtx); 23451544Seschrock destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 23461544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 23471544Seschrock mutex_exit(&arc_eviction_mtx); 23481544Seschrock if (destroy_hdr) 23491544Seschrock arc_hdr_destroy(hdr); 23501544Seschrock } else { 23511544Seschrock hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2352789Sahrens } 23531544Seschrock 23541544Seschrock if (acb->acb_done) { 2355789Sahrens ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 2356789Sahrens acb->acb_done(zio, buf, acb->acb_private); 2357789Sahrens } 2358789Sahrens 23591544Seschrock kmem_free(acb, sizeof (arc_callback_t)); 2360789Sahrens } 2361789Sahrens 2362789Sahrens int 23631775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 2364789Sahrens uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 2365789Sahrens arc_done_func_t *done, void *private, int priority, int flags, 23661544Seschrock uint32_t arc_flags, zbookmark_t *zb) 2367789Sahrens { 2368789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 2369789Sahrens arc_callback_t *acb; 2370789Sahrens zio_t *rzio; 2371789Sahrens 2372789Sahrens /* this is a private buffer - no locking required */ 2373789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 2374789Sahrens ASSERT(BUF_EMPTY(hdr)); 2375789Sahrens ASSERT(!HDR_IO_ERROR(hdr)); 23762237Smaybee ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 23772237Smaybee ASSERT(hdr->b_acb == 0); 2378789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2379789Sahrens acb->acb_done = done; 2380789Sahrens acb->acb_private = private; 2381789Sahrens acb->acb_byteswap = (arc_byteswap_func_t *)-1; 2382789Sahrens hdr->b_acb = acb; 23831544Seschrock hdr->b_flags |= ARC_IO_IN_PROGRESS; 23843093Sahrens arc_cksum_compute(buf); 23851775Sbillm rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 23861544Seschrock buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb); 2387789Sahrens 2388789Sahrens if (arc_flags & ARC_WAIT) 2389789Sahrens return (zio_wait(rzio)); 2390789Sahrens 2391789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 2392789Sahrens zio_nowait(rzio); 2393789Sahrens 2394789Sahrens return (0); 2395789Sahrens } 2396789Sahrens 2397789Sahrens int 2398789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 2399789Sahrens zio_done_func_t *done, void *private, uint32_t arc_flags) 2400789Sahrens { 2401789Sahrens arc_buf_hdr_t *ab; 2402789Sahrens kmutex_t *hash_lock; 2403789Sahrens zio_t *zio; 2404789Sahrens 2405789Sahrens /* 2406789Sahrens * If this buffer is in the cache, release it, so it 2407789Sahrens * can be re-used. 2408789Sahrens */ 2409789Sahrens ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2410789Sahrens if (ab != NULL) { 2411789Sahrens /* 2412789Sahrens * The checksum of blocks to free is not always 2413789Sahrens * preserved (eg. on the deadlist). However, if it is 2414789Sahrens * nonzero, it should match what we have in the cache. 2415789Sahrens */ 2416789Sahrens ASSERT(bp->blk_cksum.zc_word[0] == 0 || 2417789Sahrens ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 24181990Smaybee if (ab->b_state != arc.anon) 24191990Smaybee arc_change_state(arc.anon, ab, hash_lock); 24202391Smaybee if (HDR_IO_IN_PROGRESS(ab)) { 24212391Smaybee /* 24222391Smaybee * This should only happen when we prefetch. 
24232391Smaybee */ 24242391Smaybee ASSERT(ab->b_flags & ARC_PREFETCH); 24252391Smaybee ASSERT3U(ab->b_datacnt, ==, 1); 24262391Smaybee ab->b_flags |= ARC_FREED_IN_READ; 24272391Smaybee if (HDR_IN_HASH_TABLE(ab)) 24282391Smaybee buf_hash_remove(ab); 24292391Smaybee ab->b_arc_access = 0; 24302391Smaybee bzero(&ab->b_dva, sizeof (dva_t)); 24312391Smaybee ab->b_birth = 0; 24322391Smaybee ab->b_cksum0 = 0; 24332391Smaybee ab->b_buf->b_efunc = NULL; 24342391Smaybee ab->b_buf->b_private = NULL; 24352391Smaybee mutex_exit(hash_lock); 24362391Smaybee } else if (refcount_is_zero(&ab->b_refcnt)) { 2437789Sahrens mutex_exit(hash_lock); 24381544Seschrock arc_hdr_destroy(ab); 2439789Sahrens atomic_add_64(&arc.deleted, 1); 2440789Sahrens } else { 24411589Smaybee /* 24422391Smaybee * We still have an active reference on this 24432391Smaybee * buffer. This can happen, e.g., from 24442391Smaybee * dbuf_unoverride(). 24451589Smaybee */ 24462391Smaybee ASSERT(!HDR_IN_HASH_TABLE(ab)); 2447789Sahrens ab->b_arc_access = 0; 2448789Sahrens bzero(&ab->b_dva, sizeof (dva_t)); 2449789Sahrens ab->b_birth = 0; 2450789Sahrens ab->b_cksum0 = 0; 24511544Seschrock ab->b_buf->b_efunc = NULL; 24521544Seschrock ab->b_buf->b_private = NULL; 2453789Sahrens mutex_exit(hash_lock); 2454789Sahrens } 2455789Sahrens } 2456789Sahrens 2457789Sahrens zio = zio_free(pio, spa, txg, bp, done, private); 2458789Sahrens 2459789Sahrens if (arc_flags & ARC_WAIT) 2460789Sahrens return (zio_wait(zio)); 2461789Sahrens 2462789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 2463789Sahrens zio_nowait(zio); 2464789Sahrens 2465789Sahrens return (0); 2466789Sahrens } 2467789Sahrens 2468789Sahrens void 2469789Sahrens arc_tempreserve_clear(uint64_t tempreserve) 2470789Sahrens { 2471789Sahrens atomic_add_64(&arc_tempreserve, -tempreserve); 2472789Sahrens ASSERT((int64_t)arc_tempreserve >= 0); 2473789Sahrens } 2474789Sahrens 2475789Sahrens int 2476789Sahrens arc_tempreserve_space(uint64_t tempreserve) 2477789Sahrens { 2478789Sahrens #ifdef ZFS_DEBUG 2479789Sahrens /* 2480789Sahrens * Once in a while, fail for no reason. Everything should cope. 2481789Sahrens */ 2482789Sahrens if (spa_get_random(10000) == 0) { 2483789Sahrens dprintf("forcing random failure\n"); 2484789Sahrens return (ERESTART); 2485789Sahrens } 2486789Sahrens #endif 2487982Smaybee if (tempreserve > arc.c/4 && !arc.no_grow) 2488982Smaybee arc.c = MIN(arc.c_max, tempreserve * 4); 2489982Smaybee if (tempreserve > arc.c) 2490982Smaybee return (ENOMEM); 2491982Smaybee 2492789Sahrens /* 2493982Smaybee * Throttle writes when the amount of dirty data in the cache 2494982Smaybee * gets too large. We try to keep the cache less than half full 2495982Smaybee * of dirty blocks so that our sync times don't grow too large. 2496982Smaybee * Note: if two requests come in concurrently, we might let them 2497982Smaybee * both succeed, when one of them should fail. Not a huge deal. 2498982Smaybee * 2499982Smaybee * XXX The limit should be adjusted dynamically to keep the time 2500982Smaybee * to sync a dataset fixed (around 1-5 seconds?). 
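 *
 * [Editor's note: worked example with hypothetical numbers.]  With
 * arc.c = 1GB, a request for tempreserve = 250MB while
 * arc_tempreserve + arc.anon->size is already 300MB is throttled, because
 * both halves of the test below hold:
 *
 *	arc_tempreserve + arc.anon->size	= 300MB > arc.c/4 = 256MB
 *	250MB + 300MB				= 550MB > arc.c/2 = 512MB
 *
 * so arc_tempreserve_space() returns ERESTART and the caller typically
 * waits for some dirty data to sync before retrying.
 *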
2501789Sahrens */ 2502789Sahrens 2503982Smaybee if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 && 2504982Smaybee arc_tempreserve + arc.anon->size > arc.c / 4) { 2505789Sahrens dprintf("failing, arc_tempreserve=%lluK anon=%lluK " 2506789Sahrens "tempreserve=%lluK arc.c=%lluK\n", 2507789Sahrens arc_tempreserve>>10, arc.anon->lsize>>10, 2508789Sahrens tempreserve>>10, arc.c>>10); 2509789Sahrens return (ERESTART); 2510789Sahrens } 2511789Sahrens atomic_add_64(&arc_tempreserve, tempreserve); 2512789Sahrens return (0); 2513789Sahrens } 2514789Sahrens 2515789Sahrens void 2516789Sahrens arc_init(void) 2517789Sahrens { 2518789Sahrens mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 2519789Sahrens cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 2520789Sahrens 25212391Smaybee /* Convert seconds to clock ticks */ 25222638Sperrin arc_min_prefetch_lifespan = 1 * hz; 25232391Smaybee 2524789Sahrens /* Start out with 1/8 of all memory */ 2525789Sahrens arc.c = physmem * PAGESIZE / 8; 2526789Sahrens 2527789Sahrens #ifdef _KERNEL 2528789Sahrens /* 2529789Sahrens * On architectures where the physical memory can be larger 2530789Sahrens * than the addressable space (intel in 32-bit mode), we may 2531789Sahrens * need to limit the cache to 1/8 of VM size. 2532789Sahrens */ 2533789Sahrens arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 2534789Sahrens #endif 2535789Sahrens 2536982Smaybee /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 2537789Sahrens arc.c_min = MAX(arc.c / 4, 64<<20); 2538982Smaybee /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 2539789Sahrens if (arc.c * 8 >= 1<<30) 2540789Sahrens arc.c_max = (arc.c * 8) - (1<<30); 2541789Sahrens else 2542789Sahrens arc.c_max = arc.c_min; 2543789Sahrens arc.c_max = MAX(arc.c * 6, arc.c_max); 25442885Sahrens 25452885Sahrens /* 25462885Sahrens * Allow the tunables to override our calculations if they are 25472885Sahrens * reasonable (ie. 
over 64MB) 25482885Sahrens */ 25492885Sahrens if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 25502885Sahrens arc.c_max = zfs_arc_max; 25512885Sahrens if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc.c_max) 25522885Sahrens arc.c_min = zfs_arc_min; 25532885Sahrens 2554789Sahrens arc.c = arc.c_max; 2555789Sahrens arc.p = (arc.c >> 1); 2556789Sahrens 2557789Sahrens /* if kmem_flags are set, lets try to use less memory */ 2558789Sahrens if (kmem_debugging()) 2559789Sahrens arc.c = arc.c / 2; 2560789Sahrens if (arc.c < arc.c_min) 2561789Sahrens arc.c = arc.c_min; 2562789Sahrens 2563789Sahrens arc.anon = &ARC_anon; 25641544Seschrock arc.mru = &ARC_mru; 25651544Seschrock arc.mru_ghost = &ARC_mru_ghost; 25661544Seschrock arc.mfu = &ARC_mfu; 25671544Seschrock arc.mfu_ghost = &ARC_mfu_ghost; 25681544Seschrock arc.size = 0; 2569789Sahrens 25702688Smaybee arc.hits = 0; 25712688Smaybee arc.recycle_miss = 0; 25722688Smaybee arc.evict_skip = 0; 25732688Smaybee arc.mutex_miss = 0; 25742688Smaybee 25752856Snd150628 mutex_init(&arc.anon->mtx, NULL, MUTEX_DEFAULT, NULL); 25762856Snd150628 mutex_init(&arc.mru->mtx, NULL, MUTEX_DEFAULT, NULL); 25772856Snd150628 mutex_init(&arc.mru_ghost->mtx, NULL, MUTEX_DEFAULT, NULL); 25782856Snd150628 mutex_init(&arc.mfu->mtx, NULL, MUTEX_DEFAULT, NULL); 25792856Snd150628 mutex_init(&arc.mfu_ghost->mtx, NULL, MUTEX_DEFAULT, NULL); 25802856Snd150628 25811544Seschrock list_create(&arc.mru->list, sizeof (arc_buf_hdr_t), 2582789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 25831544Seschrock list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t), 2584789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 25851544Seschrock list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t), 2586789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 25871544Seschrock list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t), 2588789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 2589789Sahrens 2590789Sahrens buf_init(); 2591789Sahrens 2592789Sahrens arc_thread_exit = 0; 25931544Seschrock arc_eviction_list = NULL; 25941544Seschrock mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 25952887Smaybee bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 2596789Sahrens 2597789Sahrens (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 2598789Sahrens TS_RUN, minclsyspri); 25993158Smaybee 26003158Smaybee arc_dead = FALSE; 2601789Sahrens } 2602789Sahrens 2603789Sahrens void 2604789Sahrens arc_fini(void) 2605789Sahrens { 2606789Sahrens mutex_enter(&arc_reclaim_thr_lock); 2607789Sahrens arc_thread_exit = 1; 2608789Sahrens while (arc_thread_exit != 0) 2609789Sahrens cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 2610789Sahrens mutex_exit(&arc_reclaim_thr_lock); 2611789Sahrens 2612789Sahrens arc_flush(); 2613789Sahrens 2614789Sahrens arc_dead = TRUE; 2615789Sahrens 26161544Seschrock mutex_destroy(&arc_eviction_mtx); 2617789Sahrens mutex_destroy(&arc_reclaim_thr_lock); 2618789Sahrens cv_destroy(&arc_reclaim_thr_cv); 2619789Sahrens 26201544Seschrock list_destroy(&arc.mru->list); 26211544Seschrock list_destroy(&arc.mru_ghost->list); 26221544Seschrock list_destroy(&arc.mfu->list); 26231544Seschrock list_destroy(&arc.mfu_ghost->list); 2624789Sahrens 26252856Snd150628 mutex_destroy(&arc.anon->mtx); 26262856Snd150628 mutex_destroy(&arc.mru->mtx); 26272856Snd150628 mutex_destroy(&arc.mru_ghost->mtx); 26282856Snd150628 mutex_destroy(&arc.mfu->mtx); 26292856Snd150628 mutex_destroy(&arc.mfu_ghost->mtx); 26302856Snd150628 2631789Sahrens buf_fini(); 2632789Sahrens } 2633
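
/*
 * [Editor's note: worked example of the default sizing in arc_init() above,
 * assuming a hypothetical 4GB machine with the zfs_arc_max/zfs_arc_min
 * tunables unset and kmem debugging off.]
 *
 *	arc.c     = physmem * PAGESIZE / 8		= 512MB
 *	arc.c_min = MAX(arc.c / 4, 64MB)		= 128MB
 *	arc.c_max = (arc.c * 8) - 1GB			= 3GB
 *	arc.c_max = MAX(arc.c * 6, arc.c_max)		= 3GB
 *	arc.c     = arc.c_max				= 3GB
 *	arc.p     = arc.c / 2				= 1.5GB
 */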