1789Sahrens /* 2789Sahrens * CDDL HEADER START 3789Sahrens * 4789Sahrens * The contents of this file are subject to the terms of the 5*1484Sek110237 * Common Development and Distribution License (the "License"). 6*1484Sek110237 * You may not use this file except in compliance with the License. 7789Sahrens * 8789Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9789Sahrens * or http://www.opensolaris.org/os/licensing. 10789Sahrens * See the License for the specific language governing permissions 11789Sahrens * and limitations under the License. 12789Sahrens * 13789Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14789Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15789Sahrens * If applicable, add the following below this CDDL HEADER, with the 16789Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17789Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18789Sahrens * 19789Sahrens * CDDL HEADER END 20789Sahrens */ 21789Sahrens /* 22*1484Sek110237 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23789Sahrens * Use is subject to license terms. 24789Sahrens */ 25789Sahrens 26789Sahrens #pragma ident "%Z%%M% %I% %E% SMI" 27789Sahrens 28789Sahrens /* 29789Sahrens * DVA-based Adjustable Relpacement Cache 30789Sahrens * 31789Sahrens * While much of the theory of operation and algorithms used here 32789Sahrens * are based on the self-tuning, low overhead replacement cache 33789Sahrens * presented by Megiddo and Modha at FAST 2003, there are some 34789Sahrens * significant differences: 35789Sahrens * 36789Sahrens * 1. The Megiddo and Modha model assumes any page is evictable. 37789Sahrens * Pages in its cache cannot be "locked" into memory. This makes 38789Sahrens * the eviction algorithm simple: evict the last page in the list. 39789Sahrens * This also make the performance characteristics easy to reason 40789Sahrens * about. Our cache is not so simple. At any given moment, some 41789Sahrens * subset of the blocks in the cache are un-evictable because we 42789Sahrens * have handed out a reference to them. Blocks are only evictable 43789Sahrens * when there are no external references active. This makes 44789Sahrens * eviction far more problematic: we choose to evict the evictable 45789Sahrens * blocks that are the "lowest" in the list. 46789Sahrens * 47789Sahrens * There are times when it is not possible to evict the requested 48789Sahrens * space. In these circumstances we are unable to adjust the cache 49789Sahrens * size. To prevent the cache growing unbounded at these times we 50789Sahrens * implement a "cache throttle" that slowes the flow of new data 51789Sahrens * into the cache until we can make space avaiable. 52789Sahrens * 53789Sahrens * 2. The Megiddo and Modha model assumes a fixed cache size. 54789Sahrens * Pages are evicted when the cache is full and there is a cache 55789Sahrens * miss. Our model has a variable sized cache. It grows with 56789Sahrens * high use, but also tries to react to memory preasure from the 57789Sahrens * operating system: decreasing its size when system memory is 58789Sahrens * tight. 59789Sahrens * 60789Sahrens * 3. The Megiddo and Modha model assumes a fixed page size. All 61789Sahrens * elements of the cache are therefor exactly the same size. So 62789Sahrens * when adjusting the cache size following a cache miss, its simply 63789Sahrens * a matter of choosing a single page to evict. In our model, we 64789Sahrens * have variable sized cache blocks (rangeing from 512 bytes to 65789Sahrens * 128K bytes). We therefor choose a set of blocks to evict to make 66789Sahrens * space for a cache miss that approximates as closely as possible 67789Sahrens * the space used by the new block. 68789Sahrens * 69789Sahrens * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" 70789Sahrens * by N. Megiddo & D. Modha, FAST 2003 71789Sahrens */ 72789Sahrens 73789Sahrens /* 74789Sahrens * The locking model: 75789Sahrens * 76789Sahrens * A new reference to a cache buffer can be obtained in two 77789Sahrens * ways: 1) via a hash table lookup using the DVA as a key, 78789Sahrens * or 2) via one of the ARC lists. The arc_read() inerface 79789Sahrens * uses method 1, while the internal arc algorithms for 80789Sahrens * adjusting the cache use method 2. We therefor provide two 81789Sahrens * types of locks: 1) the hash table lock array, and 2) the 82789Sahrens * arc list locks. 83789Sahrens * 84789Sahrens * Buffers do not have their own mutexs, rather they rely on the 85789Sahrens * hash table mutexs for the bulk of their protection (i.e. most 86789Sahrens * fields in the arc_buf_hdr_t are protected by these mutexs). 87789Sahrens * 88789Sahrens * buf_hash_find() returns the appropriate mutex (held) when it 89789Sahrens * locates the requested buffer in the hash table. It returns 90789Sahrens * NULL for the mutex if the buffer was not in the table. 91789Sahrens * 92789Sahrens * buf_hash_remove() expects the appropriate hash mutex to be 93789Sahrens * already held before it is invoked. 94789Sahrens * 95789Sahrens * Each arc state also has a mutex which is used to protect the 96789Sahrens * buffer list associated with the state. When attempting to 97789Sahrens * obtain a hash table lock while holding an arc list lock you 98789Sahrens * must use: mutex_tryenter() to avoid deadlock. Also note that 99789Sahrens * the "top" state mutex must be held before the "bot" state mutex. 100789Sahrens * 101789Sahrens * Note that the majority of the performance stats are manipulated 102789Sahrens * with atomic operations. 103789Sahrens */ 104789Sahrens 105789Sahrens #include <sys/spa.h> 106789Sahrens #include <sys/zio.h> 107789Sahrens #include <sys/zfs_context.h> 108789Sahrens #include <sys/arc.h> 109789Sahrens #include <sys/refcount.h> 110789Sahrens #ifdef _KERNEL 111789Sahrens #include <sys/vmsystm.h> 112789Sahrens #include <vm/anon.h> 113789Sahrens #include <sys/fs/swapnode.h> 114*1484Sek110237 #include <sys/dnlc.h> 115789Sahrens #endif 116789Sahrens #include <sys/callb.h> 117789Sahrens 118789Sahrens static kmutex_t arc_reclaim_thr_lock; 119789Sahrens static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */ 120789Sahrens static uint8_t arc_thread_exit; 121789Sahrens 122*1484Sek110237 #define ARC_REDUCE_DNLC_PERCENT 3 123*1484Sek110237 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT; 124*1484Sek110237 125789Sahrens typedef enum arc_reclaim_strategy { 126789Sahrens ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */ 127789Sahrens ARC_RECLAIM_CONS /* Conservative reclaim strategy */ 128789Sahrens } arc_reclaim_strategy_t; 129789Sahrens 130789Sahrens /* number of seconds before growing cache again */ 131789Sahrens static int arc_grow_retry = 60; 132789Sahrens 133789Sahrens static kmutex_t arc_reclaim_lock; 134789Sahrens static int arc_dead; 135789Sahrens 136789Sahrens /* 137789Sahrens * Note that buffers can be on one of 5 states: 138789Sahrens * ARC_anon - anonymous (discussed below) 139789Sahrens * ARC_mru_top - recently used, currently cached 140789Sahrens * ARC_mru_bot - recentely used, no longer in cache 141789Sahrens * ARC_mfu_top - frequently used, currently cached 142789Sahrens * ARC_mfu_bot - frequently used, no longer in cache 143789Sahrens * When there are no active references to the buffer, they 144789Sahrens * are linked onto one of the lists in arc. These are the 145789Sahrens * only buffers that can be evicted or deleted. 146789Sahrens * 147789Sahrens * Anonymous buffers are buffers that are not associated with 148789Sahrens * a DVA. These are buffers that hold dirty block copies 149789Sahrens * before they are written to stable storage. By definition, 150789Sahrens * they are "ref'd" and are considered part of arc_mru_top 151789Sahrens * that cannot be freed. Generally, they will aquire a DVA 152789Sahrens * as they are written and migrate onto the arc_mru_top list. 153789Sahrens */ 154789Sahrens 155789Sahrens typedef struct arc_state { 156789Sahrens list_t list; /* linked list of evictable buffer in state */ 157789Sahrens uint64_t lsize; /* total size of buffers in the linked list */ 158789Sahrens uint64_t size; /* total size of all buffers in this state */ 159789Sahrens uint64_t hits; 160789Sahrens kmutex_t mtx; 161789Sahrens } arc_state_t; 162789Sahrens 163789Sahrens /* The 5 states: */ 164789Sahrens static arc_state_t ARC_anon; 165789Sahrens static arc_state_t ARC_mru_top; 166789Sahrens static arc_state_t ARC_mru_bot; 167789Sahrens static arc_state_t ARC_mfu_top; 168789Sahrens static arc_state_t ARC_mfu_bot; 169789Sahrens 170789Sahrens static struct arc { 171789Sahrens arc_state_t *anon; 172789Sahrens arc_state_t *mru_top; 173789Sahrens arc_state_t *mru_bot; 174789Sahrens arc_state_t *mfu_top; 175789Sahrens arc_state_t *mfu_bot; 176789Sahrens uint64_t size; /* Actual total arc size */ 177789Sahrens uint64_t p; /* Target size (in bytes) of mru_top */ 178789Sahrens uint64_t c; /* Target size of cache (in bytes) */ 179789Sahrens uint64_t c_min; /* Minimum target cache size */ 180789Sahrens uint64_t c_max; /* Maximum target cache size */ 181789Sahrens uint64_t incr; /* Size by which to increment arc.c */ 182789Sahrens int64_t size_check; 183789Sahrens 184789Sahrens /* performance stats */ 185789Sahrens uint64_t hits; 186789Sahrens uint64_t misses; 187789Sahrens uint64_t deleted; 188789Sahrens uint64_t skipped; 189789Sahrens uint64_t hash_elements; 190789Sahrens uint64_t hash_elements_max; 191789Sahrens uint64_t hash_collisions; 192789Sahrens uint64_t hash_chains; 193789Sahrens uint32_t hash_chain_max; 194789Sahrens 195789Sahrens int no_grow; /* Don't try to grow cache size */ 196789Sahrens } arc; 197789Sahrens 198789Sahrens /* Default amount to grow arc.incr */ 199789Sahrens static int64_t arc_incr_size = 1024; 200789Sahrens 201789Sahrens /* > 0 ==> time to increment arc.c */ 202789Sahrens static int64_t arc_size_check_default = -1000; 203789Sahrens 204789Sahrens static uint64_t arc_tempreserve; 205789Sahrens 206789Sahrens typedef struct arc_callback arc_callback_t; 207789Sahrens 208789Sahrens struct arc_callback { 209789Sahrens arc_done_func_t *acb_done; 210789Sahrens void *acb_private; 211789Sahrens arc_byteswap_func_t *acb_byteswap; 212789Sahrens arc_buf_t *acb_buf; 213789Sahrens zio_t *acb_zio_dummy; 214789Sahrens arc_callback_t *acb_next; 215789Sahrens }; 216789Sahrens 217789Sahrens struct arc_buf_hdr { 218789Sahrens /* immutable */ 219789Sahrens uint64_t b_size; 220789Sahrens spa_t *b_spa; 221789Sahrens 222789Sahrens /* protected by hash lock */ 223789Sahrens dva_t b_dva; 224789Sahrens uint64_t b_birth; 225789Sahrens uint64_t b_cksum0; 226789Sahrens 227789Sahrens arc_buf_hdr_t *b_hash_next; 228789Sahrens arc_buf_t *b_buf; 229789Sahrens uint32_t b_flags; 230789Sahrens 231789Sahrens kcondvar_t b_cv; 232789Sahrens arc_callback_t *b_acb; 233789Sahrens 234789Sahrens /* protected by arc state mutex */ 235789Sahrens arc_state_t *b_state; 236789Sahrens list_node_t b_arc_node; 237789Sahrens 238789Sahrens /* updated atomically */ 239789Sahrens clock_t b_arc_access; 240789Sahrens 241789Sahrens /* self protecting */ 242789Sahrens refcount_t b_refcnt; 243789Sahrens }; 244789Sahrens 245789Sahrens /* 246789Sahrens * Private ARC flags. These flags are private ARC only flags that will show up 247789Sahrens * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 248789Sahrens * be passed in as arc_flags in things like arc_read. However, these flags 249789Sahrens * should never be passed and should only be set by ARC code. When adding new 250789Sahrens * public flags, make sure not to smash the private ones. 251789Sahrens */ 252789Sahrens 253789Sahrens #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 254789Sahrens #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 255789Sahrens #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 256789Sahrens 257789Sahrens #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 258789Sahrens #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 259789Sahrens #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 260789Sahrens 261789Sahrens /* 262789Sahrens * Hash table routines 263789Sahrens */ 264789Sahrens 265789Sahrens #define HT_LOCK_PAD 64 266789Sahrens 267789Sahrens struct ht_lock { 268789Sahrens kmutex_t ht_lock; 269789Sahrens #ifdef _KERNEL 270789Sahrens unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 271789Sahrens #endif 272789Sahrens }; 273789Sahrens 274789Sahrens #define BUF_LOCKS 256 275789Sahrens typedef struct buf_hash_table { 276789Sahrens uint64_t ht_mask; 277789Sahrens arc_buf_hdr_t **ht_table; 278789Sahrens struct ht_lock ht_locks[BUF_LOCKS]; 279789Sahrens } buf_hash_table_t; 280789Sahrens 281789Sahrens static buf_hash_table_t buf_hash_table; 282789Sahrens 283789Sahrens #define BUF_HASH_INDEX(spa, dva, birth) \ 284789Sahrens (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 285789Sahrens #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 286789Sahrens #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 287789Sahrens #define HDR_LOCK(buf) \ 288789Sahrens (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 289789Sahrens 290789Sahrens uint64_t zfs_crc64_table[256]; 291789Sahrens 292789Sahrens static uint64_t 293789Sahrens buf_hash(spa_t *spa, dva_t *dva, uint64_t birth) 294789Sahrens { 295789Sahrens uintptr_t spav = (uintptr_t)spa; 296789Sahrens uint8_t *vdva = (uint8_t *)dva; 297789Sahrens uint64_t crc = -1ULL; 298789Sahrens int i; 299789Sahrens 300789Sahrens ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 301789Sahrens 302789Sahrens for (i = 0; i < sizeof (dva_t); i++) 303789Sahrens crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 304789Sahrens 305789Sahrens crc ^= (spav>>8) ^ birth; 306789Sahrens 307789Sahrens return (crc); 308789Sahrens } 309789Sahrens 310789Sahrens #define BUF_EMPTY(buf) \ 311789Sahrens ((buf)->b_dva.dva_word[0] == 0 && \ 312789Sahrens (buf)->b_dva.dva_word[1] == 0 && \ 313789Sahrens (buf)->b_birth == 0) 314789Sahrens 315789Sahrens #define BUF_EQUAL(spa, dva, birth, buf) \ 316789Sahrens ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 317789Sahrens ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 318789Sahrens ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 319789Sahrens 320789Sahrens static arc_buf_hdr_t * 321789Sahrens buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp) 322789Sahrens { 323789Sahrens uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 324789Sahrens kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 325789Sahrens arc_buf_hdr_t *buf; 326789Sahrens 327789Sahrens mutex_enter(hash_lock); 328789Sahrens for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 329789Sahrens buf = buf->b_hash_next) { 330789Sahrens if (BUF_EQUAL(spa, dva, birth, buf)) { 331789Sahrens *lockp = hash_lock; 332789Sahrens return (buf); 333789Sahrens } 334789Sahrens } 335789Sahrens mutex_exit(hash_lock); 336789Sahrens *lockp = NULL; 337789Sahrens return (NULL); 338789Sahrens } 339789Sahrens 340789Sahrens /* 341789Sahrens * Insert an entry into the hash table. If there is already an element 342789Sahrens * equal to elem in the hash table, then the already existing element 343789Sahrens * will be returned and the new element will not be inserted. 344789Sahrens * Otherwise returns NULL. 345789Sahrens */ 346789Sahrens static arc_buf_hdr_t *fbufs[4]; /* XXX to find 6341326 */ 347789Sahrens static kthread_t *fbufs_lastthread; 348789Sahrens static arc_buf_hdr_t * 349789Sahrens buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 350789Sahrens { 351789Sahrens uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 352789Sahrens kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 353789Sahrens arc_buf_hdr_t *fbuf; 354789Sahrens uint32_t max, i; 355789Sahrens 356789Sahrens fbufs_lastthread = curthread; 357789Sahrens *lockp = hash_lock; 358789Sahrens mutex_enter(hash_lock); 359789Sahrens for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 360789Sahrens fbuf = fbuf->b_hash_next, i++) { 361789Sahrens if (i < sizeof (fbufs) / sizeof (fbufs[0])) 362789Sahrens fbufs[i] = fbuf; 363789Sahrens if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 364789Sahrens return (fbuf); 365789Sahrens } 366789Sahrens 367789Sahrens buf->b_hash_next = buf_hash_table.ht_table[idx]; 368789Sahrens buf_hash_table.ht_table[idx] = buf; 369789Sahrens 370789Sahrens /* collect some hash table performance data */ 371789Sahrens if (i > 0) { 372789Sahrens atomic_add_64(&arc.hash_collisions, 1); 373789Sahrens if (i == 1) 374789Sahrens atomic_add_64(&arc.hash_chains, 1); 375789Sahrens } 376789Sahrens while (i > (max = arc.hash_chain_max) && 377789Sahrens max != atomic_cas_32(&arc.hash_chain_max, max, i)) { 378789Sahrens continue; 379789Sahrens } 380789Sahrens atomic_add_64(&arc.hash_elements, 1); 381789Sahrens if (arc.hash_elements > arc.hash_elements_max) 382789Sahrens atomic_add_64(&arc.hash_elements_max, 1); 383789Sahrens 384789Sahrens return (NULL); 385789Sahrens } 386789Sahrens 387789Sahrens static void 388789Sahrens buf_hash_remove(arc_buf_hdr_t *buf) 389789Sahrens { 390789Sahrens arc_buf_hdr_t *fbuf, **bufp; 391789Sahrens uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 392789Sahrens 393789Sahrens ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 394789Sahrens 395789Sahrens bufp = &buf_hash_table.ht_table[idx]; 396789Sahrens while ((fbuf = *bufp) != buf) { 397789Sahrens ASSERT(fbuf != NULL); 398789Sahrens bufp = &fbuf->b_hash_next; 399789Sahrens } 400789Sahrens *bufp = buf->b_hash_next; 401789Sahrens buf->b_hash_next = NULL; 402789Sahrens 403789Sahrens /* collect some hash table performance data */ 404789Sahrens atomic_add_64(&arc.hash_elements, -1); 405789Sahrens if (buf_hash_table.ht_table[idx] && 406789Sahrens buf_hash_table.ht_table[idx]->b_hash_next == NULL) 407789Sahrens atomic_add_64(&arc.hash_chains, -1); 408789Sahrens } 409789Sahrens 410789Sahrens /* 411789Sahrens * Global data structures and functions for the buf kmem cache. 412789Sahrens */ 413789Sahrens static kmem_cache_t *hdr_cache; 414789Sahrens static kmem_cache_t *buf_cache; 415789Sahrens 416789Sahrens static void 417789Sahrens buf_fini(void) 418789Sahrens { 419789Sahrens int i; 420789Sahrens 421789Sahrens kmem_free(buf_hash_table.ht_table, 422789Sahrens (buf_hash_table.ht_mask + 1) * sizeof (void *)); 423789Sahrens for (i = 0; i < BUF_LOCKS; i++) 424789Sahrens mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 425789Sahrens kmem_cache_destroy(hdr_cache); 426789Sahrens kmem_cache_destroy(buf_cache); 427789Sahrens } 428789Sahrens 429789Sahrens /* 430789Sahrens * Constructor callback - called when the cache is empty 431789Sahrens * and a new buf is requested. 432789Sahrens */ 433789Sahrens /* ARGSUSED */ 434789Sahrens static int 435789Sahrens hdr_cons(void *vbuf, void *unused, int kmflag) 436789Sahrens { 437789Sahrens arc_buf_hdr_t *buf = vbuf; 438789Sahrens 439789Sahrens bzero(buf, sizeof (arc_buf_hdr_t)); 440789Sahrens refcount_create(&buf->b_refcnt); 441789Sahrens cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 442789Sahrens return (0); 443789Sahrens } 444789Sahrens 445789Sahrens /* 446789Sahrens * Destructor callback - called when a cached buf is 447789Sahrens * no longer required. 448789Sahrens */ 449789Sahrens /* ARGSUSED */ 450789Sahrens static void 451789Sahrens hdr_dest(void *vbuf, void *unused) 452789Sahrens { 453789Sahrens arc_buf_hdr_t *buf = vbuf; 454789Sahrens 455789Sahrens refcount_destroy(&buf->b_refcnt); 456789Sahrens cv_destroy(&buf->b_cv); 457789Sahrens } 458789Sahrens 459789Sahrens void arc_kmem_reclaim(void); 460789Sahrens 461789Sahrens /* 462789Sahrens * Reclaim callback -- invoked when memory is low. 463789Sahrens */ 464789Sahrens /* ARGSUSED */ 465789Sahrens static void 466789Sahrens hdr_recl(void *unused) 467789Sahrens { 468789Sahrens dprintf("hdr_recl called\n"); 469789Sahrens arc_kmem_reclaim(); 470789Sahrens } 471789Sahrens 472789Sahrens static void 473789Sahrens buf_init(void) 474789Sahrens { 475789Sahrens uint64_t *ct; 476789Sahrens uint64_t hsize = 1ULL << 10; 477789Sahrens int i, j; 478789Sahrens 479789Sahrens /* 480789Sahrens * The hash table is big enough to fill all of physical memory 481789Sahrens * with an average 4k block size. The table will take up 482789Sahrens * totalmem*sizeof(void*)/4k bytes (eg. 2MB/GB with 8-byte 483789Sahrens * pointers). 484789Sahrens */ 485789Sahrens while (hsize * 4096 < physmem * PAGESIZE) 486789Sahrens hsize <<= 1; 487789Sahrens 488789Sahrens buf_hash_table.ht_mask = hsize - 1; 489789Sahrens buf_hash_table.ht_table = kmem_zalloc(hsize * sizeof (void*), KM_SLEEP); 490789Sahrens 491789Sahrens hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 492789Sahrens 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 493789Sahrens buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 494789Sahrens 0, NULL, NULL, NULL, NULL, NULL, 0); 495789Sahrens 496789Sahrens for (i = 0; i < 256; i++) 497789Sahrens for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 498789Sahrens *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 499789Sahrens 500789Sahrens for (i = 0; i < BUF_LOCKS; i++) { 501789Sahrens mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 502789Sahrens NULL, MUTEX_DEFAULT, NULL); 503789Sahrens } 504789Sahrens } 505789Sahrens 506789Sahrens #define ARC_MINTIME (hz>>4) /* 62 ms */ 507789Sahrens 508789Sahrens #define ARC_TAG (void *)0x05201962 509789Sahrens 510789Sahrens static void 511789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 512789Sahrens { 513789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 514789Sahrens 515789Sahrens if ((refcount_add(&ab->b_refcnt, tag) == 1) && 516789Sahrens (ab->b_state != arc.anon)) { 517789Sahrens 518789Sahrens ASSERT(!MUTEX_HELD(&ab->b_state->mtx)); 519789Sahrens mutex_enter(&ab->b_state->mtx); 520789Sahrens ASSERT(!refcount_is_zero(&ab->b_refcnt)); 521789Sahrens ASSERT(list_link_active(&ab->b_arc_node)); 522789Sahrens list_remove(&ab->b_state->list, ab); 523789Sahrens ASSERT3U(ab->b_state->lsize, >=, ab->b_size); 524789Sahrens ab->b_state->lsize -= ab->b_size; 525789Sahrens mutex_exit(&ab->b_state->mtx); 526789Sahrens } 527789Sahrens } 528789Sahrens 529789Sahrens static int 530789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 531789Sahrens { 532789Sahrens int cnt; 533789Sahrens 534789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 535789Sahrens 536789Sahrens if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 537789Sahrens (ab->b_state != arc.anon)) { 538789Sahrens 539789Sahrens ASSERT(!MUTEX_HELD(&ab->b_state->mtx)); 540789Sahrens mutex_enter(&ab->b_state->mtx); 541789Sahrens ASSERT(!list_link_active(&ab->b_arc_node)); 542789Sahrens list_insert_head(&ab->b_state->list, ab); 543789Sahrens ASSERT(ab->b_buf != NULL); 544789Sahrens ab->b_state->lsize += ab->b_size; 545789Sahrens mutex_exit(&ab->b_state->mtx); 546789Sahrens } 547789Sahrens return (cnt); 548789Sahrens } 549789Sahrens 550789Sahrens /* 551789Sahrens * Move the supplied buffer to the indicated state. The mutex 552789Sahrens * for the buffer must be held by the caller. 553789Sahrens */ 554789Sahrens static void 555789Sahrens arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, 556789Sahrens kmutex_t *hash_lock) 557789Sahrens { 558789Sahrens arc_buf_t *buf; 559789Sahrens 560789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 561789Sahrens 562789Sahrens /* 563789Sahrens * If this buffer is evictable, transfer it from the 564789Sahrens * old state list to the new state list. 565789Sahrens */ 566789Sahrens if (refcount_is_zero(&ab->b_refcnt)) { 567789Sahrens if (ab->b_state != arc.anon) { 568789Sahrens int drop_mutex = FALSE; 569789Sahrens 570789Sahrens if (!MUTEX_HELD(&ab->b_state->mtx)) { 571789Sahrens mutex_enter(&ab->b_state->mtx); 572789Sahrens drop_mutex = TRUE; 573789Sahrens } 574789Sahrens ASSERT(list_link_active(&ab->b_arc_node)); 575789Sahrens list_remove(&ab->b_state->list, ab); 576789Sahrens ASSERT3U(ab->b_state->lsize, >=, ab->b_size); 577789Sahrens ab->b_state->lsize -= ab->b_size; 578789Sahrens if (drop_mutex) 579789Sahrens mutex_exit(&ab->b_state->mtx); 580789Sahrens } 581789Sahrens if (new_state != arc.anon) { 582789Sahrens int drop_mutex = FALSE; 583789Sahrens 584789Sahrens if (!MUTEX_HELD(&new_state->mtx)) { 585789Sahrens mutex_enter(&new_state->mtx); 586789Sahrens drop_mutex = TRUE; 587789Sahrens } 588789Sahrens list_insert_head(&new_state->list, ab); 589789Sahrens ASSERT(ab->b_buf != NULL); 590789Sahrens new_state->lsize += ab->b_size; 591789Sahrens if (drop_mutex) 592789Sahrens mutex_exit(&new_state->mtx); 593789Sahrens } 594789Sahrens } 595789Sahrens 596789Sahrens ASSERT(!BUF_EMPTY(ab)); 597789Sahrens if (new_state == arc.anon && ab->b_state != arc.anon) { 598789Sahrens buf_hash_remove(ab); 599789Sahrens } 600789Sahrens 601789Sahrens /* 602789Sahrens * If this buffer isn't being transferred to the MRU-top 603789Sahrens * state, it's safe to clear its prefetch flag 604789Sahrens */ 605789Sahrens if ((new_state != arc.mru_top) && (new_state != arc.mru_bot)) { 606789Sahrens ab->b_flags &= ~ARC_PREFETCH; 607789Sahrens } 608789Sahrens 609789Sahrens buf = ab->b_buf; 610789Sahrens if (buf == NULL) { 611789Sahrens ASSERT3U(ab->b_state->size, >=, ab->b_size); 612789Sahrens atomic_add_64(&ab->b_state->size, -ab->b_size); 613789Sahrens /* we should only be here if we are deleting state */ 614789Sahrens ASSERT(new_state == arc.anon && 615789Sahrens (ab->b_state == arc.mru_bot || ab->b_state == arc.mfu_bot)); 616789Sahrens } else while (buf) { 617789Sahrens ASSERT3U(ab->b_state->size, >=, ab->b_size); 618789Sahrens atomic_add_64(&ab->b_state->size, -ab->b_size); 619789Sahrens atomic_add_64(&new_state->size, ab->b_size); 620789Sahrens buf = buf->b_next; 621789Sahrens } 622789Sahrens ab->b_state = new_state; 623789Sahrens } 624789Sahrens 625789Sahrens arc_buf_t * 626789Sahrens arc_buf_alloc(spa_t *spa, int size, void *tag) 627789Sahrens { 628789Sahrens arc_buf_hdr_t *hdr; 629789Sahrens arc_buf_t *buf; 630789Sahrens 631789Sahrens ASSERT3U(size, >, 0); 632789Sahrens hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 633789Sahrens ASSERT(BUF_EMPTY(hdr)); 634789Sahrens hdr->b_size = size; 635789Sahrens hdr->b_spa = spa; 636789Sahrens hdr->b_state = arc.anon; 637789Sahrens hdr->b_arc_access = 0; 638789Sahrens buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 639789Sahrens buf->b_hdr = hdr; 640789Sahrens buf->b_next = NULL; 641789Sahrens buf->b_data = zio_buf_alloc(size); 642789Sahrens hdr->b_buf = buf; 643789Sahrens hdr->b_flags = 0; 644789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 645789Sahrens (void) refcount_add(&hdr->b_refcnt, tag); 646789Sahrens 647789Sahrens atomic_add_64(&arc.size, size); 648789Sahrens atomic_add_64(&arc.anon->size, size); 649789Sahrens 650789Sahrens return (buf); 651789Sahrens } 652789Sahrens 653789Sahrens static void 654789Sahrens arc_hdr_free(arc_buf_hdr_t *hdr) 655789Sahrens { 656789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt)); 657789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 658789Sahrens 659789Sahrens if (!BUF_EMPTY(hdr)) { 660789Sahrens /* 661789Sahrens * We can be called with an arc state lock held, 662789Sahrens * so we can't hold a hash lock here. 663789Sahrens * ASSERT(not in hash table) 664789Sahrens */ 665789Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 666789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 667789Sahrens hdr->b_birth = 0; 668789Sahrens hdr->b_cksum0 = 0; 669789Sahrens } 670789Sahrens if (hdr->b_buf) { 671789Sahrens arc_buf_t *buf = hdr->b_buf; 672789Sahrens 673789Sahrens ASSERT3U(hdr->b_size, >, 0); 674789Sahrens zio_buf_free(buf->b_data, hdr->b_size); 675789Sahrens atomic_add_64(&arc.size, -hdr->b_size); 676789Sahrens ASSERT3U(arc.anon->size, >=, hdr->b_size); 677789Sahrens atomic_add_64(&arc.anon->size, -hdr->b_size); 678789Sahrens ASSERT3P(buf->b_next, ==, NULL); 679789Sahrens kmem_cache_free(buf_cache, buf); 680789Sahrens hdr->b_buf = NULL; 681789Sahrens } 682789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 683789Sahrens ASSERT3P(hdr->b_hash_next, ==, NULL); 684789Sahrens ASSERT3P(hdr->b_acb, ==, NULL); 685789Sahrens kmem_cache_free(hdr_cache, hdr); 686789Sahrens } 687789Sahrens 688789Sahrens void 689789Sahrens arc_buf_free(arc_buf_t *buf, void *tag) 690789Sahrens { 691789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 692789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr); 693789Sahrens int freeable; 694789Sahrens 695789Sahrens mutex_enter(hash_lock); 696789Sahrens if (remove_reference(hdr, hash_lock, tag) > 0) { 697789Sahrens arc_buf_t **bufp = &hdr->b_buf; 698789Sahrens arc_state_t *state = hdr->b_state; 699789Sahrens uint64_t size = hdr->b_size; 700789Sahrens 701789Sahrens ASSERT(hdr->b_state != arc.anon || HDR_IO_ERROR(hdr)); 702789Sahrens while (*bufp != buf) { 703789Sahrens ASSERT(*bufp); 704789Sahrens bufp = &(*bufp)->b_next; 705789Sahrens } 706789Sahrens *bufp = buf->b_next; 707789Sahrens mutex_exit(hash_lock); 708789Sahrens zio_buf_free(buf->b_data, size); 709789Sahrens atomic_add_64(&arc.size, -size); 710789Sahrens kmem_cache_free(buf_cache, buf); 711789Sahrens ASSERT3U(state->size, >=, size); 712789Sahrens atomic_add_64(&state->size, -size); 713789Sahrens return; 714789Sahrens } 715789Sahrens 716789Sahrens /* don't free buffers that are in the middle of an async write */ 717789Sahrens freeable = (hdr->b_state == arc.anon && hdr->b_acb == NULL); 718789Sahrens mutex_exit(hash_lock); 719789Sahrens 720789Sahrens if (freeable) 721789Sahrens arc_hdr_free(hdr); 722789Sahrens } 723789Sahrens 724789Sahrens int 725789Sahrens arc_buf_size(arc_buf_t *buf) 726789Sahrens { 727789Sahrens return (buf->b_hdr->b_size); 728789Sahrens } 729789Sahrens 730789Sahrens /* 731789Sahrens * Evict buffers from list until we've removed the specified number of 732789Sahrens * bytes. Move the removed buffers to the appropriate evict state. 733789Sahrens */ 734789Sahrens static uint64_t 735789Sahrens arc_evict_state(arc_state_t *state, int64_t bytes) 736789Sahrens { 737789Sahrens arc_state_t *evicted_state; 738789Sahrens uint64_t bytes_evicted = 0; 739789Sahrens arc_buf_hdr_t *ab, *ab_prev; 740789Sahrens kmutex_t *hash_lock; 741789Sahrens 742789Sahrens ASSERT(state == arc.mru_top || state == arc.mfu_top); 743789Sahrens 744789Sahrens if (state == arc.mru_top) 745789Sahrens evicted_state = arc.mru_bot; 746789Sahrens else 747789Sahrens evicted_state = arc.mfu_bot; 748789Sahrens 749789Sahrens mutex_enter(&state->mtx); 750789Sahrens mutex_enter(&evicted_state->mtx); 751789Sahrens 752789Sahrens for (ab = list_tail(&state->list); ab; ab = ab_prev) { 753789Sahrens ab_prev = list_prev(&state->list, ab); 754789Sahrens hash_lock = HDR_LOCK(ab); 755789Sahrens if (mutex_tryenter(hash_lock)) { 756789Sahrens ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 757789Sahrens arc_change_state(evicted_state, ab, hash_lock); 758789Sahrens zio_buf_free(ab->b_buf->b_data, ab->b_size); 759789Sahrens atomic_add_64(&arc.size, -ab->b_size); 760789Sahrens ASSERT3P(ab->b_buf->b_next, ==, NULL); 761789Sahrens kmem_cache_free(buf_cache, ab->b_buf); 762789Sahrens ab->b_buf = NULL; 763789Sahrens DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 764789Sahrens bytes_evicted += ab->b_size; 765789Sahrens mutex_exit(hash_lock); 766789Sahrens if (bytes_evicted >= bytes) 767789Sahrens break; 768789Sahrens } else { 769789Sahrens atomic_add_64(&arc.skipped, 1); 770789Sahrens } 771789Sahrens } 772789Sahrens mutex_exit(&evicted_state->mtx); 773789Sahrens mutex_exit(&state->mtx); 774789Sahrens 775789Sahrens if (bytes_evicted < bytes) 776789Sahrens dprintf("only evicted %lld bytes from %x", 777789Sahrens (longlong_t)bytes_evicted, state); 778789Sahrens 779789Sahrens return (bytes_evicted); 780789Sahrens } 781789Sahrens 782789Sahrens /* 783789Sahrens * Remove buffers from list until we've removed the specified number of 784789Sahrens * bytes. Destroy the buffers that are removed. 785789Sahrens */ 786789Sahrens static void 787789Sahrens arc_delete_state(arc_state_t *state, int64_t bytes) 788789Sahrens { 789789Sahrens uint_t bufs_skipped = 0; 790789Sahrens uint64_t bytes_deleted = 0; 791789Sahrens arc_buf_hdr_t *ab, *ab_prev; 792789Sahrens kmutex_t *hash_lock; 793789Sahrens 794789Sahrens top: 795789Sahrens mutex_enter(&state->mtx); 796789Sahrens for (ab = list_tail(&state->list); ab; ab = ab_prev) { 797789Sahrens ab_prev = list_prev(&state->list, ab); 798789Sahrens hash_lock = HDR_LOCK(ab); 799789Sahrens if (mutex_tryenter(hash_lock)) { 800789Sahrens arc_change_state(arc.anon, ab, hash_lock); 801789Sahrens mutex_exit(hash_lock); 802789Sahrens atomic_add_64(&arc.deleted, 1); 803789Sahrens DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 804789Sahrens bytes_deleted += ab->b_size; 805789Sahrens arc_hdr_free(ab); 806789Sahrens if (bytes >= 0 && bytes_deleted >= bytes) 807789Sahrens break; 808789Sahrens } else { 809789Sahrens if (bytes < 0) { 810789Sahrens mutex_exit(&state->mtx); 811789Sahrens mutex_enter(hash_lock); 812789Sahrens mutex_exit(hash_lock); 813789Sahrens goto top; 814789Sahrens } 815789Sahrens bufs_skipped += 1; 816789Sahrens } 817789Sahrens } 818789Sahrens mutex_exit(&state->mtx); 819789Sahrens 820789Sahrens if (bufs_skipped) { 821789Sahrens atomic_add_64(&arc.skipped, bufs_skipped); 822789Sahrens ASSERT(bytes >= 0); 823789Sahrens } 824789Sahrens 825789Sahrens if (bytes_deleted < bytes) 826789Sahrens dprintf("only deleted %lld bytes from %p", 827789Sahrens (longlong_t)bytes_deleted, state); 828789Sahrens } 829789Sahrens 830789Sahrens static void 831789Sahrens arc_adjust(void) 832789Sahrens { 833789Sahrens int64_t top_sz, mru_over, arc_over; 834789Sahrens 835789Sahrens top_sz = arc.anon->size + arc.mru_top->size; 836789Sahrens 837789Sahrens if (top_sz > arc.p && arc.mru_top->lsize > 0) { 838789Sahrens int64_t toevict = MIN(arc.mru_top->lsize, top_sz-arc.p); 839789Sahrens (void) arc_evict_state(arc.mru_top, toevict); 840789Sahrens top_sz = arc.anon->size + arc.mru_top->size; 841789Sahrens } 842789Sahrens 843789Sahrens mru_over = top_sz + arc.mru_bot->size - arc.c; 844789Sahrens 845789Sahrens if (mru_over > 0) { 846789Sahrens if (arc.mru_bot->lsize > 0) { 847789Sahrens int64_t todelete = MIN(arc.mru_bot->lsize, mru_over); 848789Sahrens arc_delete_state(arc.mru_bot, todelete); 849789Sahrens } 850789Sahrens } 851789Sahrens 852789Sahrens if ((arc_over = arc.size - arc.c) > 0) { 853789Sahrens int64_t table_over; 854789Sahrens 855789Sahrens if (arc.mfu_top->lsize > 0) { 856789Sahrens int64_t toevict = MIN(arc.mfu_top->lsize, arc_over); 857789Sahrens (void) arc_evict_state(arc.mfu_top, toevict); 858789Sahrens } 859789Sahrens 860789Sahrens table_over = arc.size + arc.mru_bot->lsize + arc.mfu_bot->lsize 861789Sahrens - arc.c*2; 862789Sahrens 863789Sahrens if (table_over > 0 && arc.mfu_bot->lsize > 0) { 864789Sahrens int64_t todelete = MIN(arc.mfu_bot->lsize, table_over); 865789Sahrens arc_delete_state(arc.mfu_bot, todelete); 866789Sahrens } 867789Sahrens } 868789Sahrens } 869789Sahrens 870789Sahrens /* 871789Sahrens * Flush all *evictable* data from the cache. 872789Sahrens * NOTE: this will not touch "active" (i.e. referenced) data. 873789Sahrens */ 874789Sahrens void 875789Sahrens arc_flush(void) 876789Sahrens { 877789Sahrens arc_delete_state(arc.mru_top, -1); 878789Sahrens arc_delete_state(arc.mfu_top, -1); 879789Sahrens 880789Sahrens arc_delete_state(arc.mru_bot, -1); 881789Sahrens arc_delete_state(arc.mfu_bot, -1); 882789Sahrens } 883789Sahrens 884789Sahrens void 885789Sahrens arc_kmem_reclaim(void) 886789Sahrens { 887789Sahrens /* Remove 6.25% */ 888789Sahrens /* 889789Sahrens * We need arc_reclaim_lock because we don't want multiple 890789Sahrens * threads trying to reclaim concurrently. 891789Sahrens */ 892789Sahrens 893789Sahrens /* 894789Sahrens * umem calls the reclaim func when we destroy the buf cache, 895789Sahrens * which is after we do arc_fini(). So we set a flag to prevent 896789Sahrens * accessing the destroyed mutexes and lists. 897789Sahrens */ 898789Sahrens if (arc_dead) 899789Sahrens return; 900789Sahrens 901789Sahrens mutex_enter(&arc_reclaim_lock); 902789Sahrens 903789Sahrens atomic_add_64(&arc.c, -(arc.c >> 4)); 904789Sahrens if (arc.c < arc.c_min) 905789Sahrens arc.c = arc.c_min; 906789Sahrens atomic_add_64(&arc.p, -(arc.p >> 4)); 907789Sahrens 908789Sahrens arc_adjust(); 909789Sahrens 910789Sahrens /* Cool it for a while */ 911789Sahrens arc.incr = 0; 912789Sahrens arc.size_check = arc_size_check_default << 3; 913789Sahrens 914789Sahrens mutex_exit(&arc_reclaim_lock); 915789Sahrens } 916789Sahrens 917789Sahrens static int 918789Sahrens arc_reclaim_needed(void) 919789Sahrens { 920789Sahrens uint64_t extra; 921789Sahrens 922789Sahrens #ifdef _KERNEL 923789Sahrens /* 924789Sahrens * take 'desfree' extra pages, so we reclaim sooner, rather than later 925789Sahrens */ 926789Sahrens extra = desfree; 927789Sahrens 928789Sahrens /* 929789Sahrens * check that we're out of range of the pageout scanner. It starts to 930789Sahrens * schedule paging if freemem is less than lotsfree and needfree. 931789Sahrens * lotsfree is the high-water mark for pageout, and needfree is the 932789Sahrens * number of needed free pages. We add extra pages here to make sure 933789Sahrens * the scanner doesn't start up while we're freeing memory. 934789Sahrens */ 935789Sahrens if (freemem < lotsfree + needfree + extra) 936789Sahrens return (1); 937789Sahrens 938789Sahrens /* 939789Sahrens * check to make sure that swapfs has enough space so that anon 940789Sahrens * reservations can still succeeed. anon_resvmem() checks that the 941789Sahrens * availrmem is greater than swapfs_minfree, and the number of reserved 942789Sahrens * swap pages. We also add a bit of extra here just to prevent 943789Sahrens * circumstances from getting really dire. 944789Sahrens */ 945789Sahrens if (availrmem < swapfs_minfree + swapfs_reserve + extra) 946789Sahrens return (1); 947789Sahrens 948789Sahrens /* 949789Sahrens * If we're on an i386 platform, it's possible that we'll exhaust the 950789Sahrens * kernel heap space before we ever run out of available physical 951789Sahrens * memory. Most checks of the size of the heap_area compare against 952789Sahrens * tune.t_minarmem, which is the minimum available real memory that we 953789Sahrens * can have in the system. However, this is generally fixed at 25 pages 954789Sahrens * which is so low that it's useless. In this comparison, we seek to 955789Sahrens * calculate the total heap-size, and reclaim if more than 3/4ths of the 956789Sahrens * heap is allocated. (Or, in the caclulation, if less than 1/4th is 957789Sahrens * free) 958789Sahrens */ 959789Sahrens #if defined(__i386) 960789Sahrens if (btop(vmem_size(heap_arena, VMEM_FREE)) < 961789Sahrens (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 962789Sahrens return (1); 963789Sahrens #endif 964789Sahrens 965789Sahrens #else 966789Sahrens if (spa_get_random(100) == 0) 967789Sahrens return (1); 968789Sahrens #endif 969789Sahrens return (0); 970789Sahrens } 971789Sahrens 972789Sahrens static void 973789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat) 974789Sahrens { 975789Sahrens size_t i; 976789Sahrens kmem_cache_t *prev_cache = NULL; 977789Sahrens extern kmem_cache_t *zio_buf_cache[]; 978789Sahrens 979*1484Sek110237 #ifdef _KERNEL 980*1484Sek110237 /* 981*1484Sek110237 * First purge some DNLC entries, in case the DNLC is using 982*1484Sek110237 * up too much memory. 983*1484Sek110237 */ 984*1484Sek110237 dnlc_reduce_cache((void *)arc_reduce_dnlc_percent); 985*1484Sek110237 #endif 986*1484Sek110237 987789Sahrens /* 988789Sahrens * an agressive reclamation will shrink the cache size as well as reap 989789Sahrens * free kmem buffers. The arc_kmem_reclaim function is called when the 990789Sahrens * header-cache is reaped, so we only reap the header cache if we're 991789Sahrens * performing an agressive reclaim. If we're not, just clean the kmem 992789Sahrens * buffer caches. 993789Sahrens */ 994789Sahrens if (strat == ARC_RECLAIM_AGGR) 995789Sahrens kmem_cache_reap_now(hdr_cache); 996789Sahrens 997789Sahrens kmem_cache_reap_now(buf_cache); 998789Sahrens 999789Sahrens for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1000789Sahrens if (zio_buf_cache[i] != prev_cache) { 1001789Sahrens prev_cache = zio_buf_cache[i]; 1002789Sahrens kmem_cache_reap_now(zio_buf_cache[i]); 1003789Sahrens } 1004789Sahrens } 1005789Sahrens } 1006789Sahrens 1007789Sahrens static void 1008789Sahrens arc_reclaim_thread(void) 1009789Sahrens { 1010789Sahrens clock_t growtime = 0; 1011789Sahrens arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1012789Sahrens callb_cpr_t cpr; 1013789Sahrens 1014789Sahrens CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1015789Sahrens 1016789Sahrens mutex_enter(&arc_reclaim_thr_lock); 1017789Sahrens while (arc_thread_exit == 0) { 1018789Sahrens if (arc_reclaim_needed()) { 1019789Sahrens 1020789Sahrens if (arc.no_grow) { 1021789Sahrens if (last_reclaim == ARC_RECLAIM_CONS) { 1022789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1023789Sahrens } else { 1024789Sahrens last_reclaim = ARC_RECLAIM_CONS; 1025789Sahrens } 1026789Sahrens } else { 1027789Sahrens arc.no_grow = TRUE; 1028789Sahrens last_reclaim = ARC_RECLAIM_AGGR; 1029789Sahrens membar_producer(); 1030789Sahrens } 1031789Sahrens 1032789Sahrens /* reset the growth delay for every reclaim */ 1033789Sahrens growtime = lbolt + (arc_grow_retry * hz); 1034789Sahrens 1035789Sahrens arc_kmem_reap_now(last_reclaim); 1036789Sahrens 1037789Sahrens } else if ((growtime > 0) && ((growtime - lbolt) <= 0)) { 1038789Sahrens arc.no_grow = FALSE; 1039789Sahrens } 1040789Sahrens 1041789Sahrens /* block until needed, or one second, whichever is shorter */ 1042789Sahrens CALLB_CPR_SAFE_BEGIN(&cpr); 1043789Sahrens (void) cv_timedwait(&arc_reclaim_thr_cv, 1044789Sahrens &arc_reclaim_thr_lock, (lbolt + hz)); 1045789Sahrens CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1046789Sahrens } 1047789Sahrens 1048789Sahrens arc_thread_exit = 0; 1049789Sahrens cv_broadcast(&arc_reclaim_thr_cv); 1050789Sahrens CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1051789Sahrens thread_exit(); 1052789Sahrens } 1053789Sahrens 1054789Sahrens static void 1055789Sahrens arc_try_grow(int64_t bytes) 1056789Sahrens { 1057789Sahrens /* 1058789Sahrens * If we're within (2 * maxblocksize) bytes of the target 1059789Sahrens * cache size, increment the target cache size 1060789Sahrens */ 1061789Sahrens atomic_add_64((uint64_t *)&arc.size_check, 1); 1062789Sahrens 1063789Sahrens if (arc_reclaim_needed()) { 1064789Sahrens cv_signal(&arc_reclaim_thr_cv); 1065789Sahrens return; 1066789Sahrens } 1067789Sahrens 1068789Sahrens if (arc.no_grow) 1069789Sahrens return; 1070789Sahrens 1071789Sahrens /* 1072789Sahrens * return true if we successfully grow, or if there's enough space that 1073789Sahrens * we don't have to grow. Above, we return false if we can't grow, or 1074789Sahrens * if we shouldn't because a reclaim is in progress. 1075789Sahrens */ 1076789Sahrens if ((arc.c - arc.size) <= (2ULL << SPA_MAXBLOCKSHIFT)) { 1077789Sahrens if (arc.size_check > 0) { 1078789Sahrens arc.size_check = arc_size_check_default; 1079789Sahrens atomic_add_64(&arc.incr, arc_incr_size); 1080789Sahrens } 1081789Sahrens atomic_add_64(&arc.c, MIN(bytes, arc.incr)); 1082789Sahrens if (arc.c > arc.c_max) 1083789Sahrens arc.c = arc.c_max; 1084789Sahrens else 1085789Sahrens atomic_add_64(&arc.p, MIN(bytes, arc.incr)); 1086789Sahrens } else if (arc.size > arc.c) { 1087789Sahrens if (arc.size_check > 0) { 1088789Sahrens arc.size_check = arc_size_check_default; 1089789Sahrens atomic_add_64(&arc.incr, arc_incr_size); 1090789Sahrens } 1091789Sahrens atomic_add_64(&arc.c, MIN(bytes, arc.incr)); 1092789Sahrens if (arc.c > arc.c_max) 1093789Sahrens arc.c = arc.c_max; 1094789Sahrens else 1095789Sahrens atomic_add_64(&arc.p, MIN(bytes, arc.incr)); 1096789Sahrens } 1097789Sahrens } 1098789Sahrens 1099789Sahrens /* 1100789Sahrens * check if the cache has reached its limits and eviction is required prior to 1101789Sahrens * insert. In this situation, we want to evict if no_grow is set Otherwise, the 1102789Sahrens * cache is either big enough that we can insert, or a arc_try_grow will result 1103789Sahrens * in more space being made available. 1104789Sahrens */ 1105789Sahrens 1106789Sahrens static int 1107789Sahrens arc_evict_needed() 1108789Sahrens { 1109789Sahrens 1110789Sahrens if (arc_reclaim_needed()) 1111789Sahrens return (1); 1112789Sahrens 1113789Sahrens if (arc.no_grow || (arc.c > arc.c_max) || (arc.size > arc.c)) 1114789Sahrens return (1); 1115789Sahrens 1116789Sahrens return (0); 1117789Sahrens } 1118789Sahrens 1119789Sahrens /* 1120789Sahrens * The state, supplied as the first argument, is going to have something 1121789Sahrens * inserted on its behalf. So, determine which cache must be victimized to 1122789Sahrens * satisfy an insertion for this state. We have the following cases: 1123789Sahrens * 1124789Sahrens * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru_top) -> 1125789Sahrens * In this situation if we're out of space, but the resident size of the MFU is 1126789Sahrens * under the limit, victimize the MFU cache to satisfy this insertion request. 1127789Sahrens * 1128789Sahrens * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru_top) -> 1129789Sahrens * Here, we've used up all of the available space for the MRU, so we need to 1130789Sahrens * evict from our own cache instead. Evict from the set of resident MRU 1131789Sahrens * entries. 1132789Sahrens * 1133789Sahrens * 3. Insert for MFU (c - p) > sizeof(arc.mfu_top) -> 1134789Sahrens * c minus p represents the MFU space in the cache, since p is the size of the 1135789Sahrens * cache that is dedicated to the MRU. In this situation there's still space on 1136789Sahrens * the MFU side, so the MRU side needs to be victimized. 1137789Sahrens * 1138789Sahrens * 4. Insert for MFU (c - p) < sizeof(arc.mfu_top) -> 1139789Sahrens * MFU's resident set is consuming more space than it has been allotted. In 1140789Sahrens * this situation, we must victimize our own cache, the MFU, for this insertion. 1141789Sahrens */ 1142789Sahrens static void 1143789Sahrens arc_evict_for_state(arc_state_t *state, uint64_t bytes) 1144789Sahrens { 1145789Sahrens uint64_t mru_used; 1146789Sahrens uint64_t mfu_space; 1147789Sahrens uint64_t evicted; 1148789Sahrens 1149789Sahrens ASSERT(state == arc.mru_top || state == arc.mfu_top); 1150789Sahrens 1151789Sahrens if (state == arc.mru_top) { 1152789Sahrens mru_used = arc.anon->size + arc.mru_top->size; 1153789Sahrens if (arc.p > mru_used) { 1154789Sahrens /* case 1 */ 1155789Sahrens evicted = arc_evict_state(arc.mfu_top, bytes); 1156789Sahrens if (evicted < bytes) { 1157789Sahrens arc_adjust(); 1158789Sahrens } 1159789Sahrens } else { 1160789Sahrens /* case 2 */ 1161789Sahrens evicted = arc_evict_state(arc.mru_top, bytes); 1162789Sahrens if (evicted < bytes) { 1163789Sahrens arc_adjust(); 1164789Sahrens } 1165789Sahrens } 1166789Sahrens } else { 1167789Sahrens /* MFU_top case */ 1168789Sahrens mfu_space = arc.c - arc.p; 1169789Sahrens if (mfu_space > arc.mfu_top->size) { 1170789Sahrens /* case 3 */ 1171789Sahrens evicted = arc_evict_state(arc.mru_top, bytes); 1172789Sahrens if (evicted < bytes) { 1173789Sahrens arc_adjust(); 1174789Sahrens } 1175789Sahrens } else { 1176789Sahrens /* case 4 */ 1177789Sahrens evicted = arc_evict_state(arc.mfu_top, bytes); 1178789Sahrens if (evicted < bytes) { 1179789Sahrens arc_adjust(); 1180789Sahrens } 1181789Sahrens } 1182789Sahrens } 1183789Sahrens } 1184789Sahrens 1185789Sahrens /* 1186789Sahrens * This routine is called whenever a buffer is accessed. 1187789Sahrens */ 1188789Sahrens static void 1189789Sahrens arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 1190789Sahrens { 1191789Sahrens int blksz, mult; 1192789Sahrens 1193789Sahrens ASSERT(MUTEX_HELD(hash_lock)); 1194789Sahrens 1195789Sahrens blksz = buf->b_size; 1196789Sahrens 1197789Sahrens if (buf->b_state == arc.anon) { 1198789Sahrens /* 1199789Sahrens * This buffer is not in the cache, and does not 1200789Sahrens * appear in our "ghost" list. Add the new buffer 1201789Sahrens * to the MRU state. 1202789Sahrens */ 1203789Sahrens 1204789Sahrens arc_try_grow(blksz); 1205789Sahrens if (arc_evict_needed()) { 1206789Sahrens arc_evict_for_state(arc.mru_top, blksz); 1207789Sahrens } 1208789Sahrens 1209789Sahrens ASSERT(buf->b_arc_access == 0); 1210789Sahrens buf->b_arc_access = lbolt; 1211789Sahrens DTRACE_PROBE1(new_state__mru_top, arc_buf_hdr_t *, 1212789Sahrens buf); 1213789Sahrens arc_change_state(arc.mru_top, buf, hash_lock); 1214789Sahrens 1215789Sahrens /* 1216789Sahrens * If we are using less than 2/3 of our total target 1217789Sahrens * cache size, bump up the target size for the MRU 1218789Sahrens * list. 1219789Sahrens */ 1220789Sahrens if (arc.size < arc.c*2/3) { 1221789Sahrens arc.p = arc.anon->size + arc.mru_top->size + arc.c/6; 1222789Sahrens } 1223789Sahrens 1224789Sahrens } else if (buf->b_state == arc.mru_top) { 1225789Sahrens /* 1226789Sahrens * If this buffer is in the MRU-top state and has the prefetch 1227789Sahrens * flag, the first read was actually part of a prefetch. In 1228789Sahrens * this situation, we simply want to clear the flag and return. 1229789Sahrens * A subsequent access should bump this into the MFU state. 1230789Sahrens */ 1231789Sahrens if ((buf->b_flags & ARC_PREFETCH) != 0) { 1232789Sahrens buf->b_flags &= ~ARC_PREFETCH; 1233789Sahrens atomic_add_64(&arc.mru_top->hits, 1); 1234789Sahrens return; 1235789Sahrens } 1236789Sahrens 1237789Sahrens /* 1238789Sahrens * This buffer has been "accessed" only once so far, 1239789Sahrens * but it is still in the cache. Move it to the MFU 1240789Sahrens * state. 1241789Sahrens */ 1242789Sahrens if (lbolt > buf->b_arc_access + ARC_MINTIME) { 1243789Sahrens /* 1244789Sahrens * More than 125ms have passed since we 1245789Sahrens * instantiated this buffer. Move it to the 1246789Sahrens * most frequently used state. 1247789Sahrens */ 1248789Sahrens buf->b_arc_access = lbolt; 1249789Sahrens DTRACE_PROBE1(new_state__mfu_top, 1250789Sahrens arc_buf_hdr_t *, buf); 1251789Sahrens arc_change_state(arc.mfu_top, buf, hash_lock); 1252789Sahrens } 1253789Sahrens atomic_add_64(&arc.mru_top->hits, 1); 1254789Sahrens } else if (buf->b_state == arc.mru_bot) { 1255789Sahrens arc_state_t *new_state; 1256789Sahrens /* 1257789Sahrens * This buffer has been "accessed" recently, but 1258789Sahrens * was evicted from the cache. Move it to the 1259789Sahrens * MFU state. 1260789Sahrens */ 1261789Sahrens 1262789Sahrens if (buf->b_flags & ARC_PREFETCH) { 1263789Sahrens new_state = arc.mru_top; 1264789Sahrens DTRACE_PROBE1(new_state__mru_top, 1265789Sahrens arc_buf_hdr_t *, buf); 1266789Sahrens } else { 1267789Sahrens new_state = arc.mfu_top; 1268789Sahrens DTRACE_PROBE1(new_state__mfu_top, 1269789Sahrens arc_buf_hdr_t *, buf); 1270789Sahrens } 1271789Sahrens 1272789Sahrens arc_try_grow(blksz); 1273789Sahrens if (arc_evict_needed()) { 1274789Sahrens arc_evict_for_state(new_state, blksz); 1275789Sahrens } 1276789Sahrens 1277789Sahrens /* Bump up the target size of the MRU list */ 1278789Sahrens mult = ((arc.mru_bot->size >= arc.mfu_bot->size) ? 1279789Sahrens 1 : (arc.mfu_bot->size/arc.mru_bot->size)); 1280789Sahrens arc.p = MIN(arc.c, arc.p + blksz * mult); 1281789Sahrens 1282789Sahrens buf->b_arc_access = lbolt; 1283789Sahrens arc_change_state(new_state, buf, hash_lock); 1284789Sahrens 1285789Sahrens atomic_add_64(&arc.mru_bot->hits, 1); 1286789Sahrens } else if (buf->b_state == arc.mfu_top) { 1287789Sahrens /* 1288789Sahrens * This buffer has been accessed more than once and is 1289789Sahrens * still in the cache. Keep it in the MFU state. 1290789Sahrens * 1291789Sahrens * NOTE: the add_reference() that occurred when we did 1292789Sahrens * the arc_read() should have kicked this off the list, 1293789Sahrens * so even if it was a prefetch, it will be put back at 1294789Sahrens * the head of the list when we remove_reference(). 1295789Sahrens */ 1296789Sahrens atomic_add_64(&arc.mfu_top->hits, 1); 1297789Sahrens } else if (buf->b_state == arc.mfu_bot) { 1298789Sahrens /* 1299789Sahrens * This buffer has been accessed more than once but has 1300789Sahrens * been evicted from the cache. Move it back to the 1301789Sahrens * MFU state. 1302789Sahrens */ 1303789Sahrens 1304789Sahrens arc_try_grow(blksz); 1305789Sahrens if (arc_evict_needed()) { 1306789Sahrens arc_evict_for_state(arc.mfu_top, blksz); 1307789Sahrens } 1308789Sahrens 1309789Sahrens /* Bump up the target size for the MFU list */ 1310789Sahrens mult = ((arc.mfu_bot->size >= arc.mru_bot->size) ? 1311789Sahrens 1 : (arc.mru_bot->size/arc.mfu_bot->size)); 1312789Sahrens arc.p = MAX(0, (int64_t)arc.p - blksz * mult); 1313789Sahrens 1314789Sahrens buf->b_arc_access = lbolt; 1315789Sahrens DTRACE_PROBE1(new_state__mfu_top, 1316789Sahrens arc_buf_hdr_t *, buf); 1317789Sahrens arc_change_state(arc.mfu_top, buf, hash_lock); 1318789Sahrens 1319789Sahrens atomic_add_64(&arc.mfu_bot->hits, 1); 1320789Sahrens } else { 1321789Sahrens ASSERT(!"invalid arc state"); 1322789Sahrens } 1323789Sahrens 1324789Sahrens } 1325789Sahrens 1326789Sahrens /* a generic arc_done_func_t which you can use */ 1327789Sahrens /* ARGSUSED */ 1328789Sahrens void 1329789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 1330789Sahrens { 1331789Sahrens bcopy(buf->b_data, arg, buf->b_hdr->b_size); 1332789Sahrens arc_buf_free(buf, arg); 1333789Sahrens } 1334789Sahrens 1335789Sahrens /* a generic arc_done_func_t which you can use */ 1336789Sahrens void 1337789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 1338789Sahrens { 1339789Sahrens arc_buf_t **bufp = arg; 1340789Sahrens if (zio && zio->io_error) { 1341789Sahrens arc_buf_free(buf, arg); 1342789Sahrens *bufp = NULL; 1343789Sahrens } else { 1344789Sahrens *bufp = buf; 1345789Sahrens } 1346789Sahrens } 1347789Sahrens 1348789Sahrens static void 1349789Sahrens arc_read_done(zio_t *zio) 1350789Sahrens { 1351789Sahrens arc_buf_hdr_t *hdr; 1352789Sahrens arc_buf_t *buf; 1353789Sahrens arc_buf_t *abuf; /* buffer we're assigning to callback */ 1354789Sahrens kmutex_t *hash_lock; 1355789Sahrens arc_callback_t *callback_list, *acb; 1356789Sahrens int freeable = FALSE; 1357789Sahrens 1358789Sahrens buf = zio->io_private; 1359789Sahrens hdr = buf->b_hdr; 1360789Sahrens 1361789Sahrens if (!HDR_FREED_IN_READ(hdr)) { 1362789Sahrens arc_buf_hdr_t *found; 1363789Sahrens 1364789Sahrens found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 1365789Sahrens &hash_lock); 1366789Sahrens 1367789Sahrens /* 1368789Sahrens * Buffer was inserted into hash-table and removed from lists 1369789Sahrens * prior to starting I/O. We should find this header, since 1370789Sahrens * it's in the hash table, and it should be legit since it's 1371789Sahrens * not possible to evict it during the I/O. 1372789Sahrens */ 1373789Sahrens 1374789Sahrens ASSERT(found); 1375789Sahrens ASSERT(DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))); 1376789Sahrens } 1377789Sahrens 1378789Sahrens /* byteswap if necessary */ 1379789Sahrens callback_list = hdr->b_acb; 1380789Sahrens ASSERT(callback_list != NULL); 1381789Sahrens if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 1382789Sahrens callback_list->acb_byteswap(buf->b_data, hdr->b_size); 1383789Sahrens 1384789Sahrens /* create copies of the data buffer for the callers */ 1385789Sahrens abuf = buf; 1386789Sahrens for (acb = callback_list; acb; acb = acb->acb_next) { 1387789Sahrens if (acb->acb_done) { 1388789Sahrens if (abuf == NULL) { 1389789Sahrens abuf = kmem_cache_alloc(buf_cache, KM_SLEEP); 1390789Sahrens abuf->b_data = zio_buf_alloc(hdr->b_size); 1391789Sahrens atomic_add_64(&arc.size, hdr->b_size); 1392789Sahrens bcopy(buf->b_data, abuf->b_data, hdr->b_size); 1393789Sahrens abuf->b_hdr = hdr; 1394789Sahrens abuf->b_next = hdr->b_buf; 1395789Sahrens hdr->b_buf = abuf; 1396789Sahrens atomic_add_64(&hdr->b_state->size, hdr->b_size); 1397789Sahrens } 1398789Sahrens acb->acb_buf = abuf; 1399789Sahrens abuf = NULL; 1400789Sahrens } else { 1401789Sahrens /* 1402789Sahrens * The caller did not provide a callback function. 1403789Sahrens * In this case, we should just remove the reference. 1404789Sahrens */ 1405789Sahrens if (HDR_FREED_IN_READ(hdr)) { 1406789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 1407789Sahrens (void) refcount_remove(&hdr->b_refcnt, 1408789Sahrens acb->acb_private); 1409789Sahrens } else { 1410789Sahrens (void) remove_reference(hdr, hash_lock, 1411789Sahrens acb->acb_private); 1412789Sahrens } 1413789Sahrens } 1414789Sahrens } 1415789Sahrens hdr->b_acb = NULL; 1416789Sahrens hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 1417789Sahrens 1418789Sahrens ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 1419789Sahrens 1420789Sahrens if (zio->io_error != 0) { 1421789Sahrens hdr->b_flags |= ARC_IO_ERROR; 1422789Sahrens if (hdr->b_state != arc.anon) 1423789Sahrens arc_change_state(arc.anon, hdr, hash_lock); 1424789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 1425789Sahrens } 1426789Sahrens 1427789Sahrens if (!HDR_FREED_IN_READ(hdr)) { 1428789Sahrens /* 1429789Sahrens * Only call arc_access on anonymous buffers. This is because 1430789Sahrens * if we've issued an I/O for an evicted buffer, we've already 1431789Sahrens * called arc_access (to prevent any simultaneous readers from 1432789Sahrens * getting confused). 1433789Sahrens */ 1434789Sahrens if (zio->io_error == 0 && hdr->b_state == arc.anon) 1435789Sahrens arc_access(hdr, hash_lock); 1436789Sahrens mutex_exit(hash_lock); 1437789Sahrens } else { 1438789Sahrens /* 1439789Sahrens * This block was freed while we waited for the read to 1440789Sahrens * complete. It has been removed from the hash table and 1441789Sahrens * moved to the anonymous state (so that it won't show up 1442789Sahrens * in the cache). 1443789Sahrens */ 1444789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 1445789Sahrens freeable = refcount_is_zero(&hdr->b_refcnt); 1446789Sahrens } 1447789Sahrens 1448789Sahrens cv_broadcast(&hdr->b_cv); 1449789Sahrens 1450789Sahrens /* execute each callback and free its structure */ 1451789Sahrens while ((acb = callback_list) != NULL) { 1452789Sahrens if (acb->acb_done) 1453789Sahrens acb->acb_done(zio, acb->acb_buf, acb->acb_private); 1454789Sahrens 1455789Sahrens if (acb->acb_zio_dummy != NULL) { 1456789Sahrens acb->acb_zio_dummy->io_error = zio->io_error; 1457789Sahrens zio_nowait(acb->acb_zio_dummy); 1458789Sahrens } 1459789Sahrens 1460789Sahrens callback_list = acb->acb_next; 1461789Sahrens kmem_free(acb, sizeof (arc_callback_t)); 1462789Sahrens } 1463789Sahrens 1464789Sahrens if (freeable) 1465789Sahrens arc_hdr_free(hdr); 1466789Sahrens } 1467789Sahrens 1468789Sahrens /* 1469789Sahrens * "Read" the block block at the specified DVA (in bp) via the 1470789Sahrens * cache. If the block is found in the cache, invoke the provided 1471789Sahrens * callback immediately and return. Note that the `zio' parameter 1472789Sahrens * in the callback will be NULL in this case, since no IO was 1473789Sahrens * required. If the block is not in the cache pass the read request 1474789Sahrens * on to the spa with a substitute callback function, so that the 1475789Sahrens * requested block will be added to the cache. 1476789Sahrens * 1477789Sahrens * If a read request arrives for a block that has a read in-progress, 1478789Sahrens * either wait for the in-progress read to complete (and return the 1479789Sahrens * results); or, if this is a read with a "done" func, add a record 1480789Sahrens * to the read to invoke the "done" func when the read completes, 1481789Sahrens * and return; or just return. 1482789Sahrens * 1483789Sahrens * arc_read_done() will invoke all the requested "done" functions 1484789Sahrens * for readers of this block. 1485789Sahrens */ 1486789Sahrens int 1487789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 1488789Sahrens arc_done_func_t *done, void *private, int priority, int flags, 1489789Sahrens uint32_t arc_flags) 1490789Sahrens { 1491789Sahrens arc_buf_hdr_t *hdr; 1492789Sahrens arc_buf_t *buf; 1493789Sahrens kmutex_t *hash_lock; 1494789Sahrens zio_t *rzio; 1495789Sahrens 1496789Sahrens top: 1497789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 1498789Sahrens if (hdr && hdr->b_buf) { 1499789Sahrens 1500789Sahrens ASSERT((hdr->b_state == arc.mru_top) || 1501789Sahrens (hdr->b_state == arc.mfu_top) || 1502789Sahrens ((hdr->b_state == arc.anon) && 1503789Sahrens (HDR_IO_IN_PROGRESS(hdr)))); 1504789Sahrens 1505789Sahrens if (HDR_IO_IN_PROGRESS(hdr)) { 1506789Sahrens 1507789Sahrens if ((arc_flags & ARC_NOWAIT) && done) { 1508789Sahrens arc_callback_t *acb = NULL; 1509789Sahrens 1510789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), 1511789Sahrens KM_SLEEP); 1512789Sahrens acb->acb_done = done; 1513789Sahrens acb->acb_private = private; 1514789Sahrens acb->acb_byteswap = swap; 1515789Sahrens if (pio != NULL) 1516789Sahrens acb->acb_zio_dummy = zio_null(pio, 1517789Sahrens spa, NULL, NULL, flags); 1518789Sahrens 1519789Sahrens ASSERT(acb->acb_done != NULL); 1520789Sahrens acb->acb_next = hdr->b_acb; 1521789Sahrens hdr->b_acb = acb; 1522789Sahrens add_reference(hdr, hash_lock, private); 1523789Sahrens mutex_exit(hash_lock); 1524789Sahrens return (0); 1525789Sahrens } else if (arc_flags & ARC_WAIT) { 1526789Sahrens cv_wait(&hdr->b_cv, hash_lock); 1527789Sahrens mutex_exit(hash_lock); 1528789Sahrens goto top; 1529789Sahrens } 1530789Sahrens 1531789Sahrens mutex_exit(hash_lock); 1532789Sahrens return (0); 1533789Sahrens } 1534789Sahrens 1535789Sahrens /* 1536789Sahrens * If there is already a reference on this block, create 1537789Sahrens * a new copy of the data so that we will be guaranteed 1538789Sahrens * that arc_release() will always succeed. 1539789Sahrens */ 1540789Sahrens 1541789Sahrens if (done) 1542789Sahrens add_reference(hdr, hash_lock, private); 1543789Sahrens if (done && refcount_count(&hdr->b_refcnt) > 1) { 1544789Sahrens buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 1545789Sahrens buf->b_data = zio_buf_alloc(hdr->b_size); 1546789Sahrens ASSERT3U(refcount_count(&hdr->b_refcnt), >, 1); 1547789Sahrens atomic_add_64(&arc.size, hdr->b_size); 1548789Sahrens bcopy(hdr->b_buf->b_data, buf->b_data, hdr->b_size); 1549789Sahrens buf->b_hdr = hdr; 1550789Sahrens buf->b_next = hdr->b_buf; 1551789Sahrens hdr->b_buf = buf; 1552789Sahrens atomic_add_64(&hdr->b_state->size, hdr->b_size); 1553789Sahrens } else { 1554789Sahrens buf = hdr->b_buf; 1555789Sahrens } 1556789Sahrens DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 1557789Sahrens arc_access(hdr, hash_lock); 1558789Sahrens mutex_exit(hash_lock); 1559789Sahrens atomic_add_64(&arc.hits, 1); 1560789Sahrens if (done) 1561789Sahrens done(NULL, buf, private); 1562789Sahrens } else { 1563789Sahrens uint64_t size = BP_GET_LSIZE(bp); 1564789Sahrens arc_callback_t *acb; 1565789Sahrens 1566789Sahrens if (hdr == NULL) { 1567789Sahrens /* this block is not in the cache */ 1568789Sahrens arc_buf_hdr_t *exists; 1569789Sahrens 1570789Sahrens buf = arc_buf_alloc(spa, size, private); 1571789Sahrens hdr = buf->b_hdr; 1572789Sahrens hdr->b_dva = *BP_IDENTITY(bp); 1573789Sahrens hdr->b_birth = bp->blk_birth; 1574789Sahrens hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 1575789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 1576789Sahrens if (exists) { 1577789Sahrens /* somebody beat us to the hash insert */ 1578789Sahrens mutex_exit(hash_lock); 1579789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 1580789Sahrens hdr->b_birth = 0; 1581789Sahrens hdr->b_cksum0 = 0; 1582789Sahrens arc_buf_free(buf, private); 1583789Sahrens goto top; /* restart the IO request */ 1584789Sahrens } 1585789Sahrens 1586789Sahrens } else { 1587789Sahrens /* this block is in the ghost cache */ 1588789Sahrens ASSERT((hdr->b_state == arc.mru_bot) || 1589789Sahrens (hdr->b_state == arc.mfu_bot)); 1590789Sahrens add_reference(hdr, hash_lock, private); 1591789Sahrens 1592789Sahrens buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 1593789Sahrens buf->b_data = zio_buf_alloc(hdr->b_size); 1594789Sahrens atomic_add_64(&arc.size, hdr->b_size); 1595789Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1596789Sahrens ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 1597789Sahrens buf->b_hdr = hdr; 1598789Sahrens buf->b_next = NULL; 1599789Sahrens hdr->b_buf = buf; 1600789Sahrens } 1601789Sahrens 1602789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 1603789Sahrens acb->acb_done = done; 1604789Sahrens acb->acb_private = private; 1605789Sahrens acb->acb_byteswap = swap; 1606789Sahrens 1607789Sahrens ASSERT(hdr->b_acb == NULL); 1608789Sahrens hdr->b_acb = acb; 1609789Sahrens 1610789Sahrens /* 1611789Sahrens * If this DVA is part of a prefetch, mark the buf 1612789Sahrens * header with the prefetch flag 1613789Sahrens */ 1614789Sahrens if (arc_flags & ARC_PREFETCH) 1615789Sahrens hdr->b_flags |= ARC_PREFETCH; 1616789Sahrens hdr->b_flags |= ARC_IO_IN_PROGRESS; 1617789Sahrens 1618789Sahrens /* 1619789Sahrens * If the buffer has been evicted, migrate it to a present state 1620789Sahrens * before issuing the I/O. Once we drop the hash-table lock, 1621789Sahrens * the header will be marked as I/O in progress and have an 1622789Sahrens * attached buffer. At this point, anybody who finds this 1623789Sahrens * buffer ought to notice that it's legit but has a pending I/O. 1624789Sahrens */ 1625789Sahrens 1626789Sahrens if ((hdr->b_state == arc.mru_bot) || 1627789Sahrens (hdr->b_state == arc.mfu_bot)) 1628789Sahrens arc_access(hdr, hash_lock); 1629789Sahrens 1630789Sahrens mutex_exit(hash_lock); 1631789Sahrens 1632789Sahrens ASSERT3U(hdr->b_size, ==, size); 1633789Sahrens DTRACE_PROBE2(arc__miss, blkptr_t *, bp, 1634789Sahrens uint64_t, size); 1635789Sahrens atomic_add_64(&arc.misses, 1); 1636789Sahrens rzio = zio_read(pio, spa, bp, buf->b_data, size, 1637789Sahrens arc_read_done, buf, priority, flags); 1638789Sahrens 1639789Sahrens if (arc_flags & ARC_WAIT) 1640789Sahrens return (zio_wait(rzio)); 1641789Sahrens 1642789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 1643789Sahrens zio_nowait(rzio); 1644789Sahrens } 1645789Sahrens return (0); 1646789Sahrens } 1647789Sahrens 1648789Sahrens /* 1649789Sahrens * arc_read() variant to support pool traversal. If the block is already 1650789Sahrens * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 1651789Sahrens * The idea is that we don't want pool traversal filling up memory, but 1652789Sahrens * if the ARC already has the data anyway, we shouldn't pay for the I/O. 1653789Sahrens */ 1654789Sahrens int 1655789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 1656789Sahrens { 1657789Sahrens arc_buf_hdr_t *hdr; 1658789Sahrens kmutex_t *hash_mtx; 1659789Sahrens int rc = 0; 1660789Sahrens 1661789Sahrens hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 1662789Sahrens 1663789Sahrens if (hdr && hdr->b_buf && !HDR_IO_IN_PROGRESS(hdr)) 1664789Sahrens bcopy(hdr->b_buf->b_data, data, hdr->b_size); 1665789Sahrens else 1666789Sahrens rc = ENOENT; 1667789Sahrens 1668789Sahrens if (hash_mtx) 1669789Sahrens mutex_exit(hash_mtx); 1670789Sahrens 1671789Sahrens return (rc); 1672789Sahrens } 1673789Sahrens 1674789Sahrens /* 1675789Sahrens * Release this buffer from the cache. This must be done 1676789Sahrens * after a read and prior to modifying the buffer contents. 1677789Sahrens * If the buffer has more than one reference, we must make 1678789Sahrens * make a new hdr for the buffer. 1679789Sahrens */ 1680789Sahrens void 1681789Sahrens arc_release(arc_buf_t *buf, void *tag) 1682789Sahrens { 1683789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 1684789Sahrens kmutex_t *hash_lock = HDR_LOCK(hdr); 1685789Sahrens 1686789Sahrens /* this buffer is not on any list */ 1687789Sahrens ASSERT(refcount_count(&hdr->b_refcnt) > 0); 1688789Sahrens 1689789Sahrens if (hdr->b_state == arc.anon) { 1690789Sahrens /* this buffer is already released */ 1691789Sahrens ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 1692789Sahrens ASSERT(BUF_EMPTY(hdr)); 1693789Sahrens return; 1694789Sahrens } 1695789Sahrens 1696789Sahrens mutex_enter(hash_lock); 1697789Sahrens 1698789Sahrens if (refcount_count(&hdr->b_refcnt) > 1) { 1699789Sahrens arc_buf_hdr_t *nhdr; 1700789Sahrens arc_buf_t **bufp; 1701789Sahrens uint64_t blksz = hdr->b_size; 1702789Sahrens spa_t *spa = hdr->b_spa; 1703789Sahrens 1704789Sahrens /* 1705789Sahrens * Pull the data off of this buf and attach it to 1706789Sahrens * a new anonymous buf. 1707789Sahrens */ 1708789Sahrens bufp = &hdr->b_buf; 1709789Sahrens while (*bufp != buf) { 1710789Sahrens ASSERT(*bufp); 1711789Sahrens bufp = &(*bufp)->b_next; 1712789Sahrens } 1713789Sahrens *bufp = (*bufp)->b_next; 1714789Sahrens (void) refcount_remove(&hdr->b_refcnt, tag); 1715789Sahrens ASSERT3U(hdr->b_state->size, >=, hdr->b_size); 1716789Sahrens atomic_add_64(&hdr->b_state->size, -hdr->b_size); 1717789Sahrens mutex_exit(hash_lock); 1718789Sahrens 1719789Sahrens nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 1720789Sahrens nhdr->b_size = blksz; 1721789Sahrens nhdr->b_spa = spa; 1722789Sahrens nhdr->b_buf = buf; 1723789Sahrens nhdr->b_state = arc.anon; 1724789Sahrens nhdr->b_arc_access = 0; 1725789Sahrens nhdr->b_flags = 0; 1726789Sahrens buf->b_hdr = nhdr; 1727789Sahrens buf->b_next = NULL; 1728789Sahrens (void) refcount_add(&nhdr->b_refcnt, tag); 1729789Sahrens atomic_add_64(&arc.anon->size, blksz); 1730789Sahrens 1731789Sahrens hdr = nhdr; 1732789Sahrens } else { 1733789Sahrens ASSERT(!list_link_active(&hdr->b_arc_node)); 1734789Sahrens ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1735789Sahrens arc_change_state(arc.anon, hdr, hash_lock); 1736789Sahrens hdr->b_arc_access = 0; 1737789Sahrens mutex_exit(hash_lock); 1738789Sahrens bzero(&hdr->b_dva, sizeof (dva_t)); 1739789Sahrens hdr->b_birth = 0; 1740789Sahrens hdr->b_cksum0 = 0; 1741789Sahrens } 1742789Sahrens } 1743789Sahrens 1744789Sahrens int 1745789Sahrens arc_released(arc_buf_t *buf) 1746789Sahrens { 1747789Sahrens return (buf->b_hdr->b_state == arc.anon); 1748789Sahrens } 1749789Sahrens 1750789Sahrens static void 1751789Sahrens arc_write_done(zio_t *zio) 1752789Sahrens { 1753789Sahrens arc_buf_t *buf; 1754789Sahrens arc_buf_hdr_t *hdr; 1755789Sahrens arc_callback_t *acb; 1756789Sahrens 1757789Sahrens buf = zio->io_private; 1758789Sahrens hdr = buf->b_hdr; 1759789Sahrens acb = hdr->b_acb; 1760789Sahrens hdr->b_acb = NULL; 1761789Sahrens 1762789Sahrens /* this buffer is on no lists and is not in the hash table */ 1763789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 1764789Sahrens 1765789Sahrens hdr->b_dva = *BP_IDENTITY(zio->io_bp); 1766789Sahrens hdr->b_birth = zio->io_bp->blk_birth; 1767789Sahrens hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 1768789Sahrens /* clear the "in-write" flag */ 1769789Sahrens hdr->b_hash_next = NULL; 1770789Sahrens /* This write may be all-zero */ 1771789Sahrens if (!BUF_EMPTY(hdr)) { 1772789Sahrens arc_buf_hdr_t *exists; 1773789Sahrens kmutex_t *hash_lock; 1774789Sahrens 1775789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 1776789Sahrens if (exists) { 1777789Sahrens /* 1778789Sahrens * This can only happen if we overwrite for 1779789Sahrens * sync-to-convergence, because we remove 1780789Sahrens * buffers from the hash table when we arc_free(). 1781789Sahrens */ 1782789Sahrens ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 1783789Sahrens BP_IDENTITY(zio->io_bp))); 1784789Sahrens ASSERT3U(zio->io_bp_orig.blk_birth, ==, 1785789Sahrens zio->io_bp->blk_birth); 1786789Sahrens 1787789Sahrens ASSERT(refcount_is_zero(&exists->b_refcnt)); 1788789Sahrens arc_change_state(arc.anon, exists, hash_lock); 1789789Sahrens mutex_exit(hash_lock); 1790789Sahrens arc_hdr_free(exists); 1791789Sahrens exists = buf_hash_insert(hdr, &hash_lock); 1792789Sahrens ASSERT3P(exists, ==, NULL); 1793789Sahrens } 1794789Sahrens arc_access(hdr, hash_lock); 1795789Sahrens mutex_exit(hash_lock); 1796789Sahrens } 1797789Sahrens if (acb && acb->acb_done) { 1798789Sahrens ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 1799789Sahrens acb->acb_done(zio, buf, acb->acb_private); 1800789Sahrens } 1801789Sahrens 1802789Sahrens if (acb) 1803789Sahrens kmem_free(acb, sizeof (arc_callback_t)); 1804789Sahrens } 1805789Sahrens 1806789Sahrens int 1807789Sahrens arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, 1808789Sahrens uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 1809789Sahrens arc_done_func_t *done, void *private, int priority, int flags, 1810789Sahrens uint32_t arc_flags) 1811789Sahrens { 1812789Sahrens arc_buf_hdr_t *hdr = buf->b_hdr; 1813789Sahrens arc_callback_t *acb; 1814789Sahrens zio_t *rzio; 1815789Sahrens 1816789Sahrens /* this is a private buffer - no locking required */ 1817789Sahrens ASSERT3P(hdr->b_state, ==, arc.anon); 1818789Sahrens ASSERT(BUF_EMPTY(hdr)); 1819789Sahrens ASSERT(!HDR_IO_ERROR(hdr)); 1820789Sahrens acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 1821789Sahrens acb->acb_done = done; 1822789Sahrens acb->acb_private = private; 1823789Sahrens acb->acb_byteswap = (arc_byteswap_func_t *)-1; 1824789Sahrens hdr->b_acb = acb; 1825789Sahrens rzio = zio_write(pio, spa, checksum, compress, txg, bp, 1826789Sahrens buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags); 1827789Sahrens 1828789Sahrens if (arc_flags & ARC_WAIT) 1829789Sahrens return (zio_wait(rzio)); 1830789Sahrens 1831789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 1832789Sahrens zio_nowait(rzio); 1833789Sahrens 1834789Sahrens return (0); 1835789Sahrens } 1836789Sahrens 1837789Sahrens int 1838789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 1839789Sahrens zio_done_func_t *done, void *private, uint32_t arc_flags) 1840789Sahrens { 1841789Sahrens arc_buf_hdr_t *ab; 1842789Sahrens kmutex_t *hash_lock; 1843789Sahrens zio_t *zio; 1844789Sahrens 1845789Sahrens /* 1846789Sahrens * If this buffer is in the cache, release it, so it 1847789Sahrens * can be re-used. 1848789Sahrens */ 1849789Sahrens ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 1850789Sahrens if (ab != NULL) { 1851789Sahrens /* 1852789Sahrens * The checksum of blocks to free is not always 1853789Sahrens * preserved (eg. on the deadlist). However, if it is 1854789Sahrens * nonzero, it should match what we have in the cache. 1855789Sahrens */ 1856789Sahrens ASSERT(bp->blk_cksum.zc_word[0] == 0 || 1857789Sahrens ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 1858789Sahrens arc_change_state(arc.anon, ab, hash_lock); 1859789Sahrens if (refcount_is_zero(&ab->b_refcnt)) { 1860789Sahrens mutex_exit(hash_lock); 1861789Sahrens arc_hdr_free(ab); 1862789Sahrens atomic_add_64(&arc.deleted, 1); 1863789Sahrens } else { 1864789Sahrens ASSERT3U(refcount_count(&ab->b_refcnt), ==, 1); 1865789Sahrens if (HDR_IO_IN_PROGRESS(ab)) 1866789Sahrens ab->b_flags |= ARC_FREED_IN_READ; 1867789Sahrens ab->b_arc_access = 0; 1868789Sahrens bzero(&ab->b_dva, sizeof (dva_t)); 1869789Sahrens ab->b_birth = 0; 1870789Sahrens ab->b_cksum0 = 0; 1871789Sahrens mutex_exit(hash_lock); 1872789Sahrens } 1873789Sahrens } 1874789Sahrens 1875789Sahrens zio = zio_free(pio, spa, txg, bp, done, private); 1876789Sahrens 1877789Sahrens if (arc_flags & ARC_WAIT) 1878789Sahrens return (zio_wait(zio)); 1879789Sahrens 1880789Sahrens ASSERT(arc_flags & ARC_NOWAIT); 1881789Sahrens zio_nowait(zio); 1882789Sahrens 1883789Sahrens return (0); 1884789Sahrens } 1885789Sahrens 1886789Sahrens void 1887789Sahrens arc_tempreserve_clear(uint64_t tempreserve) 1888789Sahrens { 1889789Sahrens atomic_add_64(&arc_tempreserve, -tempreserve); 1890789Sahrens ASSERT((int64_t)arc_tempreserve >= 0); 1891789Sahrens } 1892789Sahrens 1893789Sahrens int 1894789Sahrens arc_tempreserve_space(uint64_t tempreserve) 1895789Sahrens { 1896789Sahrens #ifdef ZFS_DEBUG 1897789Sahrens /* 1898789Sahrens * Once in a while, fail for no reason. Everything should cope. 1899789Sahrens */ 1900789Sahrens if (spa_get_random(10000) == 0) { 1901789Sahrens dprintf("forcing random failure\n"); 1902789Sahrens return (ERESTART); 1903789Sahrens } 1904789Sahrens #endif 1905982Smaybee if (tempreserve > arc.c/4 && !arc.no_grow) 1906982Smaybee arc.c = MIN(arc.c_max, tempreserve * 4); 1907982Smaybee if (tempreserve > arc.c) 1908982Smaybee return (ENOMEM); 1909982Smaybee 1910789Sahrens /* 1911982Smaybee * Throttle writes when the amount of dirty data in the cache 1912982Smaybee * gets too large. We try to keep the cache less than half full 1913982Smaybee * of dirty blocks so that our sync times don't grow too large. 1914982Smaybee * Note: if two requests come in concurrently, we might let them 1915982Smaybee * both succeed, when one of them should fail. Not a huge deal. 1916982Smaybee * 1917982Smaybee * XXX The limit should be adjusted dynamically to keep the time 1918982Smaybee * to sync a dataset fixed (around 1-5 seconds?). 1919789Sahrens */ 1920789Sahrens 1921982Smaybee if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 && 1922982Smaybee arc_tempreserve + arc.anon->size > arc.c / 4) { 1923789Sahrens dprintf("failing, arc_tempreserve=%lluK anon=%lluK " 1924789Sahrens "tempreserve=%lluK arc.c=%lluK\n", 1925789Sahrens arc_tempreserve>>10, arc.anon->lsize>>10, 1926789Sahrens tempreserve>>10, arc.c>>10); 1927789Sahrens return (ERESTART); 1928789Sahrens } 1929789Sahrens atomic_add_64(&arc_tempreserve, tempreserve); 1930789Sahrens return (0); 1931789Sahrens } 1932789Sahrens 1933789Sahrens void 1934789Sahrens arc_init(void) 1935789Sahrens { 1936789Sahrens mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL); 1937789Sahrens mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 1938789Sahrens cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 1939789Sahrens 1940789Sahrens /* Start out with 1/8 of all memory */ 1941789Sahrens arc.c = physmem * PAGESIZE / 8; 1942789Sahrens 1943789Sahrens #ifdef _KERNEL 1944789Sahrens /* 1945789Sahrens * On architectures where the physical memory can be larger 1946789Sahrens * than the addressable space (intel in 32-bit mode), we may 1947789Sahrens * need to limit the cache to 1/8 of VM size. 1948789Sahrens */ 1949789Sahrens arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 1950789Sahrens #endif 1951789Sahrens 1952982Smaybee /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ 1953789Sahrens arc.c_min = MAX(arc.c / 4, 64<<20); 1954982Smaybee /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ 1955789Sahrens if (arc.c * 8 >= 1<<30) 1956789Sahrens arc.c_max = (arc.c * 8) - (1<<30); 1957789Sahrens else 1958789Sahrens arc.c_max = arc.c_min; 1959789Sahrens arc.c_max = MAX(arc.c * 6, arc.c_max); 1960789Sahrens arc.c = arc.c_max; 1961789Sahrens arc.p = (arc.c >> 1); 1962789Sahrens 1963789Sahrens /* if kmem_flags are set, lets try to use less memory */ 1964789Sahrens if (kmem_debugging()) 1965789Sahrens arc.c = arc.c / 2; 1966789Sahrens if (arc.c < arc.c_min) 1967789Sahrens arc.c = arc.c_min; 1968789Sahrens 1969789Sahrens arc.anon = &ARC_anon; 1970789Sahrens arc.mru_top = &ARC_mru_top; 1971789Sahrens arc.mru_bot = &ARC_mru_bot; 1972789Sahrens arc.mfu_top = &ARC_mfu_top; 1973789Sahrens arc.mfu_bot = &ARC_mfu_bot; 1974789Sahrens 1975789Sahrens list_create(&arc.mru_top->list, sizeof (arc_buf_hdr_t), 1976789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 1977789Sahrens list_create(&arc.mru_bot->list, sizeof (arc_buf_hdr_t), 1978789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 1979789Sahrens list_create(&arc.mfu_top->list, sizeof (arc_buf_hdr_t), 1980789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 1981789Sahrens list_create(&arc.mfu_bot->list, sizeof (arc_buf_hdr_t), 1982789Sahrens offsetof(arc_buf_hdr_t, b_arc_node)); 1983789Sahrens 1984789Sahrens buf_init(); 1985789Sahrens 1986789Sahrens arc_thread_exit = 0; 1987789Sahrens 1988789Sahrens (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 1989789Sahrens TS_RUN, minclsyspri); 1990789Sahrens } 1991789Sahrens 1992789Sahrens void 1993789Sahrens arc_fini(void) 1994789Sahrens { 1995789Sahrens mutex_enter(&arc_reclaim_thr_lock); 1996789Sahrens arc_thread_exit = 1; 1997789Sahrens while (arc_thread_exit != 0) 1998789Sahrens cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 1999789Sahrens mutex_exit(&arc_reclaim_thr_lock); 2000789Sahrens 2001789Sahrens arc_flush(); 2002789Sahrens 2003789Sahrens arc_dead = TRUE; 2004789Sahrens 2005789Sahrens mutex_destroy(&arc_reclaim_lock); 2006789Sahrens mutex_destroy(&arc_reclaim_thr_lock); 2007789Sahrens cv_destroy(&arc_reclaim_thr_cv); 2008789Sahrens 2009789Sahrens list_destroy(&arc.mru_top->list); 2010789Sahrens list_destroy(&arc.mru_bot->list); 2011789Sahrens list_destroy(&arc.mfu_top->list); 2012789Sahrens list_destroy(&arc.mfu_bot->list); 2013789Sahrens 2014789Sahrens buf_fini(); 2015789Sahrens } 2016