xref: /onnv-gate/usr/src/uts/common/fs/zfs/arc.c (revision 3312)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51484Sek110237  * Common Development and Distribution License (the "License").
61484Sek110237  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
221484Sek110237  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
26789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27789Sahrens 
28789Sahrens /*
29789Sahrens  * DVA-based Adjustable Replacement Cache
30789Sahrens  *
311544Seschrock  * While much of the theory of operation used here is
321544Seschrock  * based on the self-tuning, low overhead replacement cache
33789Sahrens  * presented by Megiddo and Modha at FAST 2003, there are some
34789Sahrens  * significant differences:
35789Sahrens  *
36789Sahrens  * 1. The Megiddo and Modha model assumes any page is evictable.
37789Sahrens  * Pages in its cache cannot be "locked" into memory.  This makes
38789Sahrens  * the eviction algorithm simple: evict the last page in the list.
39789Sahrens  * This also makes the performance characteristics easy to reason
40789Sahrens  * about.  Our cache is not so simple.  At any given moment, some
41789Sahrens  * subset of the blocks in the cache are un-evictable because we
42789Sahrens  * have handed out a reference to them.  Blocks are only evictable
43789Sahrens  * when there are no external references active.  This makes
44789Sahrens  * eviction far more problematic:  we choose to evict the evictable
45789Sahrens  * blocks that are the "lowest" in the list.
46789Sahrens  *
47789Sahrens  * There are times when it is not possible to evict the requested
48789Sahrens  * space.  In these circumstances we are unable to adjust the cache
49789Sahrens  * size.  To prevent the cache from growing unbounded at these times we
50789Sahrens  * implement a "cache throttle" that slows the flow of new data
51789Sahrens  * into the cache until we can make space available.
52789Sahrens  *
53789Sahrens  * 2. The Megiddo and Modha model assumes a fixed cache size.
54789Sahrens  * Pages are evicted when the cache is full and there is a cache
55789Sahrens  * miss.  Our model has a variable sized cache.  It grows with
56789Sahrens  * high use, but also tries to react to memory pressure from the
57789Sahrens  * operating system: decreasing its size when system memory is
58789Sahrens  * tight.
59789Sahrens  *
60789Sahrens  * 3. The Megiddo and Modha model assumes a fixed page size. All
61789Sahrens  * elements of the cache are therefore exactly the same size.  So
62789Sahrens  * when adjusting the cache size following a cache miss, it's simply
63789Sahrens  * a matter of choosing a single page to evict.  In our model, we
64789Sahrens  * have variable sized cache blocks (ranging from 512 bytes to
65789Sahrens  * 128K bytes).  We therefore choose a set of blocks to evict to make
66789Sahrens  * space for a cache miss that approximates as closely as possible
67789Sahrens  * the space used by the new block.
68789Sahrens  *
69789Sahrens  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70789Sahrens  * by N. Megiddo & D. Modha, FAST 2003
71789Sahrens  */
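/*
 * For illustration (sizes are hypothetical): a miss that needs a 128K
 * buffer in a full cache may be satisfied by evicting a single 128K
 * block, or by evicting several smaller evictable blocks (say two 64K
 * blocks, or one 64K plus four 16K) whose sizes sum to roughly the
 * space required.
 */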
72789Sahrens 
73789Sahrens /*
74789Sahrens  * The locking model:
75789Sahrens  *
76789Sahrens  * A new reference to a cache buffer can be obtained in two
77789Sahrens  * ways: 1) via a hash table lookup using the DVA as a key,
78789Sahrens  * or 2) via one of the ARC lists.  The arc_read() interface
79789Sahrens  * uses method 1, while the internal arc algorithms for
80789Sahrens  * adjusting the cache use method 2.  We therefore provide two
81789Sahrens  * types of locks: 1) the hash table lock array, and 2) the
82789Sahrens  * arc list locks.
83789Sahrens  *
84789Sahrens  * Buffers do not have their own mutexes; rather, they rely on the
85789Sahrens  * hash table mutexes for the bulk of their protection (i.e. most
86789Sahrens  * fields in the arc_buf_hdr_t are protected by these mutexes).
87789Sahrens  *
88789Sahrens  * buf_hash_find() returns the appropriate mutex (held) when it
89789Sahrens  * locates the requested buffer in the hash table.  It returns
90789Sahrens  * NULL for the mutex if the buffer was not in the table.
91789Sahrens  *
92789Sahrens  * buf_hash_remove() expects the appropriate hash mutex to be
93789Sahrens  * already held before it is invoked.
94789Sahrens  *
95789Sahrens  * Each arc state also has a mutex which is used to protect the
96789Sahrens  * buffer list associated with the state.  When attempting to
97789Sahrens  * obtain a hash table lock while holding an arc list lock you
98789Sahrens  * must use mutex_tryenter() to avoid deadlock.  Also note that
992688Smaybee  * the active state mutex must be held before the ghost state mutex.
100789Sahrens  *
1011544Seschrock  * Arc buffers may have an associated eviction callback function.
1021544Seschrock  * This function will be invoked prior to removing the buffer (e.g.
1031544Seschrock  * in arc_do_user_evicts()).  Note however that the data associated
1041544Seschrock  * with the buffer may be evicted prior to the callback.  The callback
1051544Seschrock  * must be made with *no locks held* (to prevent deadlock).  Additionally,
1061544Seschrock  * the users of callbacks must ensure that their private data is
1071544Seschrock  * protected from simultaneous callbacks from arc_buf_evict()
1081544Seschrock  * and arc_do_user_evicts().
1091544Seschrock  *
110789Sahrens  * Note that the majority of the performance stats are manipulated
111789Sahrens  * with atomic operations.
112789Sahrens  */
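/*
 * A minimal sketch of that rule (illustrative only, mirroring what
 * arc_evict() and arc_evict_ghost() do below): while walking a state
 * list with its list mutex held, the hash lock is taken with
 * mutex_tryenter() and the buffer is skipped when the lock is busy,
 * rather than blocking and risking a deadlock:
 *
 *	mutex_enter(&state->mtx);
 *	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
 *		ab_prev = list_prev(&state->list, ab);
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;	(never block here)
 *		... operate on ab ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->mtx);
 */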
113789Sahrens 
114789Sahrens #include <sys/spa.h>
115789Sahrens #include <sys/zio.h>
1163093Sahrens #include <sys/zio_checksum.h>
117789Sahrens #include <sys/zfs_context.h>
118789Sahrens #include <sys/arc.h>
119789Sahrens #include <sys/refcount.h>
120789Sahrens #ifdef _KERNEL
121789Sahrens #include <sys/vmsystm.h>
122789Sahrens #include <vm/anon.h>
123789Sahrens #include <sys/fs/swapnode.h>
1241484Sek110237 #include <sys/dnlc.h>
125789Sahrens #endif
126789Sahrens #include <sys/callb.h>
127789Sahrens 
128789Sahrens static kmutex_t		arc_reclaim_thr_lock;
129789Sahrens static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
130789Sahrens static uint8_t		arc_thread_exit;
131789Sahrens 
1321484Sek110237 #define	ARC_REDUCE_DNLC_PERCENT	3
1331484Sek110237 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
1341484Sek110237 
135789Sahrens typedef enum arc_reclaim_strategy {
136789Sahrens 	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
137789Sahrens 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
138789Sahrens } arc_reclaim_strategy_t;
139789Sahrens 
140789Sahrens /* number of seconds before growing cache again */
141789Sahrens static int		arc_grow_retry = 60;
142789Sahrens 
1432391Smaybee /*
1442638Sperrin  * minimum lifespan of a prefetch block in clock ticks
1452638Sperrin  * (initialized in arc_init())
1462391Smaybee  */
1472638Sperrin static int		arc_min_prefetch_lifespan;
1482391Smaybee 
149789Sahrens static int arc_dead;
150789Sahrens 
151789Sahrens /*
1522885Sahrens  * These tunables are for performance analysis.
1532885Sahrens  */
1542885Sahrens uint64_t zfs_arc_max;
1552885Sahrens uint64_t zfs_arc_min;
1562885Sahrens 
1572885Sahrens /*
158789Sahrens  * Note that buffers can be in one of 5 states:
159789Sahrens  *	ARC_anon	- anonymous (discussed below)
1601544Seschrock  *	ARC_mru		- recently used, currently cached
1611544Seschrock  *	ARC_mru_ghost	- recently used, no longer in cache
1621544Seschrock  *	ARC_mfu		- frequently used, currently cached
1631544Seschrock  *	ARC_mfu_ghost	- frequently used, no longer in cache
164789Sahrens  * When there are no active references to a buffer, it is
165789Sahrens  * linked onto one of the lists in arc.  These are the
166789Sahrens  * only buffers that can be evicted or deleted.
167789Sahrens  *
168789Sahrens  * Anonymous buffers are buffers that are not associated with
169789Sahrens  * a DVA.  These are buffers that hold dirty block copies
170789Sahrens  * before they are written to stable storage.  By definition,
1711544Seschrock  * they are "ref'd" and are considered part of arc_mru
172789Sahrens  * that cannot be freed.  Generally, they will acquire a DVA
1731544Seschrock  * as they are written and migrate onto the arc_mru list.
174789Sahrens  */
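/*
 * In outline (a rough sketch, not a normative spec): a newly allocated
 * buffer starts out in ARC_anon; once it is written and hashed it
 * migrates to ARC_mru; a subsequent hit can promote it to ARC_mfu; and
 * when its data is evicted the header lingers on the corresponding
 * ghost list (ARC_mru_ghost or ARC_mfu_ghost), so that a later hit on
 * the ghost entry can inform how the target size p is adapted, per the
 * Megiddo/Modha scheme referenced above.
 */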
175789Sahrens 
176789Sahrens typedef struct arc_state {
177789Sahrens 	list_t	list;	/* linked list of evictable buffers in state */
178789Sahrens 	uint64_t lsize;	/* total size of buffers in the linked list */
179789Sahrens 	uint64_t size;	/* total size of all buffers in this state */
180789Sahrens 	uint64_t hits;
181789Sahrens 	kmutex_t mtx;
182789Sahrens } arc_state_t;
183789Sahrens 
184789Sahrens /* The 5 states: */
185789Sahrens static arc_state_t ARC_anon;
1861544Seschrock static arc_state_t ARC_mru;
1871544Seschrock static arc_state_t ARC_mru_ghost;
1881544Seschrock static arc_state_t ARC_mfu;
1891544Seschrock static arc_state_t ARC_mfu_ghost;
190789Sahrens 
191789Sahrens static struct arc {
192789Sahrens 	arc_state_t 	*anon;
1931544Seschrock 	arc_state_t	*mru;
1941544Seschrock 	arc_state_t	*mru_ghost;
1951544Seschrock 	arc_state_t	*mfu;
1961544Seschrock 	arc_state_t	*mfu_ghost;
197789Sahrens 	uint64_t	size;		/* Actual total arc size */
1981544Seschrock 	uint64_t	p;		/* Target size (in bytes) of mru */
199789Sahrens 	uint64_t	c;		/* Target size of cache (in bytes) */
200789Sahrens 	uint64_t	c_min;		/* Minimum target cache size */
201789Sahrens 	uint64_t	c_max;		/* Maximum target cache size */
202789Sahrens 
203789Sahrens 	/* performance stats */
204789Sahrens 	uint64_t	hits;
205789Sahrens 	uint64_t	misses;
206789Sahrens 	uint64_t	deleted;
2072688Smaybee 	uint64_t	recycle_miss;
2082688Smaybee 	uint64_t	mutex_miss;
2092688Smaybee 	uint64_t	evict_skip;
210789Sahrens 	uint64_t	hash_elements;
211789Sahrens 	uint64_t	hash_elements_max;
212789Sahrens 	uint64_t	hash_collisions;
213789Sahrens 	uint64_t	hash_chains;
214789Sahrens 	uint32_t	hash_chain_max;
215789Sahrens 
216789Sahrens 	int		no_grow;	/* Don't try to grow cache size */
217789Sahrens } arc;
218789Sahrens 
219789Sahrens static uint64_t arc_tempreserve;
220789Sahrens 
221789Sahrens typedef struct arc_callback arc_callback_t;
222789Sahrens 
223789Sahrens struct arc_callback {
224789Sahrens 	arc_done_func_t		*acb_done;
225789Sahrens 	void			*acb_private;
226789Sahrens 	arc_byteswap_func_t	*acb_byteswap;
227789Sahrens 	arc_buf_t		*acb_buf;
228789Sahrens 	zio_t			*acb_zio_dummy;
229789Sahrens 	arc_callback_t		*acb_next;
230789Sahrens };
231789Sahrens 
232789Sahrens struct arc_buf_hdr {
233789Sahrens 	/* protected by hash lock */
234789Sahrens 	dva_t			b_dva;
235789Sahrens 	uint64_t		b_birth;
236789Sahrens 	uint64_t		b_cksum0;
237789Sahrens 
2383093Sahrens 	kmutex_t		b_freeze_lock;
2393093Sahrens 	zio_cksum_t		*b_freeze_cksum;
2403093Sahrens 
241789Sahrens 	arc_buf_hdr_t		*b_hash_next;
242789Sahrens 	arc_buf_t		*b_buf;
243789Sahrens 	uint32_t		b_flags;
2441544Seschrock 	uint32_t		b_datacnt;
245789Sahrens 
2463290Sjohansen 	arc_callback_t		*b_acb;
247789Sahrens 	kcondvar_t		b_cv;
2483290Sjohansen 
2493290Sjohansen 	/* immutable */
2503290Sjohansen 	arc_buf_contents_t	b_type;
2513290Sjohansen 	uint64_t		b_size;
2523290Sjohansen 	spa_t			*b_spa;
253789Sahrens 
254789Sahrens 	/* protected by arc state mutex */
255789Sahrens 	arc_state_t		*b_state;
256789Sahrens 	list_node_t		b_arc_node;
257789Sahrens 
258789Sahrens 	/* updated atomically */
259789Sahrens 	clock_t			b_arc_access;
260789Sahrens 
261789Sahrens 	/* self protecting */
262789Sahrens 	refcount_t		b_refcnt;
263789Sahrens };
264789Sahrens 
2651544Seschrock static arc_buf_t *arc_eviction_list;
2661544Seschrock static kmutex_t arc_eviction_mtx;
2672887Smaybee static arc_buf_hdr_t arc_eviction_hdr;
2682688Smaybee static void arc_get_data_buf(arc_buf_t *buf);
2692688Smaybee static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
2701544Seschrock 
2711544Seschrock #define	GHOST_STATE(state)	\
2721544Seschrock 	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)
2731544Seschrock 
274789Sahrens /*
275789Sahrens  * Private ARC flags.  These are ARC-only flags that will show up
276789Sahrens  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
277789Sahrens  * be passed in as arc_flags in things like arc_read.  However, these flags
278789Sahrens  * should never be passed and should only be set by ARC code.  When adding new
279789Sahrens  * public flags, make sure not to smash the private ones.
280789Sahrens  */
281789Sahrens 
2821544Seschrock #define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
283789Sahrens #define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
284789Sahrens #define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
285789Sahrens #define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
2861544Seschrock #define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
2872391Smaybee #define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
288789Sahrens 
2891544Seschrock #define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
290789Sahrens #define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
291789Sahrens #define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
292789Sahrens #define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
2931544Seschrock #define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
294789Sahrens 
295789Sahrens /*
296789Sahrens  * Hash table routines
297789Sahrens  */
298789Sahrens 
299789Sahrens #define	HT_LOCK_PAD	64
300789Sahrens 
301789Sahrens struct ht_lock {
302789Sahrens 	kmutex_t	ht_lock;
303789Sahrens #ifdef _KERNEL
304789Sahrens 	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
305789Sahrens #endif
306789Sahrens };
307789Sahrens 
308789Sahrens #define	BUF_LOCKS 256
309789Sahrens typedef struct buf_hash_table {
310789Sahrens 	uint64_t ht_mask;
311789Sahrens 	arc_buf_hdr_t **ht_table;
312789Sahrens 	struct ht_lock ht_locks[BUF_LOCKS];
313789Sahrens } buf_hash_table_t;
314789Sahrens 
315789Sahrens static buf_hash_table_t buf_hash_table;
316789Sahrens 
317789Sahrens #define	BUF_HASH_INDEX(spa, dva, birth) \
318789Sahrens 	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
319789Sahrens #define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
320789Sahrens #define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
321789Sahrens #define	HDR_LOCK(buf) \
322789Sahrens 	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
323789Sahrens 
324789Sahrens uint64_t zfs_crc64_table[256];
325789Sahrens 
326789Sahrens static uint64_t
327789Sahrens buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
328789Sahrens {
329789Sahrens 	uintptr_t spav = (uintptr_t)spa;
330789Sahrens 	uint8_t *vdva = (uint8_t *)dva;
331789Sahrens 	uint64_t crc = -1ULL;
332789Sahrens 	int i;
333789Sahrens 
334789Sahrens 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
335789Sahrens 
336789Sahrens 	for (i = 0; i < sizeof (dva_t); i++)
337789Sahrens 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
338789Sahrens 
339789Sahrens 	crc ^= (spav>>8) ^ birth;
340789Sahrens 
341789Sahrens 	return (crc);
342789Sahrens }
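/*
 * The hash above folds the 128-bit DVA through the ZFS CRC64 table and
 * then mixes in the spa pointer and the birth txg; BUF_HASH_INDEX()
 * masks the result down to a slot in buf_hash_table.
 */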
343789Sahrens 
344789Sahrens #define	BUF_EMPTY(buf)						\
345789Sahrens 	((buf)->b_dva.dva_word[0] == 0 &&			\
346789Sahrens 	(buf)->b_dva.dva_word[1] == 0 &&			\
347789Sahrens 	(buf)->b_birth == 0)
348789Sahrens 
349789Sahrens #define	BUF_EQUAL(spa, dva, birth, buf)				\
350789Sahrens 	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
351789Sahrens 	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
352789Sahrens 	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
353789Sahrens 
354789Sahrens static arc_buf_hdr_t *
355789Sahrens buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
356789Sahrens {
357789Sahrens 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
358789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
359789Sahrens 	arc_buf_hdr_t *buf;
360789Sahrens 
361789Sahrens 	mutex_enter(hash_lock);
362789Sahrens 	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
363789Sahrens 	    buf = buf->b_hash_next) {
364789Sahrens 		if (BUF_EQUAL(spa, dva, birth, buf)) {
365789Sahrens 			*lockp = hash_lock;
366789Sahrens 			return (buf);
367789Sahrens 		}
368789Sahrens 	}
369789Sahrens 	mutex_exit(hash_lock);
370789Sahrens 	*lockp = NULL;
371789Sahrens 	return (NULL);
372789Sahrens }
373789Sahrens 
374789Sahrens /*
375789Sahrens  * Insert an entry into the hash table.  If there is already an element
376789Sahrens  * equal to elem in the hash table, then the already existing element
377789Sahrens  * will be returned and the new element will not be inserted.
378789Sahrens  * Otherwise returns NULL.
379789Sahrens  */
380789Sahrens static arc_buf_hdr_t *
381789Sahrens buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
382789Sahrens {
383789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
384789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
385789Sahrens 	arc_buf_hdr_t *fbuf;
386789Sahrens 	uint32_t max, i;
387789Sahrens 
3881544Seschrock 	ASSERT(!HDR_IN_HASH_TABLE(buf));
389789Sahrens 	*lockp = hash_lock;
390789Sahrens 	mutex_enter(hash_lock);
391789Sahrens 	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
392789Sahrens 	    fbuf = fbuf->b_hash_next, i++) {
393789Sahrens 		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
394789Sahrens 			return (fbuf);
395789Sahrens 	}
396789Sahrens 
397789Sahrens 	buf->b_hash_next = buf_hash_table.ht_table[idx];
398789Sahrens 	buf_hash_table.ht_table[idx] = buf;
3991544Seschrock 	buf->b_flags |= ARC_IN_HASH_TABLE;
400789Sahrens 
401789Sahrens 	/* collect some hash table performance data */
402789Sahrens 	if (i > 0) {
403789Sahrens 		atomic_add_64(&arc.hash_collisions, 1);
404789Sahrens 		if (i == 1)
405789Sahrens 			atomic_add_64(&arc.hash_chains, 1);
406789Sahrens 	}
407789Sahrens 	while (i > (max = arc.hash_chain_max) &&
408789Sahrens 	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
409789Sahrens 		continue;
410789Sahrens 	}
411789Sahrens 	atomic_add_64(&arc.hash_elements, 1);
412789Sahrens 	if (arc.hash_elements > arc.hash_elements_max)
413789Sahrens 		atomic_add_64(&arc.hash_elements_max, 1);
414789Sahrens 
415789Sahrens 	return (NULL);
416789Sahrens }
417789Sahrens 
418789Sahrens static void
419789Sahrens buf_hash_remove(arc_buf_hdr_t *buf)
420789Sahrens {
421789Sahrens 	arc_buf_hdr_t *fbuf, **bufp;
422789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
423789Sahrens 
424789Sahrens 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
4251544Seschrock 	ASSERT(HDR_IN_HASH_TABLE(buf));
426789Sahrens 
427789Sahrens 	bufp = &buf_hash_table.ht_table[idx];
428789Sahrens 	while ((fbuf = *bufp) != buf) {
429789Sahrens 		ASSERT(fbuf != NULL);
430789Sahrens 		bufp = &fbuf->b_hash_next;
431789Sahrens 	}
432789Sahrens 	*bufp = buf->b_hash_next;
433789Sahrens 	buf->b_hash_next = NULL;
4341544Seschrock 	buf->b_flags &= ~ARC_IN_HASH_TABLE;
435789Sahrens 
436789Sahrens 	/* collect some hash table performance data */
437789Sahrens 	atomic_add_64(&arc.hash_elements, -1);
438789Sahrens 	if (buf_hash_table.ht_table[idx] &&
439789Sahrens 	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
440789Sahrens 		atomic_add_64(&arc.hash_chains, -1);
441789Sahrens }
442789Sahrens 
443789Sahrens /*
444789Sahrens  * Global data structures and functions for the buf kmem cache.
445789Sahrens  */
446789Sahrens static kmem_cache_t *hdr_cache;
447789Sahrens static kmem_cache_t *buf_cache;
448789Sahrens 
449789Sahrens static void
450789Sahrens buf_fini(void)
451789Sahrens {
452789Sahrens 	int i;
453789Sahrens 
454789Sahrens 	kmem_free(buf_hash_table.ht_table,
455789Sahrens 	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
456789Sahrens 	for (i = 0; i < BUF_LOCKS; i++)
457789Sahrens 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
458789Sahrens 	kmem_cache_destroy(hdr_cache);
459789Sahrens 	kmem_cache_destroy(buf_cache);
460789Sahrens }
461789Sahrens 
462789Sahrens /*
463789Sahrens  * Constructor callback - called when the cache is empty
464789Sahrens  * and a new buf is requested.
465789Sahrens  */
466789Sahrens /* ARGSUSED */
467789Sahrens static int
468789Sahrens hdr_cons(void *vbuf, void *unused, int kmflag)
469789Sahrens {
470789Sahrens 	arc_buf_hdr_t *buf = vbuf;
471789Sahrens 
472789Sahrens 	bzero(buf, sizeof (arc_buf_hdr_t));
473789Sahrens 	refcount_create(&buf->b_refcnt);
474789Sahrens 	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
475789Sahrens 	return (0);
476789Sahrens }
477789Sahrens 
478789Sahrens /*
479789Sahrens  * Destructor callback - called when a cached buf is
480789Sahrens  * no longer required.
481789Sahrens  */
482789Sahrens /* ARGSUSED */
483789Sahrens static void
484789Sahrens hdr_dest(void *vbuf, void *unused)
485789Sahrens {
486789Sahrens 	arc_buf_hdr_t *buf = vbuf;
487789Sahrens 
488789Sahrens 	refcount_destroy(&buf->b_refcnt);
489789Sahrens 	cv_destroy(&buf->b_cv);
490789Sahrens }
491789Sahrens 
492789Sahrens /*
493789Sahrens  * Reclaim callback -- invoked when memory is low.
494789Sahrens  */
495789Sahrens /* ARGSUSED */
496789Sahrens static void
497789Sahrens hdr_recl(void *unused)
498789Sahrens {
499789Sahrens 	dprintf("hdr_recl called\n");
5003158Smaybee 	/*
5013158Smaybee 	 * umem calls the reclaim func when we destroy the buf cache,
5023158Smaybee 	 * which is after we do arc_fini().
5033158Smaybee 	 */
5043158Smaybee 	if (!arc_dead)
5053158Smaybee 		cv_signal(&arc_reclaim_thr_cv);
506789Sahrens }
507789Sahrens 
508789Sahrens static void
509789Sahrens buf_init(void)
510789Sahrens {
511789Sahrens 	uint64_t *ct;
5121544Seschrock 	uint64_t hsize = 1ULL << 12;
513789Sahrens 	int i, j;
514789Sahrens 
515789Sahrens 	/*
516789Sahrens 	 * The hash table is big enough to fill all of physical memory
5171544Seschrock 	 * with an average 64K block size.  The table will take up
5181544Seschrock 	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
519789Sahrens 	 */
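	/*
	 * Worked example (hypothetical machine): with 4GB of physical
	 * memory, the loop below grows hsize until hsize * 64K >= 4GB,
	 * i.e. hsize == 65536 slots, which is 512KB of table with
	 * 8-byte pointers -- matching the 128KB-per-GB figure above.
	 */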
5201544Seschrock 	while (hsize * 65536 < physmem * PAGESIZE)
521789Sahrens 		hsize <<= 1;
5221544Seschrock retry:
523789Sahrens 	buf_hash_table.ht_mask = hsize - 1;
5241544Seschrock 	buf_hash_table.ht_table =
5251544Seschrock 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
5261544Seschrock 	if (buf_hash_table.ht_table == NULL) {
5271544Seschrock 		ASSERT(hsize > (1ULL << 8));
5281544Seschrock 		hsize >>= 1;
5291544Seschrock 		goto retry;
5301544Seschrock 	}
531789Sahrens 
532789Sahrens 	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
533789Sahrens 	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
534789Sahrens 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
535789Sahrens 	    0, NULL, NULL, NULL, NULL, NULL, 0);
536789Sahrens 
537789Sahrens 	for (i = 0; i < 256; i++)
538789Sahrens 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
539789Sahrens 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
540789Sahrens 
541789Sahrens 	for (i = 0; i < BUF_LOCKS; i++) {
542789Sahrens 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
543789Sahrens 		    NULL, MUTEX_DEFAULT, NULL);
544789Sahrens 	}
545789Sahrens }
546789Sahrens 
547789Sahrens #define	ARC_MINTIME	(hz>>4) /* 62 ms */
548789Sahrens 
549789Sahrens static void
5503093Sahrens arc_cksum_verify(arc_buf_t *buf)
5513093Sahrens {
5523093Sahrens 	zio_cksum_t zc;
5533093Sahrens 
554*3312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5553093Sahrens 		return;
5563093Sahrens 
5573093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5583265Sahrens 	if (buf->b_hdr->b_freeze_cksum == NULL ||
5593265Sahrens 	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
5603093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
5613093Sahrens 		return;
5623093Sahrens 	}
5633093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
5643093Sahrens 	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
5653093Sahrens 		panic("buffer modified while frozen!");
5663093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
5673093Sahrens }
5683093Sahrens 
5693093Sahrens static void
5703093Sahrens arc_cksum_compute(arc_buf_t *buf)
5713093Sahrens {
572*3312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5733093Sahrens 		return;
5743093Sahrens 
5753093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5763093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
5773093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
5783093Sahrens 		return;
5793093Sahrens 	}
5803093Sahrens 	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
5813093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
5823093Sahrens 	    buf->b_hdr->b_freeze_cksum);
5833093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
5843093Sahrens }
5853093Sahrens 
5863093Sahrens void
5873093Sahrens arc_buf_thaw(arc_buf_t *buf)
5883093Sahrens {
589*3312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5903093Sahrens 		return;
5913093Sahrens 
5923093Sahrens 	if (buf->b_hdr->b_state != arc.anon)
5933093Sahrens 		panic("modifying non-anon buffer!");
5943093Sahrens 	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
5953093Sahrens 		panic("modifying buffer while i/o in progress!");
5963093Sahrens 	arc_cksum_verify(buf);
5973093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5983093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
5993093Sahrens 		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
6003093Sahrens 		buf->b_hdr->b_freeze_cksum = NULL;
6013093Sahrens 	}
6023093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
6033093Sahrens }
6043093Sahrens 
6053093Sahrens void
6063093Sahrens arc_buf_freeze(arc_buf_t *buf)
6073093Sahrens {
608*3312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
609*3312Sahrens 		return;
610*3312Sahrens 
6113093Sahrens 	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
6123093Sahrens 	    buf->b_hdr->b_state == arc.anon);
6133093Sahrens 	arc_cksum_compute(buf);
6143093Sahrens }
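/*
 * Summary of the freeze/thaw machinery above (a debug aid only): when
 * ZFS_DEBUG_MODIFY is set in zfs_flags, arc_cksum_compute() records a
 * fletcher-2 checksum of a buffer's contents, arc_cksum_verify()
 * recomputes and compares it (panicking with "buffer modified while
 * frozen!" on mismatch), and arc_buf_thaw() discards the checksum so
 * that an anonymous buffer may legitimately be modified again.
 */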
6153093Sahrens 
6163093Sahrens static void
617789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
618789Sahrens {
619789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
620789Sahrens 
621789Sahrens 	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
622789Sahrens 	    (ab->b_state != arc.anon)) {
6231544Seschrock 		int delta = ab->b_size * ab->b_datacnt;
624789Sahrens 
625789Sahrens 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
626789Sahrens 		mutex_enter(&ab->b_state->mtx);
627789Sahrens 		ASSERT(list_link_active(&ab->b_arc_node));
628789Sahrens 		list_remove(&ab->b_state->list, ab);
6291544Seschrock 		if (GHOST_STATE(ab->b_state)) {
6301544Seschrock 			ASSERT3U(ab->b_datacnt, ==, 0);
6311544Seschrock 			ASSERT3P(ab->b_buf, ==, NULL);
6321544Seschrock 			delta = ab->b_size;
6331544Seschrock 		}
6341544Seschrock 		ASSERT(delta > 0);
6351544Seschrock 		ASSERT3U(ab->b_state->lsize, >=, delta);
6361544Seschrock 		atomic_add_64(&ab->b_state->lsize, -delta);
637789Sahrens 		mutex_exit(&ab->b_state->mtx);
6382391Smaybee 		/* remove the prefetch flag if we get a reference */
6392391Smaybee 		if (ab->b_flags & ARC_PREFETCH)
6402391Smaybee 			ab->b_flags &= ~ARC_PREFETCH;
641789Sahrens 	}
642789Sahrens }
643789Sahrens 
644789Sahrens static int
645789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
646789Sahrens {
647789Sahrens 	int cnt;
648789Sahrens 
6491544Seschrock 	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
6501544Seschrock 	ASSERT(!GHOST_STATE(ab->b_state));
651789Sahrens 
652789Sahrens 	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
653789Sahrens 	    (ab->b_state != arc.anon)) {
654789Sahrens 
655789Sahrens 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
656789Sahrens 		mutex_enter(&ab->b_state->mtx);
657789Sahrens 		ASSERT(!list_link_active(&ab->b_arc_node));
658789Sahrens 		list_insert_head(&ab->b_state->list, ab);
6591544Seschrock 		ASSERT(ab->b_datacnt > 0);
6601544Seschrock 		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
6611544Seschrock 		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
662789Sahrens 		mutex_exit(&ab->b_state->mtx);
663789Sahrens 	}
664789Sahrens 	return (cnt);
665789Sahrens }
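/*
 * Taken together, add_reference() and remove_reference() keep lsize
 * consistent: the first hold on a cached header pulls it off its
 * state's evictable list and subtracts its bytes from lsize, and
 * dropping the last hold puts it back on the list and adds them again.
 */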
666789Sahrens 
667789Sahrens /*
668789Sahrens  * Move the supplied buffer to the indicated state.  The mutex
669789Sahrens  * for the buffer must be held by the caller.
670789Sahrens  */
671789Sahrens static void
6721544Seschrock arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
673789Sahrens {
6741544Seschrock 	arc_state_t *old_state = ab->b_state;
6751544Seschrock 	int refcnt = refcount_count(&ab->b_refcnt);
6761544Seschrock 	int from_delta, to_delta;
677789Sahrens 
678789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
6791544Seschrock 	ASSERT(new_state != old_state);
6801544Seschrock 	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
6811544Seschrock 	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
6821544Seschrock 
6831544Seschrock 	from_delta = to_delta = ab->b_datacnt * ab->b_size;
684789Sahrens 
685789Sahrens 	/*
686789Sahrens 	 * If this buffer is evictable, transfer it from the
687789Sahrens 	 * old state list to the new state list.
688789Sahrens 	 */
6891544Seschrock 	if (refcnt == 0) {
6901544Seschrock 		if (old_state != arc.anon) {
6911544Seschrock 			int use_mutex = !MUTEX_HELD(&old_state->mtx);
6921544Seschrock 
6931544Seschrock 			if (use_mutex)
6941544Seschrock 				mutex_enter(&old_state->mtx);
6951544Seschrock 
6961544Seschrock 			ASSERT(list_link_active(&ab->b_arc_node));
6971544Seschrock 			list_remove(&old_state->list, ab);
698789Sahrens 
6992391Smaybee 			/*
7002391Smaybee 			 * If prefetching out of the ghost cache,
7012391Smaybee 			 * we will have a non-null datacnt.
7022391Smaybee 			 */
7032391Smaybee 			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
7042391Smaybee 				/* ghost elements have a ghost size */
7051544Seschrock 				ASSERT(ab->b_buf == NULL);
7061544Seschrock 				from_delta = ab->b_size;
707789Sahrens 			}
7081544Seschrock 			ASSERT3U(old_state->lsize, >=, from_delta);
7091544Seschrock 			atomic_add_64(&old_state->lsize, -from_delta);
7101544Seschrock 
7111544Seschrock 			if (use_mutex)
7121544Seschrock 				mutex_exit(&old_state->mtx);
713789Sahrens 		}
714789Sahrens 		if (new_state != arc.anon) {
7151544Seschrock 			int use_mutex = !MUTEX_HELD(&new_state->mtx);
716789Sahrens 
7171544Seschrock 			if (use_mutex)
718789Sahrens 				mutex_enter(&new_state->mtx);
7191544Seschrock 
720789Sahrens 			list_insert_head(&new_state->list, ab);
7211544Seschrock 
7221544Seschrock 			/* ghost elements have a ghost size */
7231544Seschrock 			if (GHOST_STATE(new_state)) {
7241544Seschrock 				ASSERT(ab->b_datacnt == 0);
7251544Seschrock 				ASSERT(ab->b_buf == NULL);
7261544Seschrock 				to_delta = ab->b_size;
7271544Seschrock 			}
7281544Seschrock 			atomic_add_64(&new_state->lsize, to_delta);
7291544Seschrock 			ASSERT3U(new_state->size + to_delta, >=,
7301544Seschrock 			    new_state->lsize);
7311544Seschrock 
7321544Seschrock 			if (use_mutex)
733789Sahrens 				mutex_exit(&new_state->mtx);
734789Sahrens 		}
735789Sahrens 	}
736789Sahrens 
737789Sahrens 	ASSERT(!BUF_EMPTY(ab));
7381544Seschrock 	if (new_state == arc.anon && old_state != arc.anon) {
739789Sahrens 		buf_hash_remove(ab);
740789Sahrens 	}
741789Sahrens 
7421544Seschrock 	/* adjust state sizes */
7431544Seschrock 	if (to_delta)
7441544Seschrock 		atomic_add_64(&new_state->size, to_delta);
7451544Seschrock 	if (from_delta) {
7461544Seschrock 		ASSERT3U(old_state->size, >=, from_delta);
7471544Seschrock 		atomic_add_64(&old_state->size, -from_delta);
748789Sahrens 	}
749789Sahrens 	ab->b_state = new_state;
750789Sahrens }
751789Sahrens 
752789Sahrens arc_buf_t *
7533290Sjohansen arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
754789Sahrens {
755789Sahrens 	arc_buf_hdr_t *hdr;
756789Sahrens 	arc_buf_t *buf;
757789Sahrens 
758789Sahrens 	ASSERT3U(size, >, 0);
759789Sahrens 	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
760789Sahrens 	ASSERT(BUF_EMPTY(hdr));
761789Sahrens 	hdr->b_size = size;
7623290Sjohansen 	hdr->b_type = type;
763789Sahrens 	hdr->b_spa = spa;
764789Sahrens 	hdr->b_state = arc.anon;
765789Sahrens 	hdr->b_arc_access = 0;
766789Sahrens 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
767789Sahrens 	buf->b_hdr = hdr;
7682688Smaybee 	buf->b_data = NULL;
7691544Seschrock 	buf->b_efunc = NULL;
7701544Seschrock 	buf->b_private = NULL;
771789Sahrens 	buf->b_next = NULL;
772789Sahrens 	hdr->b_buf = buf;
7732688Smaybee 	arc_get_data_buf(buf);
7741544Seschrock 	hdr->b_datacnt = 1;
775789Sahrens 	hdr->b_flags = 0;
776789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
777789Sahrens 	(void) refcount_add(&hdr->b_refcnt, tag);
778789Sahrens 
779789Sahrens 	return (buf);
780789Sahrens }
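/*
 * Hypothetical usage sketch (the tag and size below are made up): a
 * caller that wants a scratch 16K data buffer might do
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, 16384, tag, ARC_BUFC_DATA);
 *	... fill buf->b_data ...
 *	arc_buf_free(buf, tag);
 *
 * The buffer remains anonymous (arc.anon) and ref'd by the tag until
 * it is either freed or written out and hashed into the cache.
 */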
781789Sahrens 
7822688Smaybee static arc_buf_t *
7832688Smaybee arc_buf_clone(arc_buf_t *from)
7841544Seschrock {
7852688Smaybee 	arc_buf_t *buf;
7862688Smaybee 	arc_buf_hdr_t *hdr = from->b_hdr;
7872688Smaybee 	uint64_t size = hdr->b_size;
7881544Seschrock 
7892688Smaybee 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
7902688Smaybee 	buf->b_hdr = hdr;
7912688Smaybee 	buf->b_data = NULL;
7922688Smaybee 	buf->b_efunc = NULL;
7932688Smaybee 	buf->b_private = NULL;
7942688Smaybee 	buf->b_next = hdr->b_buf;
7952688Smaybee 	hdr->b_buf = buf;
7962688Smaybee 	arc_get_data_buf(buf);
7972688Smaybee 	bcopy(from->b_data, buf->b_data, size);
7982688Smaybee 	hdr->b_datacnt += 1;
7992688Smaybee 	return (buf);
8001544Seschrock }
8011544Seschrock 
8021544Seschrock void
8031544Seschrock arc_buf_add_ref(arc_buf_t *buf, void* tag)
8041544Seschrock {
8052887Smaybee 	arc_buf_hdr_t *hdr;
8061544Seschrock 	kmutex_t *hash_lock;
8071544Seschrock 
8082724Smaybee 	/*
8092724Smaybee 	 * Check to see if this buffer is currently being evicted via
8102887Smaybee 	 * arc_do_user_evicts().
8112724Smaybee 	 */
8122887Smaybee 	mutex_enter(&arc_eviction_mtx);
8132887Smaybee 	hdr = buf->b_hdr;
8142887Smaybee 	if (hdr == NULL) {
8152887Smaybee 		mutex_exit(&arc_eviction_mtx);
8162724Smaybee 		return;
8172887Smaybee 	}
8182887Smaybee 	hash_lock = HDR_LOCK(hdr);
8192887Smaybee 	mutex_exit(&arc_eviction_mtx);
8202724Smaybee 
8212724Smaybee 	mutex_enter(hash_lock);
8221544Seschrock 	if (buf->b_data == NULL) {
8231544Seschrock 		/*
8241544Seschrock 		 * This buffer is evicted.
8251544Seschrock 		 */
8262724Smaybee 		mutex_exit(hash_lock);
8271544Seschrock 		return;
8281544Seschrock 	}
8291544Seschrock 
8302724Smaybee 	ASSERT(buf->b_hdr == hdr);
8312724Smaybee 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
8321544Seschrock 	add_reference(hdr, hash_lock, tag);
8332688Smaybee 	arc_access(hdr, hash_lock);
8342688Smaybee 	mutex_exit(hash_lock);
8351544Seschrock 	atomic_add_64(&arc.hits, 1);
8361544Seschrock }
8371544Seschrock 
838789Sahrens static void
8392688Smaybee arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
8401544Seschrock {
8411544Seschrock 	arc_buf_t **bufp;
8421544Seschrock 
8431544Seschrock 	/* free up data associated with the buf */
8441544Seschrock 	if (buf->b_data) {
8451544Seschrock 		arc_state_t *state = buf->b_hdr->b_state;
8461544Seschrock 		uint64_t size = buf->b_hdr->b_size;
8473290Sjohansen 		arc_buf_contents_t type = buf->b_hdr->b_type;
8481544Seschrock 
8493093Sahrens 		arc_cksum_verify(buf);
8502688Smaybee 		if (!recycle) {
8513290Sjohansen 			if (type == ARC_BUFC_METADATA) {
8523290Sjohansen 				zio_buf_free(buf->b_data, size);
8533290Sjohansen 			} else {
8543290Sjohansen 				ASSERT(type == ARC_BUFC_DATA);
8553290Sjohansen 				zio_data_buf_free(buf->b_data, size);
8563290Sjohansen 			}
8572688Smaybee 			atomic_add_64(&arc.size, -size);
8582688Smaybee 		}
8591544Seschrock 		if (list_link_active(&buf->b_hdr->b_arc_node)) {
8601544Seschrock 			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
8611544Seschrock 			ASSERT(state != arc.anon);
8621544Seschrock 			ASSERT3U(state->lsize, >=, size);
8631544Seschrock 			atomic_add_64(&state->lsize, -size);
8641544Seschrock 		}
8651544Seschrock 		ASSERT3U(state->size, >=, size);
8661544Seschrock 		atomic_add_64(&state->size, -size);
8671544Seschrock 		buf->b_data = NULL;
8681544Seschrock 		ASSERT(buf->b_hdr->b_datacnt > 0);
8691544Seschrock 		buf->b_hdr->b_datacnt -= 1;
8701544Seschrock 	}
8711544Seschrock 
8721544Seschrock 	/* only remove the buf if requested */
8731544Seschrock 	if (!all)
8741544Seschrock 		return;
8751544Seschrock 
8761544Seschrock 	/* remove the buf from the hdr list */
8771544Seschrock 	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
8781544Seschrock 		continue;
8791544Seschrock 	*bufp = buf->b_next;
8801544Seschrock 
8811544Seschrock 	ASSERT(buf->b_efunc == NULL);
8821544Seschrock 
8831544Seschrock 	/* clean up the buf */
8841544Seschrock 	buf->b_hdr = NULL;
8851544Seschrock 	kmem_cache_free(buf_cache, buf);
8861544Seschrock }
8871544Seschrock 
8881544Seschrock static void
8891544Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr)
890789Sahrens {
891789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
892789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
8931544Seschrock 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
894789Sahrens 
895789Sahrens 	if (!BUF_EMPTY(hdr)) {
8961544Seschrock 		ASSERT(!HDR_IN_HASH_TABLE(hdr));
897789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
898789Sahrens 		hdr->b_birth = 0;
899789Sahrens 		hdr->b_cksum0 = 0;
900789Sahrens 	}
9011544Seschrock 	while (hdr->b_buf) {
902789Sahrens 		arc_buf_t *buf = hdr->b_buf;
903789Sahrens 
9041544Seschrock 		if (buf->b_efunc) {
9051544Seschrock 			mutex_enter(&arc_eviction_mtx);
9061544Seschrock 			ASSERT(buf->b_hdr != NULL);
9072688Smaybee 			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
9081544Seschrock 			hdr->b_buf = buf->b_next;
9092887Smaybee 			buf->b_hdr = &arc_eviction_hdr;
9101544Seschrock 			buf->b_next = arc_eviction_list;
9111544Seschrock 			arc_eviction_list = buf;
9121544Seschrock 			mutex_exit(&arc_eviction_mtx);
9131544Seschrock 		} else {
9142688Smaybee 			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
9151544Seschrock 		}
916789Sahrens 	}
9173093Sahrens 	if (hdr->b_freeze_cksum != NULL) {
9183093Sahrens 		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
9193093Sahrens 		hdr->b_freeze_cksum = NULL;
9203093Sahrens 	}
9211544Seschrock 
922789Sahrens 	ASSERT(!list_link_active(&hdr->b_arc_node));
923789Sahrens 	ASSERT3P(hdr->b_hash_next, ==, NULL);
924789Sahrens 	ASSERT3P(hdr->b_acb, ==, NULL);
925789Sahrens 	kmem_cache_free(hdr_cache, hdr);
926789Sahrens }
927789Sahrens 
928789Sahrens void
929789Sahrens arc_buf_free(arc_buf_t *buf, void *tag)
930789Sahrens {
931789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
9321544Seschrock 	int hashed = hdr->b_state != arc.anon;
9331544Seschrock 
9341544Seschrock 	ASSERT(buf->b_efunc == NULL);
9351544Seschrock 	ASSERT(buf->b_data != NULL);
9361544Seschrock 
9371544Seschrock 	if (hashed) {
9381544Seschrock 		kmutex_t *hash_lock = HDR_LOCK(hdr);
9391544Seschrock 
9401544Seschrock 		mutex_enter(hash_lock);
9411544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
9421544Seschrock 		if (hdr->b_datacnt > 1)
9432688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
9441544Seschrock 		else
9451544Seschrock 			hdr->b_flags |= ARC_BUF_AVAILABLE;
9461544Seschrock 		mutex_exit(hash_lock);
9471544Seschrock 	} else if (HDR_IO_IN_PROGRESS(hdr)) {
9481544Seschrock 		int destroy_hdr;
9491544Seschrock 		/*
9501544Seschrock 		 * We are in the middle of an async write.  Don't destroy
9511544Seschrock 		 * this buffer unless the write completes before we finish
9521544Seschrock 		 * decrementing the reference count.
9531544Seschrock 		 */
9541544Seschrock 		mutex_enter(&arc_eviction_mtx);
9551544Seschrock 		(void) remove_reference(hdr, NULL, tag);
9561544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
9571544Seschrock 		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
9581544Seschrock 		mutex_exit(&arc_eviction_mtx);
9591544Seschrock 		if (destroy_hdr)
9601544Seschrock 			arc_hdr_destroy(hdr);
9611544Seschrock 	} else {
9621544Seschrock 		if (remove_reference(hdr, NULL, tag) > 0) {
9631544Seschrock 			ASSERT(HDR_IO_ERROR(hdr));
9642688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
9651544Seschrock 		} else {
9661544Seschrock 			arc_hdr_destroy(hdr);
9671544Seschrock 		}
9681544Seschrock 	}
9691544Seschrock }
9701544Seschrock 
9711544Seschrock int
9721544Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag)
9731544Seschrock {
9741544Seschrock 	arc_buf_hdr_t *hdr = buf->b_hdr;
975789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
9761544Seschrock 	int no_callback = (buf->b_efunc == NULL);
9771544Seschrock 
9781544Seschrock 	if (hdr->b_state == arc.anon) {
9791544Seschrock 		arc_buf_free(buf, tag);
9801544Seschrock 		return (no_callback);
9811544Seschrock 	}
982789Sahrens 
983789Sahrens 	mutex_enter(hash_lock);
9841544Seschrock 	ASSERT(hdr->b_state != arc.anon);
9851544Seschrock 	ASSERT(buf->b_data != NULL);
986789Sahrens 
9871544Seschrock 	(void) remove_reference(hdr, hash_lock, tag);
9881544Seschrock 	if (hdr->b_datacnt > 1) {
9891544Seschrock 		if (no_callback)
9902688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
9911544Seschrock 	} else if (no_callback) {
9921544Seschrock 		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
9931544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
994789Sahrens 	}
9951544Seschrock 	ASSERT(no_callback || hdr->b_datacnt > 1 ||
9961544Seschrock 	    refcount_is_zero(&hdr->b_refcnt));
997789Sahrens 	mutex_exit(hash_lock);
9981544Seschrock 	return (no_callback);
999789Sahrens }
1000789Sahrens 
1001789Sahrens int
1002789Sahrens arc_buf_size(arc_buf_t *buf)
1003789Sahrens {
1004789Sahrens 	return (buf->b_hdr->b_size);
1005789Sahrens }
1006789Sahrens 
1007789Sahrens /*
1008789Sahrens  * Evict buffers from list until we've removed the specified number of
1009789Sahrens  * bytes.  Move the removed buffers to the appropriate evict state.
10102688Smaybee  * If the recycle flag is set, then attempt to "recycle" a buffer:
10112688Smaybee  * - look for a buffer to evict that is `bytes' long.
10122688Smaybee  * - return the data block from this buffer rather than freeing it.
10132688Smaybee  * This flag is used by callers that are trying to make space for a
10142688Smaybee  * new buffer in a full arc cache.
1015789Sahrens  */
10162688Smaybee static void *
10173290Sjohansen arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
10183290Sjohansen     arc_buf_contents_t type)
1019789Sahrens {
1020789Sahrens 	arc_state_t *evicted_state;
10212688Smaybee 	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
10222918Smaybee 	arc_buf_hdr_t *ab, *ab_prev = NULL;
1023789Sahrens 	kmutex_t *hash_lock;
10242688Smaybee 	boolean_t have_lock;
10252918Smaybee 	void *stolen = NULL;
1026789Sahrens 
10271544Seschrock 	ASSERT(state == arc.mru || state == arc.mfu);
1028789Sahrens 
10291544Seschrock 	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
1030789Sahrens 
1031789Sahrens 	mutex_enter(&state->mtx);
1032789Sahrens 	mutex_enter(&evicted_state->mtx);
1033789Sahrens 
1034789Sahrens 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
1035789Sahrens 		ab_prev = list_prev(&state->list, ab);
10362391Smaybee 		/* prefetch buffers have a minimum lifespan */
10372688Smaybee 		if (HDR_IO_IN_PROGRESS(ab) ||
10382688Smaybee 		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
10392688Smaybee 		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
10402391Smaybee 			skipped++;
10412391Smaybee 			continue;
10422391Smaybee 		}
10432918Smaybee 		/* "lookahead" for better eviction candidate */
10442918Smaybee 		if (recycle && ab->b_size != bytes &&
10452918Smaybee 		    ab_prev && ab_prev->b_size == bytes)
10462688Smaybee 			continue;
1047789Sahrens 		hash_lock = HDR_LOCK(ab);
10482688Smaybee 		have_lock = MUTEX_HELD(hash_lock);
10492688Smaybee 		if (have_lock || mutex_tryenter(hash_lock)) {
1050789Sahrens 			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
10511544Seschrock 			ASSERT(ab->b_datacnt > 0);
10521544Seschrock 			while (ab->b_buf) {
10531544Seschrock 				arc_buf_t *buf = ab->b_buf;
10542688Smaybee 				if (buf->b_data) {
10551544Seschrock 					bytes_evicted += ab->b_size;
10563290Sjohansen 					if (recycle && ab->b_type == type &&
10573290Sjohansen 					    ab->b_size == bytes) {
10582918Smaybee 						stolen = buf->b_data;
10592918Smaybee 						recycle = FALSE;
10602918Smaybee 					}
10612688Smaybee 				}
10621544Seschrock 				if (buf->b_efunc) {
10631544Seschrock 					mutex_enter(&arc_eviction_mtx);
10642918Smaybee 					arc_buf_destroy(buf,
10652918Smaybee 					    buf->b_data == stolen, FALSE);
10661544Seschrock 					ab->b_buf = buf->b_next;
10672887Smaybee 					buf->b_hdr = &arc_eviction_hdr;
10681544Seschrock 					buf->b_next = arc_eviction_list;
10691544Seschrock 					arc_eviction_list = buf;
10701544Seschrock 					mutex_exit(&arc_eviction_mtx);
10711544Seschrock 				} else {
10722918Smaybee 					arc_buf_destroy(buf,
10732918Smaybee 					    buf->b_data == stolen, TRUE);
10741544Seschrock 				}
10751544Seschrock 			}
10761544Seschrock 			ASSERT(ab->b_datacnt == 0);
1077789Sahrens 			arc_change_state(evicted_state, ab, hash_lock);
10781544Seschrock 			ASSERT(HDR_IN_HASH_TABLE(ab));
10791544Seschrock 			ab->b_flags = ARC_IN_HASH_TABLE;
1080789Sahrens 			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
10812688Smaybee 			if (!have_lock)
10822688Smaybee 				mutex_exit(hash_lock);
10831544Seschrock 			if (bytes >= 0 && bytes_evicted >= bytes)
1084789Sahrens 				break;
1085789Sahrens 		} else {
10862688Smaybee 			missed += 1;
1087789Sahrens 		}
1088789Sahrens 	}
1089789Sahrens 	mutex_exit(&evicted_state->mtx);
1090789Sahrens 	mutex_exit(&state->mtx);
1091789Sahrens 
1092789Sahrens 	if (bytes_evicted < bytes)
1093789Sahrens 		dprintf("only evicted %lld bytes from %x",
1094789Sahrens 		    (longlong_t)bytes_evicted, state);
1095789Sahrens 
10962688Smaybee 	if (skipped)
10972688Smaybee 		atomic_add_64(&arc.evict_skip, skipped);
10982688Smaybee 	if (missed)
10992688Smaybee 		atomic_add_64(&arc.mutex_miss, missed);
11002918Smaybee 	return (stolen);
1101789Sahrens }
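/*
 * Illustrative recycle case (sizes are hypothetical): a caller about to
 * allocate a 16K ARC_BUFC_DATA buffer in a full cache can call
 * arc_evict(state, 16384, TRUE, ARC_BUFC_DATA); if a 16K buffer of the
 * same type is found on the list, its data block is handed back as the
 * return value and reused directly, instead of being freed here and
 * then allocated again by the caller.
 */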
1102789Sahrens 
1103789Sahrens /*
1104789Sahrens  * Remove buffers from list until we've removed the specified number of
1105789Sahrens  * bytes.  Destroy the buffers that are removed.
1106789Sahrens  */
1107789Sahrens static void
11081544Seschrock arc_evict_ghost(arc_state_t *state, int64_t bytes)
1109789Sahrens {
1110789Sahrens 	arc_buf_hdr_t *ab, *ab_prev;
1111789Sahrens 	kmutex_t *hash_lock;
11121544Seschrock 	uint64_t bytes_deleted = 0;
11131544Seschrock 	uint_t bufs_skipped = 0;
1114789Sahrens 
11151544Seschrock 	ASSERT(GHOST_STATE(state));
1116789Sahrens top:
1117789Sahrens 	mutex_enter(&state->mtx);
1118789Sahrens 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
1119789Sahrens 		ab_prev = list_prev(&state->list, ab);
1120789Sahrens 		hash_lock = HDR_LOCK(ab);
1121789Sahrens 		if (mutex_tryenter(hash_lock)) {
11222391Smaybee 			ASSERT(!HDR_IO_IN_PROGRESS(ab));
11231544Seschrock 			ASSERT(ab->b_buf == NULL);
1124789Sahrens 			arc_change_state(arc.anon, ab, hash_lock);
1125789Sahrens 			mutex_exit(hash_lock);
1126789Sahrens 			atomic_add_64(&arc.deleted, 1);
11271544Seschrock 			bytes_deleted += ab->b_size;
11281544Seschrock 			arc_hdr_destroy(ab);
1129789Sahrens 			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1130789Sahrens 			if (bytes >= 0 && bytes_deleted >= bytes)
1131789Sahrens 				break;
1132789Sahrens 		} else {
1133789Sahrens 			if (bytes < 0) {
1134789Sahrens 				mutex_exit(&state->mtx);
1135789Sahrens 				mutex_enter(hash_lock);
1136789Sahrens 				mutex_exit(hash_lock);
1137789Sahrens 				goto top;
1138789Sahrens 			}
1139789Sahrens 			bufs_skipped += 1;
1140789Sahrens 		}
1141789Sahrens 	}
1142789Sahrens 	mutex_exit(&state->mtx);
1143789Sahrens 
1144789Sahrens 	if (bufs_skipped) {
11452688Smaybee 		atomic_add_64(&arc.mutex_miss, bufs_skipped);
1146789Sahrens 		ASSERT(bytes >= 0);
1147789Sahrens 	}
1148789Sahrens 
1149789Sahrens 	if (bytes_deleted < bytes)
1150789Sahrens 		dprintf("only deleted %lld bytes from %p",
1151789Sahrens 		    (longlong_t)bytes_deleted, state);
1152789Sahrens }
1153789Sahrens 
1154789Sahrens static void
1155789Sahrens arc_adjust(void)
1156789Sahrens {
1157789Sahrens 	int64_t top_sz, mru_over, arc_over;
1158789Sahrens 
11591544Seschrock 	top_sz = arc.anon->size + arc.mru->size;
1160789Sahrens 
11611544Seschrock 	if (top_sz > arc.p && arc.mru->lsize > 0) {
11621544Seschrock 		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
11633290Sjohansen 		(void) arc_evict(arc.mru, toevict, FALSE, ARC_BUFC_UNDEF);
11641544Seschrock 		top_sz = arc.anon->size + arc.mru->size;
1165789Sahrens 	}
1166789Sahrens 
11671544Seschrock 	mru_over = top_sz + arc.mru_ghost->size - arc.c;
1168789Sahrens 
1169789Sahrens 	if (mru_over > 0) {
11701544Seschrock 		if (arc.mru_ghost->lsize > 0) {
11711544Seschrock 			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
11721544Seschrock 			arc_evict_ghost(arc.mru_ghost, todelete);
1173789Sahrens 		}
1174789Sahrens 	}
1175789Sahrens 
1176789Sahrens 	if ((arc_over = arc.size - arc.c) > 0) {
11771544Seschrock 		int64_t tbl_over;
1178789Sahrens 
11791544Seschrock 		if (arc.mfu->lsize > 0) {
11801544Seschrock 			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
11813290Sjohansen 			(void) arc_evict(arc.mfu, toevict, FALSE,
11823290Sjohansen 			    ARC_BUFC_UNDEF);
1183789Sahrens 		}
1184789Sahrens 
11851544Seschrock 		tbl_over = arc.size + arc.mru_ghost->lsize +
11861544Seschrock 		    arc.mfu_ghost->lsize - arc.c*2;
1187789Sahrens 
11881544Seschrock 		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
11891544Seschrock 			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
11901544Seschrock 			arc_evict_ghost(arc.mfu_ghost, todelete);
1191789Sahrens 		}
1192789Sahrens 	}
1193789Sahrens }
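/*
 * arc_adjust() above proceeds in three steps: first it evicts enough
 * from mru to bring anon+mru back down toward the target p, then it
 * trims mru_ghost so that resident data plus that ghost list stays
 * within c, and finally, if total arc size still exceeds c, it evicts
 * from mfu and trims mfu_ghost so that everything tracked (including
 * the ghost lists) stays within roughly 2*c.
 */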
1194789Sahrens 
11951544Seschrock static void
11961544Seschrock arc_do_user_evicts(void)
11971544Seschrock {
11981544Seschrock 	mutex_enter(&arc_eviction_mtx);
11991544Seschrock 	while (arc_eviction_list != NULL) {
12001544Seschrock 		arc_buf_t *buf = arc_eviction_list;
12011544Seschrock 		arc_eviction_list = buf->b_next;
12021544Seschrock 		buf->b_hdr = NULL;
12031544Seschrock 		mutex_exit(&arc_eviction_mtx);
12041544Seschrock 
12051819Smaybee 		if (buf->b_efunc != NULL)
12061819Smaybee 			VERIFY(buf->b_efunc(buf) == 0);
12071544Seschrock 
12081544Seschrock 		buf->b_efunc = NULL;
12091544Seschrock 		buf->b_private = NULL;
12101544Seschrock 		kmem_cache_free(buf_cache, buf);
12111544Seschrock 		mutex_enter(&arc_eviction_mtx);
12121544Seschrock 	}
12131544Seschrock 	mutex_exit(&arc_eviction_mtx);
12141544Seschrock }
12151544Seschrock 
1216789Sahrens /*
1217789Sahrens  * Flush all *evictable* data from the cache.
1218789Sahrens  * NOTE: this will not touch "active" (i.e. referenced) data.
1219789Sahrens  */
1220789Sahrens void
1221789Sahrens arc_flush(void)
1222789Sahrens {
12232688Smaybee 	while (list_head(&arc.mru->list))
12243290Sjohansen 		(void) arc_evict(arc.mru, -1, FALSE, ARC_BUFC_UNDEF);
12252688Smaybee 	while (list_head(&arc.mfu->list))
12263290Sjohansen 		(void) arc_evict(arc.mfu, -1, FALSE, ARC_BUFC_UNDEF);
1227789Sahrens 
12281544Seschrock 	arc_evict_ghost(arc.mru_ghost, -1);
12291544Seschrock 	arc_evict_ghost(arc.mfu_ghost, -1);
12301544Seschrock 
12311544Seschrock 	mutex_enter(&arc_reclaim_thr_lock);
12321544Seschrock 	arc_do_user_evicts();
12331544Seschrock 	mutex_exit(&arc_reclaim_thr_lock);
12341544Seschrock 	ASSERT(arc_eviction_list == NULL);
1235789Sahrens }
1236789Sahrens 
12373158Smaybee int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */
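/*
 * e.g. (hypothetical numbers): with arc.c at 1GB and arc_shrink_shift
 * of 5, arc_shrink() below reduces the target by 1GB >> 5 = 32MB (or
 * by ptob(needfree) in the kernel, whichever is larger).
 */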
12382391Smaybee 
1239789Sahrens void
12403158Smaybee arc_shrink(void)
1241789Sahrens {
12423158Smaybee 	if (arc.c > arc.c_min) {
12433158Smaybee 		uint64_t to_free;
1244789Sahrens 
12452048Sstans #ifdef _KERNEL
12463158Smaybee 		to_free = MAX(arc.c >> arc_shrink_shift, ptob(needfree));
12472048Sstans #else
12483158Smaybee 		to_free = arc.c >> arc_shrink_shift;
12492048Sstans #endif
12503158Smaybee 		if (arc.c > arc.c_min + to_free)
12513158Smaybee 			atomic_add_64(&arc.c, -to_free);
12523158Smaybee 		else
12533158Smaybee 			arc.c = arc.c_min;
12542048Sstans 
12553158Smaybee 		atomic_add_64(&arc.p, -(arc.p >> arc_shrink_shift));
12563158Smaybee 		if (arc.c > arc.size)
12573158Smaybee 			arc.c = MAX(arc.size, arc.c_min);
12583158Smaybee 		if (arc.p > arc.c)
12593158Smaybee 			arc.p = (arc.c >> 1);
12603158Smaybee 		ASSERT(arc.c >= arc.c_min);
12613158Smaybee 		ASSERT((int64_t)arc.p >= 0);
12623158Smaybee 	}
1263789Sahrens 
12643158Smaybee 	if (arc.size > arc.c)
12653158Smaybee 		arc_adjust();
1266789Sahrens }
1267789Sahrens 
1268789Sahrens static int
1269789Sahrens arc_reclaim_needed(void)
1270789Sahrens {
1271789Sahrens 	uint64_t extra;
1272789Sahrens 
1273789Sahrens #ifdef _KERNEL
12742048Sstans 
12752048Sstans 	if (needfree)
12762048Sstans 		return (1);
12772048Sstans 
1278789Sahrens 	/*
1279789Sahrens 	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1280789Sahrens 	 */
1281789Sahrens 	extra = desfree;
1282789Sahrens 
1283789Sahrens 	/*
1284789Sahrens 	 * check that we're out of range of the pageout scanner.  It starts to
1285789Sahrens 	 * schedule paging if freemem is less than lotsfree and needfree.
1286789Sahrens 	 * lotsfree is the high-water mark for pageout, and needfree is the
1287789Sahrens 	 * number of needed free pages.  We add extra pages here to make sure
1288789Sahrens 	 * the scanner doesn't start up while we're freeing memory.
1289789Sahrens 	 */
1290789Sahrens 	if (freemem < lotsfree + needfree + extra)
1291789Sahrens 		return (1);
1292789Sahrens 
1293789Sahrens 	/*
1294789Sahrens 	 * check to make sure that swapfs has enough space so that anon
1295789Sahrens 	 * reservations can still succeed. anon_resvmem() checks that the
1296789Sahrens 	 * availrmem is greater than swapfs_minfree, and the number of reserved
1297789Sahrens 	 * swap pages.  We also add a bit of extra here just to prevent
1298789Sahrens 	 * circumstances from getting really dire.
1299789Sahrens 	 */
1300789Sahrens 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1301789Sahrens 		return (1);
1302789Sahrens 
13033307Sjohansen 	/*
13043307Sjohansen 	 * If zio data pages are being allocated out of a separate heap segment,
13053307Sjohansen 	 * then check that the size of available vmem for this area remains
13063307Sjohansen 	 * above 1/4th free.  This needs to be done since the size of the
13073307Sjohansen 	 * non-default segment is smaller than physical memory, so we could
13083307Sjohansen 	 * conceivably run out of VA in that segment before running out of
13093307Sjohansen 	 * physical memory.
13103307Sjohansen 	 */
13113307Sjohansen 	if ((zio_arena != NULL) && (btop(vmem_size(zio_arena, VMEM_FREE)) <
13123307Sjohansen 	    (btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)))
13133307Sjohansen 		return (1);
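	/*
	 * For example (hypothetical sizing): with a 2GB zio segment, the
	 * check above fires once less than 512MB of that VA remains free.
	 */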
13143307Sjohansen 
13151936Smaybee #if defined(__i386)
1316789Sahrens 	/*
1317789Sahrens 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1318789Sahrens 	 * kernel heap space before we ever run out of available physical
1319789Sahrens 	 * memory.  Most checks of the size of the heap_area compare against
1320789Sahrens 	 * tune.t_minarmem, which is the minimum available real memory that we
1321789Sahrens 	 * can have in the system.  However, this is generally fixed at 25 pages
1322789Sahrens 	 * which is so low that it's useless.  In this comparison, we seek to
1323789Sahrens 	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1324789Sahrens 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
1325789Sahrens 	 * free)
1326789Sahrens 	 */
1327789Sahrens 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1328789Sahrens 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1329789Sahrens 		return (1);
1330789Sahrens #endif
1331789Sahrens 
1332789Sahrens #else
1333789Sahrens 	if (spa_get_random(100) == 0)
1334789Sahrens 		return (1);
1335789Sahrens #endif
1336789Sahrens 	return (0);
1337789Sahrens }
1338789Sahrens 
1339789Sahrens static void
1340789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1341789Sahrens {
1342789Sahrens 	size_t			i;
1343789Sahrens 	kmem_cache_t		*prev_cache = NULL;
13443290Sjohansen 	kmem_cache_t		*prev_data_cache = NULL;
1345789Sahrens 	extern kmem_cache_t	*zio_buf_cache[];
13463290Sjohansen 	extern kmem_cache_t	*zio_data_buf_cache[];
1347789Sahrens 
13481484Sek110237 #ifdef _KERNEL
13491484Sek110237 	/*
13501484Sek110237 	 * First purge some DNLC entries, in case the DNLC is using
13511484Sek110237 	 * up too much memory.
13521484Sek110237 	 */
13531505Sek110237 	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
13541936Smaybee 
13551936Smaybee #if defined(__i386)
13561936Smaybee 	/*
13571936Smaybee 	 * Reclaim unused memory from all kmem caches.
13581936Smaybee 	 */
13591936Smaybee 	kmem_reap();
13601936Smaybee #endif
13611484Sek110237 #endif
13621484Sek110237 
1363789Sahrens 	/*
13641544Seschrock 	 * An aggressive reclamation will shrink the cache size as well as
13651544Seschrock 	 * reap free buffers from the arc kmem caches.
1366789Sahrens 	 */
1367789Sahrens 	if (strat == ARC_RECLAIM_AGGR)
13683158Smaybee 		arc_shrink();
1369789Sahrens 
1370789Sahrens 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1371789Sahrens 		if (zio_buf_cache[i] != prev_cache) {
1372789Sahrens 			prev_cache = zio_buf_cache[i];
1373789Sahrens 			kmem_cache_reap_now(zio_buf_cache[i]);
1374789Sahrens 		}
13753290Sjohansen 		if (zio_data_buf_cache[i] != prev_data_cache) {
13763290Sjohansen 			prev_data_cache = zio_data_buf_cache[i];
13773290Sjohansen 			kmem_cache_reap_now(zio_data_buf_cache[i]);
13783290Sjohansen 		}
1379789Sahrens 	}
13801544Seschrock 	kmem_cache_reap_now(buf_cache);
13811544Seschrock 	kmem_cache_reap_now(hdr_cache);
1382789Sahrens }
1383789Sahrens 
1384789Sahrens static void
1385789Sahrens arc_reclaim_thread(void)
1386789Sahrens {
1387789Sahrens 	clock_t			growtime = 0;
1388789Sahrens 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1389789Sahrens 	callb_cpr_t		cpr;
1390789Sahrens 
1391789Sahrens 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1392789Sahrens 
1393789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
1394789Sahrens 	while (arc_thread_exit == 0) {
1395789Sahrens 		if (arc_reclaim_needed()) {
1396789Sahrens 
1397789Sahrens 			if (arc.no_grow) {
1398789Sahrens 				if (last_reclaim == ARC_RECLAIM_CONS) {
1399789Sahrens 					last_reclaim = ARC_RECLAIM_AGGR;
1400789Sahrens 				} else {
1401789Sahrens 					last_reclaim = ARC_RECLAIM_CONS;
1402789Sahrens 				}
1403789Sahrens 			} else {
1404789Sahrens 				arc.no_grow = TRUE;
1405789Sahrens 				last_reclaim = ARC_RECLAIM_AGGR;
1406789Sahrens 				membar_producer();
1407789Sahrens 			}
1408789Sahrens 
1409789Sahrens 			/* reset the growth delay for every reclaim */
1410789Sahrens 			growtime = lbolt + (arc_grow_retry * hz);
14112856Snd150628 			ASSERT(growtime > 0);
1412789Sahrens 
1413789Sahrens 			arc_kmem_reap_now(last_reclaim);
1414789Sahrens 
1415789Sahrens 		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
1416789Sahrens 			arc.no_grow = FALSE;
1417789Sahrens 		}
1418789Sahrens 
14193298Smaybee 		if (2 * arc.c <
14203298Smaybee 		    arc.size + arc.mru_ghost->size + arc.mfu_ghost->size)
14213298Smaybee 			arc_adjust();
14223298Smaybee 
14231544Seschrock 		if (arc_eviction_list != NULL)
14241544Seschrock 			arc_do_user_evicts();
14251544Seschrock 
1426789Sahrens 		/* block until needed, or one second, whichever is shorter */
1427789Sahrens 		CALLB_CPR_SAFE_BEGIN(&cpr);
1428789Sahrens 		(void) cv_timedwait(&arc_reclaim_thr_cv,
1429789Sahrens 		    &arc_reclaim_thr_lock, (lbolt + hz));
1430789Sahrens 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1431789Sahrens 	}
1432789Sahrens 
1433789Sahrens 	arc_thread_exit = 0;
1434789Sahrens 	cv_broadcast(&arc_reclaim_thr_cv);
1435789Sahrens 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1436789Sahrens 	thread_exit();
1437789Sahrens }
1438789Sahrens 
14391544Seschrock /*
14401544Seschrock  * Adapt arc info given the number of bytes we are trying to add and
14411544Seschrock  * the state that we are coming from.  This function is only called
14421544Seschrock  * when we are adding new content to the cache.
14431544Seschrock  */
1444789Sahrens static void
14451544Seschrock arc_adapt(int bytes, arc_state_t *state)
1446789Sahrens {
14471544Seschrock 	int mult;
14481544Seschrock 
14491544Seschrock 	ASSERT(bytes > 0);
1450789Sahrens 	/*
14511544Seschrock 	 * Adapt the target size of the MRU list:
14521544Seschrock 	 *	- if we just hit in the MRU ghost list, then increase
14531544Seschrock 	 *	  the target size of the MRU list.
14541544Seschrock 	 *	- if we just hit in the MFU ghost list, then increase
14551544Seschrock 	 *	  the target size of the MFU list by decreasing the
14561544Seschrock 	 *	  target size of the MRU list.
1457789Sahrens 	 */
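	/*
	 * Worked example (illustrative only): if the MFU ghost list is
	 * three times the size of the MRU ghost list, a hit in the MRU
	 * ghost list grows arc.p by 3 * bytes (capped at arc.c).
	 * Symmetrically, if the MRU ghost list is three times the MFU
	 * ghost list, a hit in the MFU ghost list shrinks arc.p by
	 * 3 * bytes (floored at 0).
	 */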
14581544Seschrock 	if (state == arc.mru_ghost) {
14591544Seschrock 		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
14601544Seschrock 		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));
14611544Seschrock 
14621544Seschrock 		arc.p = MIN(arc.c, arc.p + bytes * mult);
14631544Seschrock 	} else if (state == arc.mfu_ghost) {
14641544Seschrock 		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
14651544Seschrock 		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));
14661544Seschrock 
14671544Seschrock 		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
14681544Seschrock 	}
14691544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1470789Sahrens 
1471789Sahrens 	if (arc_reclaim_needed()) {
1472789Sahrens 		cv_signal(&arc_reclaim_thr_cv);
1473789Sahrens 		return;
1474789Sahrens 	}
1475789Sahrens 
1476789Sahrens 	if (arc.no_grow)
1477789Sahrens 		return;
1478789Sahrens 
14791544Seschrock 	if (arc.c >= arc.c_max)
14801544Seschrock 		return;
14811544Seschrock 
1482789Sahrens 	/*
14831544Seschrock 	 * If we're within (2 * maxblocksize) bytes of the target
14841544Seschrock 	 * cache size, increment the target cache size
1485789Sahrens 	 */
14861544Seschrock 	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
14871544Seschrock 		atomic_add_64(&arc.c, (int64_t)bytes);
1488789Sahrens 		if (arc.c > arc.c_max)
1489789Sahrens 			arc.c = arc.c_max;
14901544Seschrock 		else if (state == arc.anon)
14911544Seschrock 			atomic_add_64(&arc.p, (int64_t)bytes);
14921544Seschrock 		if (arc.p > arc.c)
14931544Seschrock 			arc.p = arc.c;
1494789Sahrens 	}
14951544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1496789Sahrens }
1497789Sahrens 
1498789Sahrens /*
14991544Seschrock  * Check if the cache has reached its limits and eviction is required
15001544Seschrock  * prior to insert.
1501789Sahrens  */
1502789Sahrens static int
1503789Sahrens arc_evict_needed()
1504789Sahrens {
1505789Sahrens 	if (arc_reclaim_needed())
1506789Sahrens 		return (1);
1507789Sahrens 
15081544Seschrock 	return (arc.size > arc.c);
1509789Sahrens }
1510789Sahrens 
1511789Sahrens /*
15122688Smaybee  * The buffer, supplied as the first argument, needs a data block.
15132688Smaybee  * So, if we are at cache max, determine which cache should be victimized.
15142688Smaybee  * We have the following cases:
1515789Sahrens  *
15161544Seschrock  * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
1517789Sahrens  * In this situation if we're out of space, but the resident size of the MFU is
1518789Sahrens  * under the limit, victimize the MFU cache to satisfy this insertion request.
1519789Sahrens  *
15201544Seschrock  * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
1521789Sahrens  * Here, we've used up all of the available space for the MRU, so we need to
1522789Sahrens  * evict from our own cache instead.  Evict from the set of resident MRU
1523789Sahrens  * entries.
1524789Sahrens  *
15251544Seschrock  * 3. Insert for MFU (c - p) > sizeof(arc.mfu) ->
1526789Sahrens  * c minus p represents the MFU space in the cache, since p is the size of the
1527789Sahrens  * cache that is dedicated to the MRU.  In this situation there's still space on
1528789Sahrens  * the MFU side, so the MRU side needs to be victimized.
1529789Sahrens  *
15301544Seschrock  * 4. Insert for MFU (c - p) < sizeof(arc.mfu) ->
1531789Sahrens  * MFU's resident set is consuming more space than it has been allotted.  In
1532789Sahrens  * this situation, we must victimize our own cache, the MFU, for this insertion.
1533789Sahrens  */
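/*
 * Condensed decision sketch of the cases above (for reference only):
 *
 *	insert for MRU/anon:  victim = (p > anon + mru) ? MFU : MRU
 *	insert for MFU:       victim = (c - p > mfu)    ? MRU : MFU
 */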
1534789Sahrens static void
15352688Smaybee arc_get_data_buf(arc_buf_t *buf)
1536789Sahrens {
15373290Sjohansen 	arc_state_t		*state = buf->b_hdr->b_state;
15383290Sjohansen 	uint64_t		size = buf->b_hdr->b_size;
15393290Sjohansen 	arc_buf_contents_t	type = buf->b_hdr->b_type;
15402688Smaybee 
15412688Smaybee 	arc_adapt(size, state);
1542789Sahrens 
15432688Smaybee 	/*
15442688Smaybee 	 * We have not yet reached cache maximum size,
15452688Smaybee 	 * just allocate a new buffer.
15462688Smaybee 	 */
15472688Smaybee 	if (!arc_evict_needed()) {
15483290Sjohansen 		if (type == ARC_BUFC_METADATA) {
15493290Sjohansen 			buf->b_data = zio_buf_alloc(size);
15503290Sjohansen 		} else {
15513290Sjohansen 			ASSERT(type == ARC_BUFC_DATA);
15523290Sjohansen 			buf->b_data = zio_data_buf_alloc(size);
15533290Sjohansen 		}
15542688Smaybee 		atomic_add_64(&arc.size, size);
15552688Smaybee 		goto out;
15562688Smaybee 	}
15572688Smaybee 
15582688Smaybee 	/*
15592688Smaybee 	 * If we are prefetching from the mfu ghost list, this buffer
15602688Smaybee 	 * will end up on the mru list; so steal space from there.
15612688Smaybee 	 */
15622688Smaybee 	if (state == arc.mfu_ghost)
15632688Smaybee 		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu;
15642688Smaybee 	else if (state == arc.mru_ghost)
15652688Smaybee 		state = arc.mru;
1566789Sahrens 
15672688Smaybee 	if (state == arc.mru || state == arc.anon) {
15682688Smaybee 		uint64_t mru_used = arc.anon->size + arc.mru->size;
15692688Smaybee 		state = (arc.p > mru_used) ? arc.mfu : arc.mru;
1570789Sahrens 	} else {
15712688Smaybee 		/* MFU cases */
15722688Smaybee 		uint64_t mfu_space = arc.c - arc.p;
15732688Smaybee 		state =  (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu;
15742688Smaybee 	}
15753290Sjohansen 	if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) {
15763290Sjohansen 		if (type == ARC_BUFC_METADATA) {
15773290Sjohansen 			buf->b_data = zio_buf_alloc(size);
15783290Sjohansen 		} else {
15793290Sjohansen 			ASSERT(type == ARC_BUFC_DATA);
15803290Sjohansen 			buf->b_data = zio_data_buf_alloc(size);
15813290Sjohansen 		}
15822688Smaybee 		atomic_add_64(&arc.size, size);
15832688Smaybee 		atomic_add_64(&arc.recycle_miss, 1);
15842688Smaybee 	}
15852688Smaybee 	ASSERT(buf->b_data != NULL);
15862688Smaybee out:
15872688Smaybee 	/*
15882688Smaybee 	 * Update the state size.  Note that ghost states have a
15892688Smaybee 	 * "ghost size" and so don't need to be updated.
15902688Smaybee 	 */
15912688Smaybee 	if (!GHOST_STATE(buf->b_hdr->b_state)) {
15922688Smaybee 		arc_buf_hdr_t *hdr = buf->b_hdr;
15932688Smaybee 
15942688Smaybee 		atomic_add_64(&hdr->b_state->size, size);
15952688Smaybee 		if (list_link_active(&hdr->b_arc_node)) {
15962688Smaybee 			ASSERT(refcount_is_zero(&hdr->b_refcnt));
15972688Smaybee 			atomic_add_64(&hdr->b_state->lsize, size);
1598789Sahrens 		}
15993298Smaybee 		/*
16003298Smaybee 		 * If we are growing the cache, and we are adding anonymous
16013298Smaybee 		 * data, and we have outgrown arc.p, update arc.p
16023298Smaybee 		 */
16033298Smaybee 		if (arc.size < arc.c && hdr->b_state == arc.anon &&
16043298Smaybee 		    arc.anon->size + arc.mru->size > arc.p)
16053298Smaybee 			arc.p = MIN(arc.c, arc.p + size);
1606789Sahrens 	}
1607789Sahrens }
1608789Sahrens 
1609789Sahrens /*
1610789Sahrens  * This routine is called whenever a buffer is accessed.
16111544Seschrock  * NOTE: the hash lock is dropped in this function.
1612789Sahrens  */
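/*
 * Summary of the state transitions performed below (for reference):
 *
 *	anon      -> mru	first access to a new buffer
 *	mru       -> mfu	second access, more than ARC_MINTIME later
 *	mru_ghost -> mfu	(or mru, if the access was a prefetch)
 *	mfu       -> mfu	stays put; prefetches are re-queued at the head
 *	mfu_ghost -> mfu	(or mru, if the access was a prefetch)
 */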
1613789Sahrens static void
16142688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1615789Sahrens {
1616789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
1617789Sahrens 
1618789Sahrens 	if (buf->b_state == arc.anon) {
1619789Sahrens 		/*
1620789Sahrens 		 * This buffer is not in the cache, and does not
1621789Sahrens 		 * appear in our "ghost" list.  Add the new buffer
1622789Sahrens 		 * to the MRU state.
1623789Sahrens 		 */
1624789Sahrens 
1625789Sahrens 		ASSERT(buf->b_arc_access == 0);
1626789Sahrens 		buf->b_arc_access = lbolt;
16271544Seschrock 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
16281544Seschrock 		arc_change_state(arc.mru, buf, hash_lock);
1629789Sahrens 
16301544Seschrock 	} else if (buf->b_state == arc.mru) {
1631789Sahrens 		/*
16322391Smaybee 		 * If this buffer is here because of a prefetch, then either:
16332391Smaybee 		 * - clear the flag if this is a "referencing" read
16342391Smaybee 		 *   (any subsequent access will bump this into the MFU state).
16352391Smaybee 		 * or
16362391Smaybee 		 * - move the buffer to the head of the list if this is
16372391Smaybee 		 *   another prefetch (to make it less likely to be evicted).
1638789Sahrens 		 */
1639789Sahrens 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
16402391Smaybee 			if (refcount_count(&buf->b_refcnt) == 0) {
16412391Smaybee 				ASSERT(list_link_active(&buf->b_arc_node));
16422391Smaybee 				mutex_enter(&arc.mru->mtx);
16432391Smaybee 				list_remove(&arc.mru->list, buf);
16442391Smaybee 				list_insert_head(&arc.mru->list, buf);
16452391Smaybee 				mutex_exit(&arc.mru->mtx);
16462391Smaybee 			} else {
16472391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
16482391Smaybee 				atomic_add_64(&arc.mru->hits, 1);
16492391Smaybee 			}
16502391Smaybee 			buf->b_arc_access = lbolt;
1651789Sahrens 			return;
1652789Sahrens 		}
1653789Sahrens 
1654789Sahrens 		/*
1655789Sahrens 		 * This buffer has been "accessed" only once so far,
1656789Sahrens 		 * but it is still in the cache. Move it to the MFU
1657789Sahrens 		 * state.
1658789Sahrens 		 */
1659789Sahrens 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
1660789Sahrens 			/*
1661789Sahrens 			 * More than 125ms have passed since we
1662789Sahrens 			 * instantiated this buffer.  Move it to the
1663789Sahrens 			 * most frequently used state.
1664789Sahrens 			 */
1665789Sahrens 			buf->b_arc_access = lbolt;
16661544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
16671544Seschrock 			arc_change_state(arc.mfu, buf, hash_lock);
1668789Sahrens 		}
16691544Seschrock 		atomic_add_64(&arc.mru->hits, 1);
16701544Seschrock 	} else if (buf->b_state == arc.mru_ghost) {
1671789Sahrens 		arc_state_t	*new_state;
1672789Sahrens 		/*
1673789Sahrens 		 * This buffer has been "accessed" recently, but
1674789Sahrens 		 * was evicted from the cache.  Move it to the
1675789Sahrens 		 * MFU state.
1676789Sahrens 		 */
1677789Sahrens 
1678789Sahrens 		if (buf->b_flags & ARC_PREFETCH) {
16791544Seschrock 			new_state = arc.mru;
16802391Smaybee 			if (refcount_count(&buf->b_refcnt) > 0)
16812391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
16821544Seschrock 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1683789Sahrens 		} else {
16841544Seschrock 			new_state = arc.mfu;
16851544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1686789Sahrens 		}
1687789Sahrens 
1688789Sahrens 		buf->b_arc_access = lbolt;
1689789Sahrens 		arc_change_state(new_state, buf, hash_lock);
1690789Sahrens 
16911544Seschrock 		atomic_add_64(&arc.mru_ghost->hits, 1);
16921544Seschrock 	} else if (buf->b_state == arc.mfu) {
1693789Sahrens 		/*
1694789Sahrens 		 * This buffer has been accessed more than once and is
1695789Sahrens 		 * still in the cache.  Keep it in the MFU state.
1696789Sahrens 		 *
16972391Smaybee 		 * NOTE: an add_reference() that occurred when we did
16982391Smaybee 		 * the arc_read() will have kicked this off the list.
16992391Smaybee 		 * If it was a prefetch, we will explicitly move it to
17002391Smaybee 		 * the head of the list now.
1701789Sahrens 		 */
17022391Smaybee 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
17032391Smaybee 			ASSERT(refcount_count(&buf->b_refcnt) == 0);
17042391Smaybee 			ASSERT(list_link_active(&buf->b_arc_node));
17052391Smaybee 			mutex_enter(&arc.mfu->mtx);
17062391Smaybee 			list_remove(&arc.mfu->list, buf);
17072391Smaybee 			list_insert_head(&arc.mfu->list, buf);
17082391Smaybee 			mutex_exit(&arc.mfu->mtx);
17092391Smaybee 		}
17101544Seschrock 		atomic_add_64(&arc.mfu->hits, 1);
17112391Smaybee 		buf->b_arc_access = lbolt;
17121544Seschrock 	} else if (buf->b_state == arc.mfu_ghost) {
17132391Smaybee 		arc_state_t	*new_state = arc.mfu;
1714789Sahrens 		/*
1715789Sahrens 		 * This buffer has been accessed more than once but has
1716789Sahrens 		 * been evicted from the cache.  Move it back to the
1717789Sahrens 		 * MFU state.
1718789Sahrens 		 */
1719789Sahrens 
17202391Smaybee 		if (buf->b_flags & ARC_PREFETCH) {
17212391Smaybee 			/*
17222391Smaybee 			 * This is a prefetch access...
17232391Smaybee 			 * move this block back to the MRU state.
17242391Smaybee 			 */
17252391Smaybee 			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
17262391Smaybee 			new_state = arc.mru;
17272391Smaybee 		}
17282391Smaybee 
1729789Sahrens 		buf->b_arc_access = lbolt;
17301544Seschrock 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
17312391Smaybee 		arc_change_state(new_state, buf, hash_lock);
1732789Sahrens 
17331544Seschrock 		atomic_add_64(&arc.mfu_ghost->hits, 1);
1734789Sahrens 	} else {
1735789Sahrens 		ASSERT(!"invalid arc state");
1736789Sahrens 	}
1737789Sahrens }
1738789Sahrens 
1739789Sahrens /* a generic arc_done_func_t which you can use */
1740789Sahrens /* ARGSUSED */
1741789Sahrens void
1742789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1743789Sahrens {
1744789Sahrens 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
17451544Seschrock 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1746789Sahrens }
1747789Sahrens 
1748789Sahrens /* a generic arc_done_func_t which you can use */
1749789Sahrens void
1750789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1751789Sahrens {
1752789Sahrens 	arc_buf_t **bufp = arg;
1753789Sahrens 	if (zio && zio->io_error) {
17541544Seschrock 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1755789Sahrens 		*bufp = NULL;
1756789Sahrens 	} else {
1757789Sahrens 		*bufp = buf;
1758789Sahrens 	}
1759789Sahrens }
1760789Sahrens 
1761789Sahrens static void
1762789Sahrens arc_read_done(zio_t *zio)
1763789Sahrens {
17641589Smaybee 	arc_buf_hdr_t	*hdr, *found;
1765789Sahrens 	arc_buf_t	*buf;
1766789Sahrens 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1767789Sahrens 	kmutex_t	*hash_lock;
1768789Sahrens 	arc_callback_t	*callback_list, *acb;
1769789Sahrens 	int		freeable = FALSE;
1770789Sahrens 
1771789Sahrens 	buf = zio->io_private;
1772789Sahrens 	hdr = buf->b_hdr;
1773789Sahrens 
17741589Smaybee 	/*
17751589Smaybee 	 * The hdr was inserted into the hash table and removed from lists
17761589Smaybee 	 * prior to starting I/O.  We should find this header, since
17771589Smaybee 	 * it's in the hash table, and it should be legit since it's
17781589Smaybee 	 * not possible to evict it during the I/O.  The only possible
17791589Smaybee 	 * reason for it not to be found is if we were freed during the
17801589Smaybee 	 * read.
17811589Smaybee 	 */
17821589Smaybee 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
17833093Sahrens 	    &hash_lock);
1784789Sahrens 
17851589Smaybee 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
17861589Smaybee 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1787789Sahrens 
1788789Sahrens 	/* byteswap if necessary */
1789789Sahrens 	callback_list = hdr->b_acb;
1790789Sahrens 	ASSERT(callback_list != NULL);
1791789Sahrens 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1792789Sahrens 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1793789Sahrens 
17943093Sahrens 	arc_cksum_compute(buf);
17953093Sahrens 
1796789Sahrens 	/* create copies of the data buffer for the callers */
1797789Sahrens 	abuf = buf;
1798789Sahrens 	for (acb = callback_list; acb; acb = acb->acb_next) {
1799789Sahrens 		if (acb->acb_done) {
18002688Smaybee 			if (abuf == NULL)
18012688Smaybee 				abuf = arc_buf_clone(buf);
1802789Sahrens 			acb->acb_buf = abuf;
1803789Sahrens 			abuf = NULL;
1804789Sahrens 		}
1805789Sahrens 	}
1806789Sahrens 	hdr->b_acb = NULL;
1807789Sahrens 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
18081544Seschrock 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
18091544Seschrock 	if (abuf == buf)
18101544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1811789Sahrens 
1812789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1813789Sahrens 
1814789Sahrens 	if (zio->io_error != 0) {
1815789Sahrens 		hdr->b_flags |= ARC_IO_ERROR;
1816789Sahrens 		if (hdr->b_state != arc.anon)
1817789Sahrens 			arc_change_state(arc.anon, hdr, hash_lock);
18181544Seschrock 		if (HDR_IN_HASH_TABLE(hdr))
18191544Seschrock 			buf_hash_remove(hdr);
1820789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
18212391Smaybee 		/* convert checksum errors into IO errors */
18221544Seschrock 		if (zio->io_error == ECKSUM)
18231544Seschrock 			zio->io_error = EIO;
1824789Sahrens 	}
1825789Sahrens 
18261544Seschrock 	/*
18272391Smaybee 	 * Broadcast before we drop the hash_lock to avoid the possibility
18282391Smaybee 	 * that the hdr (and hence the cv) might be freed before we get to
18292391Smaybee 	 * the cv_broadcast().
18301544Seschrock 	 */
18311544Seschrock 	cv_broadcast(&hdr->b_cv);
18321544Seschrock 
18331589Smaybee 	if (hash_lock) {
1834789Sahrens 		/*
1835789Sahrens 		 * Only call arc_access on anonymous buffers.  This is because
1836789Sahrens 		 * if we've issued an I/O for an evicted buffer, we've already
1837789Sahrens 		 * called arc_access (to prevent any simultaneous readers from
1838789Sahrens 		 * getting confused).
1839789Sahrens 		 */
1840789Sahrens 		if (zio->io_error == 0 && hdr->b_state == arc.anon)
18412688Smaybee 			arc_access(hdr, hash_lock);
18422688Smaybee 		mutex_exit(hash_lock);
1843789Sahrens 	} else {
1844789Sahrens 		/*
1845789Sahrens 		 * This block was freed while we waited for the read to
1846789Sahrens 		 * complete.  It has been removed from the hash table and
1847789Sahrens 		 * moved to the anonymous state (so that it won't show up
1848789Sahrens 		 * in the cache).
1849789Sahrens 		 */
1850789Sahrens 		ASSERT3P(hdr->b_state, ==, arc.anon);
1851789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
1852789Sahrens 	}
1853789Sahrens 
1854789Sahrens 	/* execute each callback and free its structure */
1855789Sahrens 	while ((acb = callback_list) != NULL) {
1856789Sahrens 		if (acb->acb_done)
1857789Sahrens 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1858789Sahrens 
1859789Sahrens 		if (acb->acb_zio_dummy != NULL) {
1860789Sahrens 			acb->acb_zio_dummy->io_error = zio->io_error;
1861789Sahrens 			zio_nowait(acb->acb_zio_dummy);
1862789Sahrens 		}
1863789Sahrens 
1864789Sahrens 		callback_list = acb->acb_next;
1865789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
1866789Sahrens 	}
1867789Sahrens 
1868789Sahrens 	if (freeable)
18691544Seschrock 		arc_hdr_destroy(hdr);
1870789Sahrens }
1871789Sahrens 
1872789Sahrens /*
1873789Sahrens  * "Read" the block at the specified DVA (in bp) via the
1874789Sahrens  * cache.  If the block is found in the cache, invoke the provided
1875789Sahrens  * callback immediately and return.  Note that the `zio' parameter
1876789Sahrens  * in the callback will be NULL in this case, since no IO was
1877789Sahrens  * required.  If the block is not in the cache pass the read request
1878789Sahrens  * on to the spa with a substitute callback function, so that the
1879789Sahrens  * requested block will be added to the cache.
1880789Sahrens  *
1881789Sahrens  * If a read request arrives for a block that has a read in-progress,
1882789Sahrens  * either wait for the in-progress read to complete (and return the
1883789Sahrens  * results); or, if this is a read with a "done" func, add a record
1884789Sahrens  * to the read to invoke the "done" func when the read completes,
1885789Sahrens  * and return; or just return.
1886789Sahrens  *
1887789Sahrens  * arc_read_done() will invoke all the requested "done" functions
1888789Sahrens  * for readers of this block.
1889789Sahrens  */
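/*
 * Illustrative caller sketch (hypothetical; only arc_read() and
 * arc_getbuf_func() are names from this file, the rest are placeholders):
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	error = arc_read(NULL, spa, bp, swap_func, arc_getbuf_func, &abuf,
 *	    priority, zio_flags, &aflags, &zb);
 *
 * With ARC_WAIT the call returns only after the read has completed; with
 * ARC_NOWAIT the "done" callback is invoked later from arc_read_done().
 */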
1890789Sahrens int
1891789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1892789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
18932391Smaybee     uint32_t *arc_flags, zbookmark_t *zb)
1894789Sahrens {
1895789Sahrens 	arc_buf_hdr_t *hdr;
1896789Sahrens 	arc_buf_t *buf;
1897789Sahrens 	kmutex_t *hash_lock;
1898789Sahrens 	zio_t	*rzio;
1899789Sahrens 
1900789Sahrens top:
1901789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
19021544Seschrock 	if (hdr && hdr->b_datacnt > 0) {
1903789Sahrens 
19042391Smaybee 		*arc_flags |= ARC_CACHED;
19052391Smaybee 
1906789Sahrens 		if (HDR_IO_IN_PROGRESS(hdr)) {
19072391Smaybee 
19082391Smaybee 			if (*arc_flags & ARC_WAIT) {
19092391Smaybee 				cv_wait(&hdr->b_cv, hash_lock);
19102391Smaybee 				mutex_exit(hash_lock);
19112391Smaybee 				goto top;
19122391Smaybee 			}
19132391Smaybee 			ASSERT(*arc_flags & ARC_NOWAIT);
19142391Smaybee 
19152391Smaybee 			if (done) {
1916789Sahrens 				arc_callback_t	*acb = NULL;
1917789Sahrens 
1918789Sahrens 				acb = kmem_zalloc(sizeof (arc_callback_t),
1919789Sahrens 				    KM_SLEEP);
1920789Sahrens 				acb->acb_done = done;
1921789Sahrens 				acb->acb_private = private;
1922789Sahrens 				acb->acb_byteswap = swap;
1923789Sahrens 				if (pio != NULL)
1924789Sahrens 					acb->acb_zio_dummy = zio_null(pio,
1925789Sahrens 					    spa, NULL, NULL, flags);
1926789Sahrens 
1927789Sahrens 				ASSERT(acb->acb_done != NULL);
1928789Sahrens 				acb->acb_next = hdr->b_acb;
1929789Sahrens 				hdr->b_acb = acb;
1930789Sahrens 				add_reference(hdr, hash_lock, private);
1931789Sahrens 				mutex_exit(hash_lock);
1932789Sahrens 				return (0);
1933789Sahrens 			}
1934789Sahrens 			mutex_exit(hash_lock);
1935789Sahrens 			return (0);
1936789Sahrens 		}
1937789Sahrens 
19381544Seschrock 		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
1939789Sahrens 
19401544Seschrock 		if (done) {
19412688Smaybee 			add_reference(hdr, hash_lock, private);
19421544Seschrock 			/*
19431544Seschrock 			 * If this block is already in use, create a new
19441544Seschrock 			 * copy of the data so that we will be guaranteed
19451544Seschrock 			 * that arc_release() will always succeed.
19461544Seschrock 			 */
19471544Seschrock 			buf = hdr->b_buf;
19481544Seschrock 			ASSERT(buf);
19491544Seschrock 			ASSERT(buf->b_data);
19502688Smaybee 			if (HDR_BUF_AVAILABLE(hdr)) {
19511544Seschrock 				ASSERT(buf->b_efunc == NULL);
19521544Seschrock 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
19532688Smaybee 			} else {
19542688Smaybee 				buf = arc_buf_clone(buf);
19551544Seschrock 			}
19562391Smaybee 		} else if (*arc_flags & ARC_PREFETCH &&
19572391Smaybee 		    refcount_count(&hdr->b_refcnt) == 0) {
19582391Smaybee 			hdr->b_flags |= ARC_PREFETCH;
1959789Sahrens 		}
1960789Sahrens 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
19612688Smaybee 		arc_access(hdr, hash_lock);
19622688Smaybee 		mutex_exit(hash_lock);
1963789Sahrens 		atomic_add_64(&arc.hits, 1);
1964789Sahrens 		if (done)
1965789Sahrens 			done(NULL, buf, private);
1966789Sahrens 	} else {
1967789Sahrens 		uint64_t size = BP_GET_LSIZE(bp);
1968789Sahrens 		arc_callback_t	*acb;
1969789Sahrens 
1970789Sahrens 		if (hdr == NULL) {
1971789Sahrens 			/* this block is not in the cache */
1972789Sahrens 			arc_buf_hdr_t	*exists;
19733290Sjohansen 			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
19743290Sjohansen 			buf = arc_buf_alloc(spa, size, private, type);
1975789Sahrens 			hdr = buf->b_hdr;
1976789Sahrens 			hdr->b_dva = *BP_IDENTITY(bp);
1977789Sahrens 			hdr->b_birth = bp->blk_birth;
1978789Sahrens 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
1979789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
1980789Sahrens 			if (exists) {
1981789Sahrens 				/* somebody beat us to the hash insert */
1982789Sahrens 				mutex_exit(hash_lock);
1983789Sahrens 				bzero(&hdr->b_dva, sizeof (dva_t));
1984789Sahrens 				hdr->b_birth = 0;
1985789Sahrens 				hdr->b_cksum0 = 0;
19861544Seschrock 				(void) arc_buf_remove_ref(buf, private);
1987789Sahrens 				goto top; /* restart the IO request */
1988789Sahrens 			}
19892391Smaybee 			/* if this is a prefetch, we don't have a reference */
19902391Smaybee 			if (*arc_flags & ARC_PREFETCH) {
19912391Smaybee 				(void) remove_reference(hdr, hash_lock,
19922391Smaybee 				    private);
19932391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
19942391Smaybee 			}
19952391Smaybee 			if (BP_GET_LEVEL(bp) > 0)
19962391Smaybee 				hdr->b_flags |= ARC_INDIRECT;
1997789Sahrens 		} else {
1998789Sahrens 			/* this block is in the ghost cache */
19991544Seschrock 			ASSERT(GHOST_STATE(hdr->b_state));
20001544Seschrock 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
20012391Smaybee 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
20022391Smaybee 			ASSERT(hdr->b_buf == NULL);
2003789Sahrens 
20042391Smaybee 			/* if this is a prefetch, we don't have a reference */
20052391Smaybee 			if (*arc_flags & ARC_PREFETCH)
20062391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
20072391Smaybee 			else
20082391Smaybee 				add_reference(hdr, hash_lock, private);
2009789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
20101544Seschrock 			buf->b_hdr = hdr;
20112688Smaybee 			buf->b_data = NULL;
20121544Seschrock 			buf->b_efunc = NULL;
20131544Seschrock 			buf->b_private = NULL;
20141544Seschrock 			buf->b_next = NULL;
20151544Seschrock 			hdr->b_buf = buf;
20162688Smaybee 			arc_get_data_buf(buf);
20171544Seschrock 			ASSERT(hdr->b_datacnt == 0);
20181544Seschrock 			hdr->b_datacnt = 1;
20192391Smaybee 
2020789Sahrens 		}
2021789Sahrens 
2022789Sahrens 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2023789Sahrens 		acb->acb_done = done;
2024789Sahrens 		acb->acb_private = private;
2025789Sahrens 		acb->acb_byteswap = swap;
2026789Sahrens 
2027789Sahrens 		ASSERT(hdr->b_acb == NULL);
2028789Sahrens 		hdr->b_acb = acb;
2029789Sahrens 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
2030789Sahrens 
2031789Sahrens 		/*
2032789Sahrens 		 * If the buffer has been evicted, migrate it to a present state
2033789Sahrens 		 * before issuing the I/O.  Once we drop the hash-table lock,
2034789Sahrens 		 * the header will be marked as I/O in progress and have an
2035789Sahrens 		 * attached buffer.  At this point, anybody who finds this
2036789Sahrens 		 * buffer ought to notice that it's legit but has a pending I/O.
2037789Sahrens 		 */
2038789Sahrens 
20391544Seschrock 		if (GHOST_STATE(hdr->b_state))
20402688Smaybee 			arc_access(hdr, hash_lock);
20412688Smaybee 		mutex_exit(hash_lock);
2042789Sahrens 
2043789Sahrens 		ASSERT3U(hdr->b_size, ==, size);
20441596Sahrens 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
20451596Sahrens 		    zbookmark_t *, zb);
2046789Sahrens 		atomic_add_64(&arc.misses, 1);
20471544Seschrock 
2048789Sahrens 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
20491544Seschrock 		    arc_read_done, buf, priority, flags, zb);
2050789Sahrens 
20512391Smaybee 		if (*arc_flags & ARC_WAIT)
2052789Sahrens 			return (zio_wait(rzio));
2053789Sahrens 
20542391Smaybee 		ASSERT(*arc_flags & ARC_NOWAIT);
2055789Sahrens 		zio_nowait(rzio);
2056789Sahrens 	}
2057789Sahrens 	return (0);
2058789Sahrens }
2059789Sahrens 
2060789Sahrens /*
2061789Sahrens  * arc_read() variant to support pool traversal.  If the block is already
2062789Sahrens  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2063789Sahrens  * The idea is that we don't want pool traversal filling up memory, but
2064789Sahrens  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
2065789Sahrens  */
2066789Sahrens int
2067789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2068789Sahrens {
2069789Sahrens 	arc_buf_hdr_t *hdr;
2070789Sahrens 	kmutex_t *hash_mtx;
2071789Sahrens 	int rc = 0;
2072789Sahrens 
2073789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2074789Sahrens 
20751544Seschrock 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
20761544Seschrock 		arc_buf_t *buf = hdr->b_buf;
20771544Seschrock 
20781544Seschrock 		ASSERT(buf);
20791544Seschrock 		while (buf->b_data == NULL) {
20801544Seschrock 			buf = buf->b_next;
20811544Seschrock 			ASSERT(buf);
20821544Seschrock 		}
20831544Seschrock 		bcopy(buf->b_data, data, hdr->b_size);
20841544Seschrock 	} else {
2085789Sahrens 		rc = ENOENT;
20861544Seschrock 	}
2087789Sahrens 
2088789Sahrens 	if (hash_mtx)
2089789Sahrens 		mutex_exit(hash_mtx);
2090789Sahrens 
2091789Sahrens 	return (rc);
2092789Sahrens }
2093789Sahrens 
20941544Seschrock void
20951544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
20961544Seschrock {
20971544Seschrock 	ASSERT(buf->b_hdr != NULL);
20981544Seschrock 	ASSERT(buf->b_hdr->b_state != arc.anon);
20991544Seschrock 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
21001544Seschrock 	buf->b_efunc = func;
21011544Seschrock 	buf->b_private = private;
21021544Seschrock }
21031544Seschrock 
21041544Seschrock /*
21051544Seschrock  * This is used by the DMU to let the ARC know that a buffer is
21061544Seschrock  * being evicted, so the ARC should clean up.  If this arc buf
21071544Seschrock  * is not yet in the evicted state, it will be put there.
21081544Seschrock  */
21091544Seschrock int
21101544Seschrock arc_buf_evict(arc_buf_t *buf)
21111544Seschrock {
21122887Smaybee 	arc_buf_hdr_t *hdr;
21131544Seschrock 	kmutex_t *hash_lock;
21141544Seschrock 	arc_buf_t **bufp;
21151544Seschrock 
21162887Smaybee 	mutex_enter(&arc_eviction_mtx);
21172887Smaybee 	hdr = buf->b_hdr;
21181544Seschrock 	if (hdr == NULL) {
21191544Seschrock 		/*
21201544Seschrock 		 * We are in arc_do_user_evicts().
21211544Seschrock 		 */
21221544Seschrock 		ASSERT(buf->b_data == NULL);
21232887Smaybee 		mutex_exit(&arc_eviction_mtx);
21241544Seschrock 		return (0);
21251544Seschrock 	}
21262887Smaybee 	hash_lock = HDR_LOCK(hdr);
21272887Smaybee 	mutex_exit(&arc_eviction_mtx);
21281544Seschrock 
21291544Seschrock 	mutex_enter(hash_lock);
21301544Seschrock 
21312724Smaybee 	if (buf->b_data == NULL) {
21322724Smaybee 		/*
21332724Smaybee 		 * We are on the eviction list.
21342724Smaybee 		 */
21352724Smaybee 		mutex_exit(hash_lock);
21362724Smaybee 		mutex_enter(&arc_eviction_mtx);
21372724Smaybee 		if (buf->b_hdr == NULL) {
21382724Smaybee 			/*
21392724Smaybee 			 * We are already in arc_do_user_evicts().
21402724Smaybee 			 */
21412724Smaybee 			mutex_exit(&arc_eviction_mtx);
21422724Smaybee 			return (0);
21432724Smaybee 		} else {
21442724Smaybee 			arc_buf_t copy = *buf; /* structure assignment */
21452724Smaybee 			/*
21462724Smaybee 			 * Process this buffer now
21472724Smaybee 			 * but let arc_do_user_evicts() do the reaping.
21482724Smaybee 			 */
21492724Smaybee 			buf->b_efunc = NULL;
21502724Smaybee 			mutex_exit(&arc_eviction_mtx);
21512724Smaybee 			VERIFY(copy.b_efunc(&copy) == 0);
21522724Smaybee 			return (1);
21532724Smaybee 		}
21542724Smaybee 	}
21552724Smaybee 
21562724Smaybee 	ASSERT(buf->b_hdr == hdr);
21572724Smaybee 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
21581544Seschrock 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
21591544Seschrock 
21601544Seschrock 	/*
21611544Seschrock 	 * Pull this buffer off of the hdr
21621544Seschrock 	 */
21631544Seschrock 	bufp = &hdr->b_buf;
21641544Seschrock 	while (*bufp != buf)
21651544Seschrock 		bufp = &(*bufp)->b_next;
21661544Seschrock 	*bufp = buf->b_next;
21671544Seschrock 
21681544Seschrock 	ASSERT(buf->b_data != NULL);
21692688Smaybee 	arc_buf_destroy(buf, FALSE, FALSE);
21701544Seschrock 
21711544Seschrock 	if (hdr->b_datacnt == 0) {
21721544Seschrock 		arc_state_t *old_state = hdr->b_state;
21731544Seschrock 		arc_state_t *evicted_state;
21741544Seschrock 
21751544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
21761544Seschrock 
21771544Seschrock 		evicted_state =
21781544Seschrock 		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
21791544Seschrock 
21801544Seschrock 		mutex_enter(&old_state->mtx);
21811544Seschrock 		mutex_enter(&evicted_state->mtx);
21821544Seschrock 
21831544Seschrock 		arc_change_state(evicted_state, hdr, hash_lock);
21841544Seschrock 		ASSERT(HDR_IN_HASH_TABLE(hdr));
21851544Seschrock 		hdr->b_flags = ARC_IN_HASH_TABLE;
21861544Seschrock 
21871544Seschrock 		mutex_exit(&evicted_state->mtx);
21881544Seschrock 		mutex_exit(&old_state->mtx);
21891544Seschrock 	}
21901544Seschrock 	mutex_exit(hash_lock);
21911819Smaybee 
21921544Seschrock 	VERIFY(buf->b_efunc(buf) == 0);
21931544Seschrock 	buf->b_efunc = NULL;
21941544Seschrock 	buf->b_private = NULL;
21951544Seschrock 	buf->b_hdr = NULL;
21961544Seschrock 	kmem_cache_free(buf_cache, buf);
21971544Seschrock 	return (1);
21981544Seschrock }
21991544Seschrock 
2200789Sahrens /*
2201789Sahrens  * Release this buffer from the cache.  This must be done
2202789Sahrens  * after a read and prior to modifying the buffer contents.
2203789Sahrens  * If the buffer has more than one reference, we must make
2204789Sahrens  * a new hdr for the buffer.
2205789Sahrens  */
2206789Sahrens void
2207789Sahrens arc_release(arc_buf_t *buf, void *tag)
2208789Sahrens {
2209789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2210789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2211789Sahrens 
2212789Sahrens 	/* this buffer is not on any list */
2213789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2214789Sahrens 
2215789Sahrens 	if (hdr->b_state == arc.anon) {
2216789Sahrens 		/* this buffer is already released */
2217789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2218789Sahrens 		ASSERT(BUF_EMPTY(hdr));
22191544Seschrock 		ASSERT(buf->b_efunc == NULL);
22203093Sahrens 		arc_buf_thaw(buf);
2221789Sahrens 		return;
2222789Sahrens 	}
2223789Sahrens 
2224789Sahrens 	mutex_enter(hash_lock);
2225789Sahrens 
22261544Seschrock 	/*
22271544Seschrock 	 * Do we have more than one buf?
22281544Seschrock 	 */
22291544Seschrock 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2230789Sahrens 		arc_buf_hdr_t *nhdr;
2231789Sahrens 		arc_buf_t **bufp;
2232789Sahrens 		uint64_t blksz = hdr->b_size;
2233789Sahrens 		spa_t *spa = hdr->b_spa;
22343290Sjohansen 		arc_buf_contents_t type = hdr->b_type;
2235789Sahrens 
22361544Seschrock 		ASSERT(hdr->b_datacnt > 1);
2237789Sahrens 		/*
2238789Sahrens 		 * Pull the data off of this buf and attach it to
2239789Sahrens 		 * a new anonymous buf.
2240789Sahrens 		 */
22411544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
2242789Sahrens 		bufp = &hdr->b_buf;
22431544Seschrock 		while (*bufp != buf)
2244789Sahrens 			bufp = &(*bufp)->b_next;
2245789Sahrens 		*bufp = (*bufp)->b_next;
22461544Seschrock 
2247789Sahrens 		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
2248789Sahrens 		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
22491544Seschrock 		if (refcount_is_zero(&hdr->b_refcnt)) {
22501544Seschrock 			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
22511544Seschrock 			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
22521544Seschrock 		}
22531544Seschrock 		hdr->b_datacnt -= 1;
22541544Seschrock 
2255789Sahrens 		mutex_exit(hash_lock);
2256789Sahrens 
2257789Sahrens 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2258789Sahrens 		nhdr->b_size = blksz;
2259789Sahrens 		nhdr->b_spa = spa;
22603290Sjohansen 		nhdr->b_type = type;
2261789Sahrens 		nhdr->b_buf = buf;
2262789Sahrens 		nhdr->b_state = arc.anon;
2263789Sahrens 		nhdr->b_arc_access = 0;
2264789Sahrens 		nhdr->b_flags = 0;
22651544Seschrock 		nhdr->b_datacnt = 1;
2266*3312Sahrens 		if (hdr->b_freeze_cksum != NULL) {
2267*3312Sahrens 			nhdr->b_freeze_cksum =
2268*3312Sahrens 			    kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
2269*3312Sahrens 			*nhdr->b_freeze_cksum = *hdr->b_freeze_cksum;
2270*3312Sahrens 		}
2271789Sahrens 		buf->b_hdr = nhdr;
2272789Sahrens 		buf->b_next = NULL;
2273789Sahrens 		(void) refcount_add(&nhdr->b_refcnt, tag);
2274789Sahrens 		atomic_add_64(&arc.anon->size, blksz);
2275789Sahrens 
2276789Sahrens 		hdr = nhdr;
2277789Sahrens 	} else {
22781544Seschrock 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2279789Sahrens 		ASSERT(!list_link_active(&hdr->b_arc_node));
2280789Sahrens 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2281789Sahrens 		arc_change_state(arc.anon, hdr, hash_lock);
2282789Sahrens 		hdr->b_arc_access = 0;
2283789Sahrens 		mutex_exit(hash_lock);
2284789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
2285789Sahrens 		hdr->b_birth = 0;
2286789Sahrens 		hdr->b_cksum0 = 0;
2287789Sahrens 	}
22881544Seschrock 	buf->b_efunc = NULL;
22891544Seschrock 	buf->b_private = NULL;
22903093Sahrens 	arc_buf_thaw(buf);
2291789Sahrens }
2292789Sahrens 
2293789Sahrens int
2294789Sahrens arc_released(arc_buf_t *buf)
2295789Sahrens {
22961544Seschrock 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
22971544Seschrock }
22981544Seschrock 
22991544Seschrock int
23001544Seschrock arc_has_callback(arc_buf_t *buf)
23011544Seschrock {
23021544Seschrock 	return (buf->b_efunc != NULL);
2303789Sahrens }
2304789Sahrens 
23051544Seschrock #ifdef ZFS_DEBUG
23061544Seschrock int
23071544Seschrock arc_referenced(arc_buf_t *buf)
23081544Seschrock {
23091544Seschrock 	return (refcount_count(&buf->b_hdr->b_refcnt));
23101544Seschrock }
23111544Seschrock #endif
23121544Seschrock 
2313789Sahrens static void
2314789Sahrens arc_write_done(zio_t *zio)
2315789Sahrens {
2316789Sahrens 	arc_buf_t *buf;
2317789Sahrens 	arc_buf_hdr_t *hdr;
2318789Sahrens 	arc_callback_t *acb;
2319789Sahrens 
2320789Sahrens 	buf = zio->io_private;
2321789Sahrens 	hdr = buf->b_hdr;
2322789Sahrens 	acb = hdr->b_acb;
2323789Sahrens 	hdr->b_acb = NULL;
23241544Seschrock 	ASSERT(acb != NULL);
2325789Sahrens 
2326789Sahrens 	/* this buffer is on no lists and is not in the hash table */
2327789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2328789Sahrens 
2329789Sahrens 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2330789Sahrens 	hdr->b_birth = zio->io_bp->blk_birth;
2331789Sahrens 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
23321544Seschrock 	/*
23331544Seschrock 	 * If the block to be written was all-zero, we may have
23341544Seschrock 	 * compressed it away.  In this case no write was performed
23351544Seschrock 	 * so there will be no dva/birth-date/checksum.  The buffer
23361544Seschrock 	 * must therefore remain anonymous (and uncached).
23371544Seschrock 	 */
2338789Sahrens 	if (!BUF_EMPTY(hdr)) {
2339789Sahrens 		arc_buf_hdr_t *exists;
2340789Sahrens 		kmutex_t *hash_lock;
2341789Sahrens 
23423093Sahrens 		arc_cksum_verify(buf);
23433093Sahrens 
2344789Sahrens 		exists = buf_hash_insert(hdr, &hash_lock);
2345789Sahrens 		if (exists) {
2346789Sahrens 			/*
2347789Sahrens 			 * This can only happen if we overwrite for
2348789Sahrens 			 * sync-to-convergence, because we remove
2349789Sahrens 			 * buffers from the hash table when we arc_free().
2350789Sahrens 			 */
2351789Sahrens 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2352789Sahrens 			    BP_IDENTITY(zio->io_bp)));
2353789Sahrens 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2354789Sahrens 			    zio->io_bp->blk_birth);
2355789Sahrens 
2356789Sahrens 			ASSERT(refcount_is_zero(&exists->b_refcnt));
2357789Sahrens 			arc_change_state(arc.anon, exists, hash_lock);
2358789Sahrens 			mutex_exit(hash_lock);
23591544Seschrock 			arc_hdr_destroy(exists);
2360789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2361789Sahrens 			ASSERT3P(exists, ==, NULL);
2362789Sahrens 		}
23631544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
23642688Smaybee 		arc_access(hdr, hash_lock);
23652688Smaybee 		mutex_exit(hash_lock);
23661544Seschrock 	} else if (acb->acb_done == NULL) {
23671544Seschrock 		int destroy_hdr;
23681544Seschrock 		/*
23691544Seschrock 		 * This is an anonymous buffer with no user callback;
23701544Seschrock 		 * destroy it if there are no active references.
23711544Seschrock 		 */
23721544Seschrock 		mutex_enter(&arc_eviction_mtx);
23731544Seschrock 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
23741544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
23751544Seschrock 		mutex_exit(&arc_eviction_mtx);
23761544Seschrock 		if (destroy_hdr)
23771544Seschrock 			arc_hdr_destroy(hdr);
23781544Seschrock 	} else {
23791544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2380789Sahrens 	}
23811544Seschrock 
23821544Seschrock 	if (acb->acb_done) {
2383789Sahrens 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2384789Sahrens 		acb->acb_done(zio, buf, acb->acb_private);
2385789Sahrens 	}
2386789Sahrens 
23871544Seschrock 	kmem_free(acb, sizeof (arc_callback_t));
2388789Sahrens }
2389789Sahrens 
2390789Sahrens int
23911775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2392789Sahrens     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2393789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
23941544Seschrock     uint32_t arc_flags, zbookmark_t *zb)
2395789Sahrens {
2396789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2397789Sahrens 	arc_callback_t	*acb;
2398789Sahrens 	zio_t	*rzio;
2399789Sahrens 
2400789Sahrens 	/* this is a private buffer - no locking required */
2401789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2402789Sahrens 	ASSERT(BUF_EMPTY(hdr));
2403789Sahrens 	ASSERT(!HDR_IO_ERROR(hdr));
24042237Smaybee 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
24052237Smaybee 	ASSERT(hdr->b_acb == 0);
2406789Sahrens 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2407789Sahrens 	acb->acb_done = done;
2408789Sahrens 	acb->acb_private = private;
2409789Sahrens 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
2410789Sahrens 	hdr->b_acb = acb;
24111544Seschrock 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
24123093Sahrens 	arc_cksum_compute(buf);
24131775Sbillm 	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
24141544Seschrock 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);
2415789Sahrens 
2416789Sahrens 	if (arc_flags & ARC_WAIT)
2417789Sahrens 		return (zio_wait(rzio));
2418789Sahrens 
2419789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2420789Sahrens 	zio_nowait(rzio);
2421789Sahrens 
2422789Sahrens 	return (0);
2423789Sahrens }
2424789Sahrens 
2425789Sahrens int
2426789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2427789Sahrens     zio_done_func_t *done, void *private, uint32_t arc_flags)
2428789Sahrens {
2429789Sahrens 	arc_buf_hdr_t *ab;
2430789Sahrens 	kmutex_t *hash_lock;
2431789Sahrens 	zio_t	*zio;
2432789Sahrens 
2433789Sahrens 	/*
2434789Sahrens 	 * If this buffer is in the cache, release it, so it
2435789Sahrens 	 * can be re-used.
2436789Sahrens 	 */
2437789Sahrens 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2438789Sahrens 	if (ab != NULL) {
2439789Sahrens 		/*
2440789Sahrens 		 * The checksum of blocks to free is not always
2441789Sahrens 		 * preserved (e.g. on the deadlist).  However, if it is
2442789Sahrens 		 * nonzero, it should match what we have in the cache.
2443789Sahrens 		 */
2444789Sahrens 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2445789Sahrens 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
24461990Smaybee 		if (ab->b_state != arc.anon)
24471990Smaybee 			arc_change_state(arc.anon, ab, hash_lock);
24482391Smaybee 		if (HDR_IO_IN_PROGRESS(ab)) {
24492391Smaybee 			/*
24502391Smaybee 			 * This should only happen when we prefetch.
24512391Smaybee 			 */
24522391Smaybee 			ASSERT(ab->b_flags & ARC_PREFETCH);
24532391Smaybee 			ASSERT3U(ab->b_datacnt, ==, 1);
24542391Smaybee 			ab->b_flags |= ARC_FREED_IN_READ;
24552391Smaybee 			if (HDR_IN_HASH_TABLE(ab))
24562391Smaybee 				buf_hash_remove(ab);
24572391Smaybee 			ab->b_arc_access = 0;
24582391Smaybee 			bzero(&ab->b_dva, sizeof (dva_t));
24592391Smaybee 			ab->b_birth = 0;
24602391Smaybee 			ab->b_cksum0 = 0;
24612391Smaybee 			ab->b_buf->b_efunc = NULL;
24622391Smaybee 			ab->b_buf->b_private = NULL;
24632391Smaybee 			mutex_exit(hash_lock);
24642391Smaybee 		} else if (refcount_is_zero(&ab->b_refcnt)) {
2465789Sahrens 			mutex_exit(hash_lock);
24661544Seschrock 			arc_hdr_destroy(ab);
2467789Sahrens 			atomic_add_64(&arc.deleted, 1);
2468789Sahrens 		} else {
24691589Smaybee 			/*
24702391Smaybee 			 * We still have an active reference on this
24712391Smaybee 			 * buffer.  This can happen, e.g., from
24722391Smaybee 			 * dbuf_unoverride().
24731589Smaybee 			 */
24742391Smaybee 			ASSERT(!HDR_IN_HASH_TABLE(ab));
2475789Sahrens 			ab->b_arc_access = 0;
2476789Sahrens 			bzero(&ab->b_dva, sizeof (dva_t));
2477789Sahrens 			ab->b_birth = 0;
2478789Sahrens 			ab->b_cksum0 = 0;
24791544Seschrock 			ab->b_buf->b_efunc = NULL;
24801544Seschrock 			ab->b_buf->b_private = NULL;
2481789Sahrens 			mutex_exit(hash_lock);
2482789Sahrens 		}
2483789Sahrens 	}
2484789Sahrens 
2485789Sahrens 	zio = zio_free(pio, spa, txg, bp, done, private);
2486789Sahrens 
2487789Sahrens 	if (arc_flags & ARC_WAIT)
2488789Sahrens 		return (zio_wait(zio));
2489789Sahrens 
2490789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2491789Sahrens 	zio_nowait(zio);
2492789Sahrens 
2493789Sahrens 	return (0);
2494789Sahrens }
2495789Sahrens 
2496789Sahrens void
2497789Sahrens arc_tempreserve_clear(uint64_t tempreserve)
2498789Sahrens {
2499789Sahrens 	atomic_add_64(&arc_tempreserve, -tempreserve);
2500789Sahrens 	ASSERT((int64_t)arc_tempreserve >= 0);
2501789Sahrens }
2502789Sahrens 
2503789Sahrens int
2504789Sahrens arc_tempreserve_space(uint64_t tempreserve)
2505789Sahrens {
2506789Sahrens #ifdef ZFS_DEBUG
2507789Sahrens 	/*
2508789Sahrens 	 * Once in a while, fail for no reason.  Everything should cope.
2509789Sahrens 	 */
2510789Sahrens 	if (spa_get_random(10000) == 0) {
2511789Sahrens 		dprintf("forcing random failure\n");
2512789Sahrens 		return (ERESTART);
2513789Sahrens 	}
2514789Sahrens #endif
2515982Smaybee 	if (tempreserve > arc.c/4 && !arc.no_grow)
2516982Smaybee 		arc.c = MIN(arc.c_max, tempreserve * 4);
2517982Smaybee 	if (tempreserve > arc.c)
2518982Smaybee 		return (ENOMEM);
2519982Smaybee 
2520789Sahrens 	/*
2521982Smaybee 	 * Throttle writes when the amount of dirty data in the cache
2522982Smaybee 	 * gets too large.  We try to keep the cache less than half full
2523982Smaybee 	 * of dirty blocks so that our sync times don't grow too large.
2524982Smaybee 	 * Note: if two requests come in concurrently, we might let them
2525982Smaybee 	 * both succeed, when one of them should fail.  Not a huge deal.
2526982Smaybee 	 *
2527982Smaybee 	 * XXX The limit should be adjusted dynamically to keep the time
2528982Smaybee 	 * to sync a dataset fixed (around 1-5 seconds?).
2529789Sahrens 	 */
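	/*
	 * Worked example (illustrative only): with arc.c = 1GB, the check
	 * below returns ERESTART once the new reservation plus outstanding
	 * reservations plus anonymous data would exceed 512MB and the
	 * outstanding reservations plus anonymous data already exceed 256MB.
	 */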
2530789Sahrens 
2531982Smaybee 	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
2532982Smaybee 	    arc_tempreserve + arc.anon->size > arc.c / 4) {
2533789Sahrens 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2534789Sahrens 		    "tempreserve=%lluK arc.c=%lluK\n",
2535789Sahrens 		    arc_tempreserve>>10, arc.anon->lsize>>10,
2536789Sahrens 		    tempreserve>>10, arc.c>>10);
2537789Sahrens 		return (ERESTART);
2538789Sahrens 	}
2539789Sahrens 	atomic_add_64(&arc_tempreserve, tempreserve);
2540789Sahrens 	return (0);
2541789Sahrens }
2542789Sahrens 
2543789Sahrens void
2544789Sahrens arc_init(void)
2545789Sahrens {
2546789Sahrens 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2547789Sahrens 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2548789Sahrens 
25492391Smaybee 	/* Convert seconds to clock ticks */
25502638Sperrin 	arc_min_prefetch_lifespan = 1 * hz;
25512391Smaybee 
2552789Sahrens 	/* Start out with 1/8 of all memory */
2553789Sahrens 	arc.c = physmem * PAGESIZE / 8;
2554789Sahrens 
2555789Sahrens #ifdef _KERNEL
2556789Sahrens 	/*
2557789Sahrens 	 * On architectures where the physical memory can be larger
2558789Sahrens 	 * than the addressable space (intel in 32-bit mode), we may
2559789Sahrens 	 * need to limit the cache to 1/8 of VM size.
2560789Sahrens 	 */
2561789Sahrens 	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2562789Sahrens #endif
2563789Sahrens 
2564982Smaybee 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2565789Sahrens 	arc.c_min = MAX(arc.c / 4, 64<<20);
2566982Smaybee 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2567789Sahrens 	if (arc.c * 8 >= 1<<30)
2568789Sahrens 		arc.c_max = (arc.c * 8) - (1<<30);
2569789Sahrens 	else
2570789Sahrens 		arc.c_max = arc.c_min;
2571789Sahrens 	arc.c_max = MAX(arc.c * 6, arc.c_max);
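	/*
	 * Illustrative defaults (before the tunables below are applied): on
	 * a machine with 4GB of physical memory, arc.c starts at 512MB,
	 * arc.c_min is MAX(128MB, 64MB) = 128MB, and arc.c_max works out to
	 * MAX(3GB, 4GB - 1GB) = 3GB.
	 */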
25722885Sahrens 
25732885Sahrens 	/*
25742885Sahrens 	 * Allow the tunables to override our calculations if they are
25752885Sahrens 	 * reasonable (ie. over 64MB)
25762885Sahrens 	 * reasonable (i.e. over 64MB)
25772885Sahrens 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
25782885Sahrens 		arc.c_max = zfs_arc_max;
25792885Sahrens 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc.c_max)
25802885Sahrens 		arc.c_min = zfs_arc_min;
25812885Sahrens 
2582789Sahrens 	arc.c = arc.c_max;
2583789Sahrens 	arc.p = (arc.c >> 1);
2584789Sahrens 
2585789Sahrens 	/* if kmem_flags are set, lets try to use less memory */
2586789Sahrens 	if (kmem_debugging())
2587789Sahrens 		arc.c = arc.c / 2;
2588789Sahrens 	if (arc.c < arc.c_min)
2589789Sahrens 		arc.c = arc.c_min;
2590789Sahrens 
2591789Sahrens 	arc.anon = &ARC_anon;
25921544Seschrock 	arc.mru = &ARC_mru;
25931544Seschrock 	arc.mru_ghost = &ARC_mru_ghost;
25941544Seschrock 	arc.mfu = &ARC_mfu;
25951544Seschrock 	arc.mfu_ghost = &ARC_mfu_ghost;
25961544Seschrock 	arc.size = 0;
2597789Sahrens 
25982688Smaybee 	arc.hits = 0;
25992688Smaybee 	arc.recycle_miss = 0;
26002688Smaybee 	arc.evict_skip = 0;
26012688Smaybee 	arc.mutex_miss = 0;
26022688Smaybee 
26032856Snd150628 	mutex_init(&arc.anon->mtx, NULL, MUTEX_DEFAULT, NULL);
26042856Snd150628 	mutex_init(&arc.mru->mtx, NULL, MUTEX_DEFAULT, NULL);
26052856Snd150628 	mutex_init(&arc.mru_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
26062856Snd150628 	mutex_init(&arc.mfu->mtx, NULL, MUTEX_DEFAULT, NULL);
26072856Snd150628 	mutex_init(&arc.mfu_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
26082856Snd150628 
26091544Seschrock 	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
2610789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
26111544Seschrock 	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
2612789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
26131544Seschrock 	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
2614789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
26151544Seschrock 	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
2616789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
2617789Sahrens 
2618789Sahrens 	buf_init();
2619789Sahrens 
2620789Sahrens 	arc_thread_exit = 0;
26211544Seschrock 	arc_eviction_list = NULL;
26221544Seschrock 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
26232887Smaybee 	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
2624789Sahrens 
2625789Sahrens 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2626789Sahrens 	    TS_RUN, minclsyspri);
26273158Smaybee 
26283158Smaybee 	arc_dead = FALSE;
2629789Sahrens }
2630789Sahrens 
2631789Sahrens void
2632789Sahrens arc_fini(void)
2633789Sahrens {
2634789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
2635789Sahrens 	arc_thread_exit = 1;
2636789Sahrens 	while (arc_thread_exit != 0)
2637789Sahrens 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2638789Sahrens 	mutex_exit(&arc_reclaim_thr_lock);
2639789Sahrens 
2640789Sahrens 	arc_flush();
2641789Sahrens 
2642789Sahrens 	arc_dead = TRUE;
2643789Sahrens 
26441544Seschrock 	mutex_destroy(&arc_eviction_mtx);
2645789Sahrens 	mutex_destroy(&arc_reclaim_thr_lock);
2646789Sahrens 	cv_destroy(&arc_reclaim_thr_cv);
2647789Sahrens 
26481544Seschrock 	list_destroy(&arc.mru->list);
26491544Seschrock 	list_destroy(&arc.mru_ghost->list);
26501544Seschrock 	list_destroy(&arc.mfu->list);
26511544Seschrock 	list_destroy(&arc.mfu_ghost->list);
2652789Sahrens 
26532856Snd150628 	mutex_destroy(&arc.anon->mtx);
26542856Snd150628 	mutex_destroy(&arc.mru->mtx);
26552856Snd150628 	mutex_destroy(&arc.mru_ghost->mtx);
26562856Snd150628 	mutex_destroy(&arc.mfu->mtx);
26572856Snd150628 	mutex_destroy(&arc.mfu_ghost->mtx);
26582856Snd150628 
2659789Sahrens 	buf_fini();
2660789Sahrens }
2661