xref: /onnv-gate/usr/src/uts/common/fs/zfs/arc.c (revision 3307:08dd9db5d94e)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51484Sek110237  * Common Development and Distribution License (the "License").
61484Sek110237  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
221484Sek110237  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
26789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27789Sahrens 
28789Sahrens /*
29789Sahrens  * DVA-based Adjustable Replacement Cache
30789Sahrens  *
311544Seschrock  * While much of the theory of operation used here is
321544Seschrock  * based on the self-tuning, low overhead replacement cache
33789Sahrens  * presented by Megiddo and Modha at FAST 2003, there are some
34789Sahrens  * significant differences:
35789Sahrens  *
36789Sahrens  * 1. The Megiddo and Modha model assumes any page is evictable.
37789Sahrens  * Pages in its cache cannot be "locked" into memory.  This makes
38789Sahrens  * the eviction algorithm simple: evict the last page in the list.
39789Sahrens  * This also makes the performance characteristics easy to reason
40789Sahrens  * about.  Our cache is not so simple.  At any given moment, some
41789Sahrens  * subset of the blocks in the cache are un-evictable because we
42789Sahrens  * have handed out a reference to them.  Blocks are only evictable
43789Sahrens  * when there are no external references active.  This makes
44789Sahrens  * eviction far more problematic:  we choose to evict the evictable
45789Sahrens  * blocks that are the "lowest" in the list.
46789Sahrens  *
47789Sahrens  * There are times when it is not possible to evict the requested
48789Sahrens  * space.  In these circumstances we are unable to adjust the cache
49789Sahrens  * size.  To prevent the cache from growing unbounded at these times, we
50789Sahrens  * implement a "cache throttle" that slows the flow of new data
51789Sahrens  * into the cache until we can make space available.
52789Sahrens  *
53789Sahrens  * 2. The Megiddo and Modha model assumes a fixed cache size.
54789Sahrens  * Pages are evicted when the cache is full and there is a cache
55789Sahrens  * miss.  Our model has a variable sized cache.  It grows with
56789Sahrens  * high use, but also tries to react to memory pressure from the
57789Sahrens  * operating system: decreasing its size when system memory is
58789Sahrens  * tight.
59789Sahrens  *
60789Sahrens  * 3. The Megiddo and Modha model assumes a fixed page size. All
61789Sahrens  * elements of the cache are therefore exactly the same size.  So
62789Sahrens  * when adjusting the cache size following a cache miss, it's simply
63789Sahrens  * a matter of choosing a single page to evict.  In our model, we
64789Sahrens  * have variable sized cache blocks (ranging from 512 bytes to
65789Sahrens  * 128K bytes).  We therefore choose a set of blocks to evict to make
66789Sahrens  * space for a cache miss that approximates as closely as possible
67789Sahrens  * the space used by the new block.
68789Sahrens  *
69789Sahrens  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70789Sahrens  * by N. Megiddo & D. Modha, FAST 2003
71789Sahrens  */
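
/*
 * Illustrative sketch (not part of the original text) of difference (3)
 * above: because blocks vary in size, eviction must accumulate victims
 * until enough space has been freed.  Compare arc_evict() later in this
 * file, which implements the real version of this loop.
 *
 *	uint64_t freed = 0;
 *	for (ab = list_tail(&state->list); ab && freed < bytes;
 *	    ab = list_prev(&state->list, ab)) {
 *		if (refcount_count(&ab->b_refcnt) == 0)
 *			freed += ab->b_size;	(evictable: count it)
 *	}
 */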
72789Sahrens 
73789Sahrens /*
74789Sahrens  * The locking model:
75789Sahrens  *
76789Sahrens  * A new reference to a cache buffer can be obtained in two
77789Sahrens  * ways: 1) via a hash table lookup using the DVA as a key,
78789Sahrens  * or 2) via one of the ARC lists.  The arc_read() interface
79789Sahrens  * uses method 1, while the internal arc algorithms for
80789Sahrens  * adjusting the cache use method 2.  We therefore provide two
81789Sahrens  * types of locks: 1) the hash table lock array, and 2) the
82789Sahrens  * arc list locks.
83789Sahrens  *
84789Sahrens  * Buffers do not have their own mutexes, rather they rely on the
85789Sahrens  * hash table mutexes for the bulk of their protection (i.e. most
86789Sahrens  * fields in the arc_buf_hdr_t are protected by these mutexes).
87789Sahrens  *
88789Sahrens  * buf_hash_find() returns the appropriate mutex (held) when it
89789Sahrens  * locates the requested buffer in the hash table.  It returns
90789Sahrens  * NULL for the mutex if the buffer was not in the table.
91789Sahrens  *
92789Sahrens  * buf_hash_remove() expects the appropriate hash mutex to be
93789Sahrens  * already held before it is invoked.
94789Sahrens  *
95789Sahrens  * Each arc state also has a mutex which is used to protect the
96789Sahrens  * buffer list associated with the state.  When attempting to
97789Sahrens  * obtain a hash table lock while holding an arc list lock, you
98789Sahrens  * must use mutex_tryenter() to avoid deadlock.  Also note that
992688Smaybee  * the active state mutex must be held before the ghost state mutex.
100789Sahrens  *
1011544Seschrock  * Arc buffers may have an associated eviction callback function.
1021544Seschrock  * This function will be invoked prior to removing the buffer (e.g.
1031544Seschrock  * in arc_do_user_evicts()).  Note however that the data associated
1041544Seschrock  * with the buffer may be evicted prior to the callback.  The callback
1051544Seschrock  * must be made with *no locks held* (to prevent deadlock).  Additionally,
1061544Seschrock  * the users of callbacks must ensure that their private data is
1071544Seschrock  * protected from simultaneous callbacks from arc_buf_evict()
1081544Seschrock  * and arc_do_user_evicts().
1091544Seschrock  *
110789Sahrens  * Note that the majority of the performance stats are manipulated
111789Sahrens  * with atomic operations.
112789Sahrens  */
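
/*
 * Illustrative sketch of the lock ordering described above (not part of
 * the original text): while holding an arc list lock, the hash table
 * lock may only be taken with mutex_tryenter(), never mutex_enter().
 *
 *	mutex_enter(&state->mtx);		(arc list lock)
 *	hash_lock = HDR_LOCK(ab);
 *	if (mutex_tryenter(hash_lock)) {
 *		... manipulate ab ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->mtx);
 */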
113789Sahrens 
114789Sahrens #include <sys/spa.h>
115789Sahrens #include <sys/zio.h>
1163093Sahrens #include <sys/zio_checksum.h>
117789Sahrens #include <sys/zfs_context.h>
118789Sahrens #include <sys/arc.h>
119789Sahrens #include <sys/refcount.h>
120789Sahrens #ifdef _KERNEL
121789Sahrens #include <sys/vmsystm.h>
122789Sahrens #include <vm/anon.h>
123789Sahrens #include <sys/fs/swapnode.h>
1241484Sek110237 #include <sys/dnlc.h>
125789Sahrens #endif
126789Sahrens #include <sys/callb.h>
127789Sahrens 
128789Sahrens static kmutex_t		arc_reclaim_thr_lock;
129789Sahrens static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
130789Sahrens static uint8_t		arc_thread_exit;
131789Sahrens 
1321484Sek110237 #define	ARC_REDUCE_DNLC_PERCENT	3
1331484Sek110237 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
1341484Sek110237 
135789Sahrens typedef enum arc_reclaim_strategy {
136789Sahrens 	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
137789Sahrens 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
138789Sahrens } arc_reclaim_strategy_t;
139789Sahrens 
140789Sahrens /* number of seconds before growing cache again */
141789Sahrens static int		arc_grow_retry = 60;
142789Sahrens 
1432391Smaybee /*
1442638Sperrin  * minimum lifespan of a prefetch block in clock ticks
1452638Sperrin  * (initialized in arc_init())
1462391Smaybee  */
1472638Sperrin static int		arc_min_prefetch_lifespan;
1482391Smaybee 
149789Sahrens static int arc_dead;
150789Sahrens 
151789Sahrens /*
1522885Sahrens  * These tunables are for performance analysis.
1532885Sahrens  */
1542885Sahrens uint64_t zfs_arc_max;
1552885Sahrens uint64_t zfs_arc_min;
1562885Sahrens 
1572885Sahrens /*
158789Sahrens  * Note that buffers can be in one of 5 states:
159789Sahrens  *	ARC_anon	- anonymous (discussed below)
1601544Seschrock  *	ARC_mru		- recently used, currently cached
1611544Seschrock  *	ARC_mru_ghost	- recently used, no longer in cache
1621544Seschrock  *	ARC_mfu		- frequently used, currently cached
1631544Seschrock  *	ARC_mfu_ghost	- frequently used, no longer in cache
164789Sahrens  * When there are no active references to a buffer, it is
165789Sahrens  * linked onto one of the lists in arc.  These are the
166789Sahrens  * only buffers that can be evicted or deleted.
167789Sahrens  *
168789Sahrens  * Anonymous buffers are buffers that are not associated with
169789Sahrens  * a DVA.  These are buffers that hold dirty block copies
170789Sahrens  * before they are written to stable storage.  By definition,
1711544Seschrock  * they are "ref'd" and are considered part of arc_mru
172789Sahrens  * that cannot be freed.  Generally, they will acquire a DVA
1731544Seschrock  * as they are written and migrate onto the arc_mru list.
174789Sahrens  */
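
/*
 * Illustrative lifecycle of an anonymous buffer (a sketch, not part of
 * the original text):
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
 *	(buf->b_hdr->b_state == arc.anon; fill buf->b_data with dirty data)
 *	(a later write assigns the header a DVA, hashes it into the table,
 *	 and migrates it onto the arc.mru list)
 */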
175789Sahrens 
176789Sahrens typedef struct arc_state {
177789Sahrens 	list_t	list;	/* linked list of evictable buffers in state */
178789Sahrens 	uint64_t lsize;	/* total size of buffers in the linked list */
179789Sahrens 	uint64_t size;	/* total size of all buffers in this state */
180789Sahrens 	uint64_t hits;
181789Sahrens 	kmutex_t mtx;
182789Sahrens } arc_state_t;
183789Sahrens 
184789Sahrens /* The 5 states: */
185789Sahrens static arc_state_t ARC_anon;
1861544Seschrock static arc_state_t ARC_mru;
1871544Seschrock static arc_state_t ARC_mru_ghost;
1881544Seschrock static arc_state_t ARC_mfu;
1891544Seschrock static arc_state_t ARC_mfu_ghost;
190789Sahrens 
191789Sahrens static struct arc {
192789Sahrens 	arc_state_t 	*anon;
1931544Seschrock 	arc_state_t	*mru;
1941544Seschrock 	arc_state_t	*mru_ghost;
1951544Seschrock 	arc_state_t	*mfu;
1961544Seschrock 	arc_state_t	*mfu_ghost;
197789Sahrens 	uint64_t	size;		/* Actual total arc size */
1981544Seschrock 	uint64_t	p;		/* Target size (in bytes) of mru */
199789Sahrens 	uint64_t	c;		/* Target size of cache (in bytes) */
200789Sahrens 	uint64_t	c_min;		/* Minimum target cache size */
201789Sahrens 	uint64_t	c_max;		/* Maximum target cache size */
202789Sahrens 
203789Sahrens 	/* performance stats */
204789Sahrens 	uint64_t	hits;
205789Sahrens 	uint64_t	misses;
206789Sahrens 	uint64_t	deleted;
2072688Smaybee 	uint64_t	recycle_miss;
2082688Smaybee 	uint64_t	mutex_miss;
2092688Smaybee 	uint64_t	evict_skip;
210789Sahrens 	uint64_t	hash_elements;
211789Sahrens 	uint64_t	hash_elements_max;
212789Sahrens 	uint64_t	hash_collisions;
213789Sahrens 	uint64_t	hash_chains;
214789Sahrens 	uint32_t	hash_chain_max;
215789Sahrens 
216789Sahrens 	int		no_grow;	/* Don't try to grow cache size */
217789Sahrens } arc;
218789Sahrens 
219789Sahrens static uint64_t arc_tempreserve;
220789Sahrens 
221789Sahrens typedef struct arc_callback arc_callback_t;
222789Sahrens 
223789Sahrens struct arc_callback {
224789Sahrens 	arc_done_func_t		*acb_done;
225789Sahrens 	void			*acb_private;
226789Sahrens 	arc_byteswap_func_t	*acb_byteswap;
227789Sahrens 	arc_buf_t		*acb_buf;
228789Sahrens 	zio_t			*acb_zio_dummy;
229789Sahrens 	arc_callback_t		*acb_next;
230789Sahrens };
231789Sahrens 
232789Sahrens struct arc_buf_hdr {
233789Sahrens 	/* protected by hash lock */
234789Sahrens 	dva_t			b_dva;
235789Sahrens 	uint64_t		b_birth;
236789Sahrens 	uint64_t		b_cksum0;
237789Sahrens 
2383093Sahrens 	kmutex_t		b_freeze_lock;
2393093Sahrens 	zio_cksum_t		*b_freeze_cksum;
2403093Sahrens 
241789Sahrens 	arc_buf_hdr_t		*b_hash_next;
242789Sahrens 	arc_buf_t		*b_buf;
243789Sahrens 	uint32_t		b_flags;
2441544Seschrock 	uint32_t		b_datacnt;
245789Sahrens 
2463290Sjohansen 	arc_callback_t		*b_acb;
247789Sahrens 	kcondvar_t		b_cv;
2483290Sjohansen 
2493290Sjohansen 	/* immutable */
2503290Sjohansen 	arc_buf_contents_t	b_type;
2513290Sjohansen 	uint64_t		b_size;
2523290Sjohansen 	spa_t			*b_spa;
253789Sahrens 
254789Sahrens 	/* protected by arc state mutex */
255789Sahrens 	arc_state_t		*b_state;
256789Sahrens 	list_node_t		b_arc_node;
257789Sahrens 
258789Sahrens 	/* updated atomically */
259789Sahrens 	clock_t			b_arc_access;
260789Sahrens 
261789Sahrens 	/* self protecting */
262789Sahrens 	refcount_t		b_refcnt;
263789Sahrens };
264789Sahrens 
2651544Seschrock static arc_buf_t *arc_eviction_list;
2661544Seschrock static kmutex_t arc_eviction_mtx;
2672887Smaybee static arc_buf_hdr_t arc_eviction_hdr;
2682688Smaybee static void arc_get_data_buf(arc_buf_t *buf);
2692688Smaybee static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
2701544Seschrock 
2711544Seschrock #define	GHOST_STATE(state)	\
2721544Seschrock 	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)
2731544Seschrock 
274789Sahrens /*
275789Sahrens  * Private ARC flags.  These are private ARC-only flags that will show up
276789Sahrens  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
277789Sahrens  * be passed in as arc_flags in things like arc_read.  However, these flags
278789Sahrens  * should never be passed and should only be set by ARC code.  When adding new
279789Sahrens  * public flags, make sure not to smash the private ones.
280789Sahrens  */
281789Sahrens 
2821544Seschrock #define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
283789Sahrens #define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
284789Sahrens #define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
285789Sahrens #define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
2861544Seschrock #define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
2872391Smaybee #define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
288789Sahrens 
2891544Seschrock #define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
290789Sahrens #define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
291789Sahrens #define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
292789Sahrens #define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
2931544Seschrock #define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
294789Sahrens 
295789Sahrens /*
296789Sahrens  * Hash table routines
297789Sahrens  */
298789Sahrens 
299789Sahrens #define	HT_LOCK_PAD	64
300789Sahrens 
301789Sahrens struct ht_lock {
302789Sahrens 	kmutex_t	ht_lock;
303789Sahrens #ifdef _KERNEL
304789Sahrens 	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
305789Sahrens #endif
306789Sahrens };
307789Sahrens 
308789Sahrens #define	BUF_LOCKS 256
309789Sahrens typedef struct buf_hash_table {
310789Sahrens 	uint64_t ht_mask;
311789Sahrens 	arc_buf_hdr_t **ht_table;
312789Sahrens 	struct ht_lock ht_locks[BUF_LOCKS];
313789Sahrens } buf_hash_table_t;
314789Sahrens 
315789Sahrens static buf_hash_table_t buf_hash_table;
316789Sahrens 
317789Sahrens #define	BUF_HASH_INDEX(spa, dva, birth) \
318789Sahrens 	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
319789Sahrens #define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
320789Sahrens #define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
321789Sahrens #define	HDR_LOCK(buf) \
322789Sahrens 	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
323789Sahrens 
324789Sahrens uint64_t zfs_crc64_table[256];
325789Sahrens 
326789Sahrens static uint64_t
327789Sahrens buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
328789Sahrens {
329789Sahrens 	uintptr_t spav = (uintptr_t)spa;
330789Sahrens 	uint8_t *vdva = (uint8_t *)dva;
331789Sahrens 	uint64_t crc = -1ULL;
332789Sahrens 	int i;
333789Sahrens 
334789Sahrens 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
335789Sahrens 
336789Sahrens 	for (i = 0; i < sizeof (dva_t); i++)
337789Sahrens 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
338789Sahrens 
339789Sahrens 	crc ^= (spav>>8) ^ birth;
340789Sahrens 
341789Sahrens 	return (crc);
342789Sahrens }
343789Sahrens 
344789Sahrens #define	BUF_EMPTY(buf)						\
345789Sahrens 	((buf)->b_dva.dva_word[0] == 0 &&			\
346789Sahrens 	(buf)->b_dva.dva_word[1] == 0 &&			\
347789Sahrens 	(buf)->b_birth == 0)
348789Sahrens 
349789Sahrens #define	BUF_EQUAL(spa, dva, birth, buf)				\
350789Sahrens 	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
351789Sahrens 	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
352789Sahrens 	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
353789Sahrens 
354789Sahrens static arc_buf_hdr_t *
355789Sahrens buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
356789Sahrens {
357789Sahrens 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
358789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
359789Sahrens 	arc_buf_hdr_t *buf;
360789Sahrens 
361789Sahrens 	mutex_enter(hash_lock);
362789Sahrens 	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
363789Sahrens 	    buf = buf->b_hash_next) {
364789Sahrens 		if (BUF_EQUAL(spa, dva, birth, buf)) {
365789Sahrens 			*lockp = hash_lock;
366789Sahrens 			return (buf);
367789Sahrens 		}
368789Sahrens 	}
369789Sahrens 	mutex_exit(hash_lock);
370789Sahrens 	*lockp = NULL;
371789Sahrens 	return (NULL);
372789Sahrens }
373789Sahrens 
374789Sahrens /*
375789Sahrens  * Insert an entry into the hash table.  If there is already an element
376789Sahrens  * equal to elem in the hash table, then the already existing element
377789Sahrens  * will be returned and the new element will not be inserted.
378789Sahrens  * Otherwise returns NULL.
379789Sahrens  */
380789Sahrens static arc_buf_hdr_t *
381789Sahrens buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
382789Sahrens {
383789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
384789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
385789Sahrens 	arc_buf_hdr_t *fbuf;
386789Sahrens 	uint32_t max, i;
387789Sahrens 
3881544Seschrock 	ASSERT(!HDR_IN_HASH_TABLE(buf));
389789Sahrens 	*lockp = hash_lock;
390789Sahrens 	mutex_enter(hash_lock);
391789Sahrens 	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
392789Sahrens 	    fbuf = fbuf->b_hash_next, i++) {
393789Sahrens 		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
394789Sahrens 			return (fbuf);
395789Sahrens 	}
396789Sahrens 
397789Sahrens 	buf->b_hash_next = buf_hash_table.ht_table[idx];
398789Sahrens 	buf_hash_table.ht_table[idx] = buf;
3991544Seschrock 	buf->b_flags |= ARC_IN_HASH_TABLE;
400789Sahrens 
401789Sahrens 	/* collect some hash table performance data */
402789Sahrens 	if (i > 0) {
403789Sahrens 		atomic_add_64(&arc.hash_collisions, 1);
404789Sahrens 		if (i == 1)
405789Sahrens 			atomic_add_64(&arc.hash_chains, 1);
406789Sahrens 	}
407789Sahrens 	while (i > (max = arc.hash_chain_max) &&
408789Sahrens 	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
409789Sahrens 		continue;
410789Sahrens 	}
411789Sahrens 	atomic_add_64(&arc.hash_elements, 1);
412789Sahrens 	if (arc.hash_elements > arc.hash_elements_max)
413789Sahrens 		atomic_add_64(&arc.hash_elements_max, 1);
414789Sahrens 
415789Sahrens 	return (NULL);
416789Sahrens }
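
/*
 * Illustrative use of buf_hash_insert() (a sketch, not from the original
 * file): the caller owns the returned (held) hash lock in both the
 * inserted and already-present cases.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... an equal header was already present; use it instead ...
 *	}
 *	mutex_exit(hash_lock);
 */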
417789Sahrens 
418789Sahrens static void
419789Sahrens buf_hash_remove(arc_buf_hdr_t *buf)
420789Sahrens {
421789Sahrens 	arc_buf_hdr_t *fbuf, **bufp;
422789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
423789Sahrens 
424789Sahrens 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
4251544Seschrock 	ASSERT(HDR_IN_HASH_TABLE(buf));
426789Sahrens 
427789Sahrens 	bufp = &buf_hash_table.ht_table[idx];
428789Sahrens 	while ((fbuf = *bufp) != buf) {
429789Sahrens 		ASSERT(fbuf != NULL);
430789Sahrens 		bufp = &fbuf->b_hash_next;
431789Sahrens 	}
432789Sahrens 	*bufp = buf->b_hash_next;
433789Sahrens 	buf->b_hash_next = NULL;
4341544Seschrock 	buf->b_flags &= ~ARC_IN_HASH_TABLE;
435789Sahrens 
436789Sahrens 	/* collect some hash table performance data */
437789Sahrens 	atomic_add_64(&arc.hash_elements, -1);
438789Sahrens 	if (buf_hash_table.ht_table[idx] &&
439789Sahrens 	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
440789Sahrens 		atomic_add_64(&arc.hash_chains, -1);
441789Sahrens }
442789Sahrens 
443789Sahrens /*
444789Sahrens  * Global data structures and functions for the buf kmem cache.
445789Sahrens  */
446789Sahrens static kmem_cache_t *hdr_cache;
447789Sahrens static kmem_cache_t *buf_cache;
448789Sahrens 
449789Sahrens static void
450789Sahrens buf_fini(void)
451789Sahrens {
452789Sahrens 	int i;
453789Sahrens 
454789Sahrens 	kmem_free(buf_hash_table.ht_table,
455789Sahrens 	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
456789Sahrens 	for (i = 0; i < BUF_LOCKS; i++)
457789Sahrens 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
458789Sahrens 	kmem_cache_destroy(hdr_cache);
459789Sahrens 	kmem_cache_destroy(buf_cache);
460789Sahrens }
461789Sahrens 
462789Sahrens /*
463789Sahrens  * Constructor callback - called when the cache is empty
464789Sahrens  * and a new buf is requested.
465789Sahrens  */
466789Sahrens /* ARGSUSED */
467789Sahrens static int
468789Sahrens hdr_cons(void *vbuf, void *unused, int kmflag)
469789Sahrens {
470789Sahrens 	arc_buf_hdr_t *buf = vbuf;
471789Sahrens 
472789Sahrens 	bzero(buf, sizeof (arc_buf_hdr_t));
473789Sahrens 	refcount_create(&buf->b_refcnt);
474789Sahrens 	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
475789Sahrens 	return (0);
476789Sahrens }
477789Sahrens 
478789Sahrens /*
479789Sahrens  * Destructor callback - called when a cached buf is
480789Sahrens  * no longer required.
481789Sahrens  */
482789Sahrens /* ARGSUSED */
483789Sahrens static void
484789Sahrens hdr_dest(void *vbuf, void *unused)
485789Sahrens {
486789Sahrens 	arc_buf_hdr_t *buf = vbuf;
487789Sahrens 
488789Sahrens 	refcount_destroy(&buf->b_refcnt);
489789Sahrens 	cv_destroy(&buf->b_cv);
490789Sahrens }
491789Sahrens 
492789Sahrens /*
493789Sahrens  * Reclaim callback -- invoked when memory is low.
494789Sahrens  */
495789Sahrens /* ARGSUSED */
496789Sahrens static void
497789Sahrens hdr_recl(void *unused)
498789Sahrens {
499789Sahrens 	dprintf("hdr_recl called\n");
5003158Smaybee 	/*
5013158Smaybee 	 * umem calls the reclaim func when we destroy the buf cache,
5023158Smaybee 	 * which is after we do arc_fini().
5033158Smaybee 	 */
5043158Smaybee 	if (!arc_dead)
5053158Smaybee 		cv_signal(&arc_reclaim_thr_cv);
506789Sahrens }
507789Sahrens 
508789Sahrens static void
509789Sahrens buf_init(void)
510789Sahrens {
511789Sahrens 	uint64_t *ct;
5121544Seschrock 	uint64_t hsize = 1ULL << 12;
513789Sahrens 	int i, j;
514789Sahrens 
515789Sahrens 	/*
516789Sahrens 	 * The hash table is big enough to fill all of physical memory
5171544Seschrock 	 * with an average 64K block size.  The table will take up
5181544Seschrock 	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
519789Sahrens 	 */
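	/*
	 * Worked example (illustrative): with 4GB of physical memory and
	 * 8-byte pointers, the loop below stops at hsize = 65536, i.e. a
	 * 64K-entry table occupying 64K * 8 = 512KB (128KB per GB).
	 */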
5201544Seschrock 	while (hsize * 65536 < physmem * PAGESIZE)
521789Sahrens 		hsize <<= 1;
5221544Seschrock retry:
523789Sahrens 	buf_hash_table.ht_mask = hsize - 1;
5241544Seschrock 	buf_hash_table.ht_table =
5251544Seschrock 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
5261544Seschrock 	if (buf_hash_table.ht_table == NULL) {
5271544Seschrock 		ASSERT(hsize > (1ULL << 8));
5281544Seschrock 		hsize >>= 1;
5291544Seschrock 		goto retry;
5301544Seschrock 	}
531789Sahrens 
532789Sahrens 	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
533789Sahrens 	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
534789Sahrens 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
535789Sahrens 	    0, NULL, NULL, NULL, NULL, NULL, 0);
536789Sahrens 
537789Sahrens 	for (i = 0; i < 256; i++)
538789Sahrens 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
539789Sahrens 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
540789Sahrens 
541789Sahrens 	for (i = 0; i < BUF_LOCKS; i++) {
542789Sahrens 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
543789Sahrens 		    NULL, MUTEX_DEFAULT, NULL);
544789Sahrens 	}
545789Sahrens }
546789Sahrens 
547789Sahrens #define	ARC_MINTIME	(hz>>4) /* 62 ms */
548789Sahrens 
549789Sahrens static void
5503093Sahrens arc_cksum_verify(arc_buf_t *buf)
5513093Sahrens {
5523093Sahrens 	zio_cksum_t zc;
5533093Sahrens 
5543093Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5553093Sahrens 		return;
5563093Sahrens 
5573093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5583265Sahrens 	if (buf->b_hdr->b_freeze_cksum == NULL ||
5593265Sahrens 	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
5603093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
5613093Sahrens 		return;
5623093Sahrens 	}
5633093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
5643093Sahrens 	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
5653093Sahrens 		panic("buffer modified while frozen!");
5663093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
5673093Sahrens }
5683093Sahrens 
5693093Sahrens static void
5703093Sahrens arc_cksum_compute(arc_buf_t *buf)
5713093Sahrens {
5723093Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5733093Sahrens 		return;
5743093Sahrens 
5753093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5763093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
5773093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
5783093Sahrens 		return;
5793093Sahrens 	}
5803093Sahrens 	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
5813093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
5823093Sahrens 	    buf->b_hdr->b_freeze_cksum);
5833093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
5843093Sahrens }
5853093Sahrens 
5863093Sahrens void
5873093Sahrens arc_buf_thaw(arc_buf_t *buf)
5883093Sahrens {
5893093Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
5903093Sahrens 		return;
5913093Sahrens 
5923093Sahrens 	if (buf->b_hdr->b_state != arc.anon)
5933093Sahrens 		panic("modifying non-anon buffer!");
5943093Sahrens 	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
5953093Sahrens 		panic("modifying buffer while i/o in progress!");
5963093Sahrens 	arc_cksum_verify(buf);
5973093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
5983093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
5993093Sahrens 		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
6003093Sahrens 		buf->b_hdr->b_freeze_cksum = NULL;
6013093Sahrens 	}
6023093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
6033093Sahrens }
6043093Sahrens 
6053093Sahrens void
6063093Sahrens arc_buf_freeze(arc_buf_t *buf)
6073093Sahrens {
6083093Sahrens 	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
6093093Sahrens 	    buf->b_hdr->b_state == arc.anon);
6103093Sahrens 	arc_cksum_compute(buf);
6113093Sahrens }
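
/*
 * Illustrative use of the thaw/freeze interface above (a sketch; only
 * meaningful when ZFS_DEBUG_MODIFY is set in zfs_flags, and new_data is
 * hypothetical): a writer thaws an anonymous buffer before changing its
 * contents and freezes it once they are stable again.
 *
 *	arc_buf_thaw(buf);		(discard the old checksum)
 *	bcopy(new_data, buf->b_data, arc_buf_size(buf));
 *	arc_buf_freeze(buf);		(checksum the new contents)
 */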
6123093Sahrens 
6133093Sahrens static void
614789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
615789Sahrens {
616789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
617789Sahrens 
618789Sahrens 	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
619789Sahrens 	    (ab->b_state != arc.anon)) {
6201544Seschrock 		int delta = ab->b_size * ab->b_datacnt;
621789Sahrens 
622789Sahrens 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
623789Sahrens 		mutex_enter(&ab->b_state->mtx);
624789Sahrens 		ASSERT(list_link_active(&ab->b_arc_node));
625789Sahrens 		list_remove(&ab->b_state->list, ab);
6261544Seschrock 		if (GHOST_STATE(ab->b_state)) {
6271544Seschrock 			ASSERT3U(ab->b_datacnt, ==, 0);
6281544Seschrock 			ASSERT3P(ab->b_buf, ==, NULL);
6291544Seschrock 			delta = ab->b_size;
6301544Seschrock 		}
6311544Seschrock 		ASSERT(delta > 0);
6321544Seschrock 		ASSERT3U(ab->b_state->lsize, >=, delta);
6331544Seschrock 		atomic_add_64(&ab->b_state->lsize, -delta);
634789Sahrens 		mutex_exit(&ab->b_state->mtx);
6352391Smaybee 		/* remove the prefetch flag if we get a reference */
6362391Smaybee 		if (ab->b_flags & ARC_PREFETCH)
6372391Smaybee 			ab->b_flags &= ~ARC_PREFETCH;
638789Sahrens 	}
639789Sahrens }
640789Sahrens 
641789Sahrens static int
642789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
643789Sahrens {
644789Sahrens 	int cnt;
645789Sahrens 
6461544Seschrock 	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
6471544Seschrock 	ASSERT(!GHOST_STATE(ab->b_state));
648789Sahrens 
649789Sahrens 	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
650789Sahrens 	    (ab->b_state != arc.anon)) {
651789Sahrens 
652789Sahrens 		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
653789Sahrens 		mutex_enter(&ab->b_state->mtx);
654789Sahrens 		ASSERT(!list_link_active(&ab->b_arc_node));
655789Sahrens 		list_insert_head(&ab->b_state->list, ab);
6561544Seschrock 		ASSERT(ab->b_datacnt > 0);
6571544Seschrock 		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
6581544Seschrock 		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
659789Sahrens 		mutex_exit(&ab->b_state->mtx);
660789Sahrens 	}
661789Sahrens 	return (cnt);
662789Sahrens }
663789Sahrens 
664789Sahrens /*
665789Sahrens  * Move the supplied buffer to the indicated state.  The mutex
666789Sahrens  * for the buffer must be held by the caller.
667789Sahrens  */
668789Sahrens static void
6691544Seschrock arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
670789Sahrens {
6711544Seschrock 	arc_state_t *old_state = ab->b_state;
6721544Seschrock 	int refcnt = refcount_count(&ab->b_refcnt);
6731544Seschrock 	int from_delta, to_delta;
674789Sahrens 
675789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
6761544Seschrock 	ASSERT(new_state != old_state);
6771544Seschrock 	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
6781544Seschrock 	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
6791544Seschrock 
6801544Seschrock 	from_delta = to_delta = ab->b_datacnt * ab->b_size;
681789Sahrens 
682789Sahrens 	/*
683789Sahrens 	 * If this buffer is evictable, transfer it from the
684789Sahrens 	 * old state list to the new state list.
685789Sahrens 	 */
6861544Seschrock 	if (refcnt == 0) {
6871544Seschrock 		if (old_state != arc.anon) {
6881544Seschrock 			int use_mutex = !MUTEX_HELD(&old_state->mtx);
6891544Seschrock 
6901544Seschrock 			if (use_mutex)
6911544Seschrock 				mutex_enter(&old_state->mtx);
6921544Seschrock 
6931544Seschrock 			ASSERT(list_link_active(&ab->b_arc_node));
6941544Seschrock 			list_remove(&old_state->list, ab);
695789Sahrens 
6962391Smaybee 			/*
6972391Smaybee 			 * If prefetching out of the ghost cache,
6982391Smaybee 			 * we will have a non-null datacnt.
6992391Smaybee 			 */
7002391Smaybee 			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
7012391Smaybee 				/* ghost elements have a ghost size */
7021544Seschrock 				ASSERT(ab->b_buf == NULL);
7031544Seschrock 				from_delta = ab->b_size;
704789Sahrens 			}
7051544Seschrock 			ASSERT3U(old_state->lsize, >=, from_delta);
7061544Seschrock 			atomic_add_64(&old_state->lsize, -from_delta);
7071544Seschrock 
7081544Seschrock 			if (use_mutex)
7091544Seschrock 				mutex_exit(&old_state->mtx);
710789Sahrens 		}
711789Sahrens 		if (new_state != arc.anon) {
7121544Seschrock 			int use_mutex = !MUTEX_HELD(&new_state->mtx);
713789Sahrens 
7141544Seschrock 			if (use_mutex)
715789Sahrens 				mutex_enter(&new_state->mtx);
7161544Seschrock 
717789Sahrens 			list_insert_head(&new_state->list, ab);
7181544Seschrock 
7191544Seschrock 			/* ghost elements have a ghost size */
7201544Seschrock 			if (GHOST_STATE(new_state)) {
7211544Seschrock 				ASSERT(ab->b_datacnt == 0);
7221544Seschrock 				ASSERT(ab->b_buf == NULL);
7231544Seschrock 				to_delta = ab->b_size;
7241544Seschrock 			}
7251544Seschrock 			atomic_add_64(&new_state->lsize, to_delta);
7261544Seschrock 			ASSERT3U(new_state->size + to_delta, >=,
7271544Seschrock 			    new_state->lsize);
7281544Seschrock 
7291544Seschrock 			if (use_mutex)
730789Sahrens 				mutex_exit(&new_state->mtx);
731789Sahrens 		}
732789Sahrens 	}
733789Sahrens 
734789Sahrens 	ASSERT(!BUF_EMPTY(ab));
7351544Seschrock 	if (new_state == arc.anon && old_state != arc.anon) {
736789Sahrens 		buf_hash_remove(ab);
737789Sahrens 	}
738789Sahrens 
7391544Seschrock 	/* adjust state sizes */
7401544Seschrock 	if (to_delta)
7411544Seschrock 		atomic_add_64(&new_state->size, to_delta);
7421544Seschrock 	if (from_delta) {
7431544Seschrock 		ASSERT3U(old_state->size, >=, from_delta);
7441544Seschrock 		atomic_add_64(&old_state->size, -from_delta);
745789Sahrens 	}
746789Sahrens 	ab->b_state = new_state;
747789Sahrens }
748789Sahrens 
749789Sahrens arc_buf_t *
7503290Sjohansen arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
751789Sahrens {
752789Sahrens 	arc_buf_hdr_t *hdr;
753789Sahrens 	arc_buf_t *buf;
754789Sahrens 
755789Sahrens 	ASSERT3U(size, >, 0);
756789Sahrens 	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
757789Sahrens 	ASSERT(BUF_EMPTY(hdr));
758789Sahrens 	hdr->b_size = size;
7593290Sjohansen 	hdr->b_type = type;
760789Sahrens 	hdr->b_spa = spa;
761789Sahrens 	hdr->b_state = arc.anon;
762789Sahrens 	hdr->b_arc_access = 0;
763789Sahrens 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
764789Sahrens 	buf->b_hdr = hdr;
7652688Smaybee 	buf->b_data = NULL;
7661544Seschrock 	buf->b_efunc = NULL;
7671544Seschrock 	buf->b_private = NULL;
768789Sahrens 	buf->b_next = NULL;
769789Sahrens 	hdr->b_buf = buf;
7702688Smaybee 	arc_get_data_buf(buf);
7711544Seschrock 	hdr->b_datacnt = 1;
772789Sahrens 	hdr->b_flags = 0;
773789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
774789Sahrens 	(void) refcount_add(&hdr->b_refcnt, tag);
775789Sahrens 
776789Sahrens 	return (buf);
777789Sahrens }
778789Sahrens 
7792688Smaybee static arc_buf_t *
7802688Smaybee arc_buf_clone(arc_buf_t *from)
7811544Seschrock {
7822688Smaybee 	arc_buf_t *buf;
7832688Smaybee 	arc_buf_hdr_t *hdr = from->b_hdr;
7842688Smaybee 	uint64_t size = hdr->b_size;
7851544Seschrock 
7862688Smaybee 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
7872688Smaybee 	buf->b_hdr = hdr;
7882688Smaybee 	buf->b_data = NULL;
7892688Smaybee 	buf->b_efunc = NULL;
7902688Smaybee 	buf->b_private = NULL;
7912688Smaybee 	buf->b_next = hdr->b_buf;
7922688Smaybee 	hdr->b_buf = buf;
7932688Smaybee 	arc_get_data_buf(buf);
7942688Smaybee 	bcopy(from->b_data, buf->b_data, size);
7952688Smaybee 	hdr->b_datacnt += 1;
7962688Smaybee 	return (buf);
7971544Seschrock }
7981544Seschrock 
7991544Seschrock void
8001544Seschrock arc_buf_add_ref(arc_buf_t *buf, void* tag)
8011544Seschrock {
8022887Smaybee 	arc_buf_hdr_t *hdr;
8031544Seschrock 	kmutex_t *hash_lock;
8041544Seschrock 
8052724Smaybee 	/*
8062724Smaybee 	 * Check to see if this buffer is currently being evicted via
8072887Smaybee 	 * arc_do_user_evicts().
8082724Smaybee 	 */
8092887Smaybee 	mutex_enter(&arc_eviction_mtx);
8102887Smaybee 	hdr = buf->b_hdr;
8112887Smaybee 	if (hdr == NULL) {
8122887Smaybee 		mutex_exit(&arc_eviction_mtx);
8132724Smaybee 		return;
8142887Smaybee 	}
8152887Smaybee 	hash_lock = HDR_LOCK(hdr);
8162887Smaybee 	mutex_exit(&arc_eviction_mtx);
8172724Smaybee 
8182724Smaybee 	mutex_enter(hash_lock);
8191544Seschrock 	if (buf->b_data == NULL) {
8201544Seschrock 		/*
8211544Seschrock 		 * This buffer is evicted.
8221544Seschrock 		 */
8232724Smaybee 		mutex_exit(hash_lock);
8241544Seschrock 		return;
8251544Seschrock 	}
8261544Seschrock 
8272724Smaybee 	ASSERT(buf->b_hdr == hdr);
8282724Smaybee 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
8291544Seschrock 	add_reference(hdr, hash_lock, tag);
8302688Smaybee 	arc_access(hdr, hash_lock);
8312688Smaybee 	mutex_exit(hash_lock);
8321544Seschrock 	atomic_add_64(&arc.hits, 1);
8331544Seschrock }
8341544Seschrock 
835789Sahrens static void
8362688Smaybee arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
8371544Seschrock {
8381544Seschrock 	arc_buf_t **bufp;
8391544Seschrock 
8401544Seschrock 	/* free up data associated with the buf */
8411544Seschrock 	if (buf->b_data) {
8421544Seschrock 		arc_state_t *state = buf->b_hdr->b_state;
8431544Seschrock 		uint64_t size = buf->b_hdr->b_size;
8443290Sjohansen 		arc_buf_contents_t type = buf->b_hdr->b_type;
8451544Seschrock 
8463093Sahrens 		arc_cksum_verify(buf);
8472688Smaybee 		if (!recycle) {
8483290Sjohansen 			if (type == ARC_BUFC_METADATA) {
8493290Sjohansen 				zio_buf_free(buf->b_data, size);
8503290Sjohansen 			} else {
8513290Sjohansen 				ASSERT(type == ARC_BUFC_DATA);
8523290Sjohansen 				zio_data_buf_free(buf->b_data, size);
8533290Sjohansen 			}
8542688Smaybee 			atomic_add_64(&arc.size, -size);
8552688Smaybee 		}
8561544Seschrock 		if (list_link_active(&buf->b_hdr->b_arc_node)) {
8571544Seschrock 			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
8581544Seschrock 			ASSERT(state != arc.anon);
8591544Seschrock 			ASSERT3U(state->lsize, >=, size);
8601544Seschrock 			atomic_add_64(&state->lsize, -size);
8611544Seschrock 		}
8621544Seschrock 		ASSERT3U(state->size, >=, size);
8631544Seschrock 		atomic_add_64(&state->size, -size);
8641544Seschrock 		buf->b_data = NULL;
8651544Seschrock 		ASSERT(buf->b_hdr->b_datacnt > 0);
8661544Seschrock 		buf->b_hdr->b_datacnt -= 1;
8671544Seschrock 	}
8681544Seschrock 
8691544Seschrock 	/* only remove the buf if requested */
8701544Seschrock 	if (!all)
8711544Seschrock 		return;
8721544Seschrock 
8731544Seschrock 	/* remove the buf from the hdr list */
8741544Seschrock 	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
8751544Seschrock 		continue;
8761544Seschrock 	*bufp = buf->b_next;
8771544Seschrock 
8781544Seschrock 	ASSERT(buf->b_efunc == NULL);
8791544Seschrock 
8801544Seschrock 	/* clean up the buf */
8811544Seschrock 	buf->b_hdr = NULL;
8821544Seschrock 	kmem_cache_free(buf_cache, buf);
8831544Seschrock }
8841544Seschrock 
8851544Seschrock static void
8861544Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr)
887789Sahrens {
888789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
889789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
8901544Seschrock 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
891789Sahrens 
892789Sahrens 	if (!BUF_EMPTY(hdr)) {
8931544Seschrock 		ASSERT(!HDR_IN_HASH_TABLE(hdr));
894789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
895789Sahrens 		hdr->b_birth = 0;
896789Sahrens 		hdr->b_cksum0 = 0;
897789Sahrens 	}
8981544Seschrock 	while (hdr->b_buf) {
899789Sahrens 		arc_buf_t *buf = hdr->b_buf;
900789Sahrens 
9011544Seschrock 		if (buf->b_efunc) {
9021544Seschrock 			mutex_enter(&arc_eviction_mtx);
9031544Seschrock 			ASSERT(buf->b_hdr != NULL);
9042688Smaybee 			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
9051544Seschrock 			hdr->b_buf = buf->b_next;
9062887Smaybee 			buf->b_hdr = &arc_eviction_hdr;
9071544Seschrock 			buf->b_next = arc_eviction_list;
9081544Seschrock 			arc_eviction_list = buf;
9091544Seschrock 			mutex_exit(&arc_eviction_mtx);
9101544Seschrock 		} else {
9112688Smaybee 			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
9121544Seschrock 		}
913789Sahrens 	}
9143093Sahrens 	if (hdr->b_freeze_cksum != NULL) {
9153093Sahrens 		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
9163093Sahrens 		hdr->b_freeze_cksum = NULL;
9173093Sahrens 	}
9181544Seschrock 
919789Sahrens 	ASSERT(!list_link_active(&hdr->b_arc_node));
920789Sahrens 	ASSERT3P(hdr->b_hash_next, ==, NULL);
921789Sahrens 	ASSERT3P(hdr->b_acb, ==, NULL);
922789Sahrens 	kmem_cache_free(hdr_cache, hdr);
923789Sahrens }
924789Sahrens 
925789Sahrens void
926789Sahrens arc_buf_free(arc_buf_t *buf, void *tag)
927789Sahrens {
928789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
9291544Seschrock 	int hashed = hdr->b_state != arc.anon;
9301544Seschrock 
9311544Seschrock 	ASSERT(buf->b_efunc == NULL);
9321544Seschrock 	ASSERT(buf->b_data != NULL);
9331544Seschrock 
9341544Seschrock 	if (hashed) {
9351544Seschrock 		kmutex_t *hash_lock = HDR_LOCK(hdr);
9361544Seschrock 
9371544Seschrock 		mutex_enter(hash_lock);
9381544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
9391544Seschrock 		if (hdr->b_datacnt > 1)
9402688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
9411544Seschrock 		else
9421544Seschrock 			hdr->b_flags |= ARC_BUF_AVAILABLE;
9431544Seschrock 		mutex_exit(hash_lock);
9441544Seschrock 	} else if (HDR_IO_IN_PROGRESS(hdr)) {
9451544Seschrock 		int destroy_hdr;
9461544Seschrock 		/*
9471544Seschrock 		 * We are in the middle of an async write.  Don't destroy
9481544Seschrock 		 * this buffer unless the write completes before we finish
9491544Seschrock 		 * decrementing the reference count.
9501544Seschrock 		 */
9511544Seschrock 		mutex_enter(&arc_eviction_mtx);
9521544Seschrock 		(void) remove_reference(hdr, NULL, tag);
9531544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
9541544Seschrock 		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
9551544Seschrock 		mutex_exit(&arc_eviction_mtx);
9561544Seschrock 		if (destroy_hdr)
9571544Seschrock 			arc_hdr_destroy(hdr);
9581544Seschrock 	} else {
9591544Seschrock 		if (remove_reference(hdr, NULL, tag) > 0) {
9601544Seschrock 			ASSERT(HDR_IO_ERROR(hdr));
9612688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
9621544Seschrock 		} else {
9631544Seschrock 			arc_hdr_destroy(hdr);
9641544Seschrock 		}
9651544Seschrock 	}
9661544Seschrock }
9671544Seschrock 
9681544Seschrock int
9691544Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag)
9701544Seschrock {
9711544Seschrock 	arc_buf_hdr_t *hdr = buf->b_hdr;
972789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
9731544Seschrock 	int no_callback = (buf->b_efunc == NULL);
9741544Seschrock 
9751544Seschrock 	if (hdr->b_state == arc.anon) {
9761544Seschrock 		arc_buf_free(buf, tag);
9771544Seschrock 		return (no_callback);
9781544Seschrock 	}
979789Sahrens 
980789Sahrens 	mutex_enter(hash_lock);
9811544Seschrock 	ASSERT(hdr->b_state != arc.anon);
9821544Seschrock 	ASSERT(buf->b_data != NULL);
983789Sahrens 
9841544Seschrock 	(void) remove_reference(hdr, hash_lock, tag);
9851544Seschrock 	if (hdr->b_datacnt > 1) {
9861544Seschrock 		if (no_callback)
9872688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
9881544Seschrock 	} else if (no_callback) {
9891544Seschrock 		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
9901544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
991789Sahrens 	}
9921544Seschrock 	ASSERT(no_callback || hdr->b_datacnt > 1 ||
9931544Seschrock 	    refcount_is_zero(&hdr->b_refcnt));
994789Sahrens 	mutex_exit(hash_lock);
9951544Seschrock 	return (no_callback);
996789Sahrens }
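
/*
 * Illustrative caller pattern for the eviction callback machinery (a
 * sketch, not from the original file; my_evict_func and my_state are
 * hypothetical): with an efunc registered, dropping the last reference
 * leaves the buffer cached and defers cleanup to the callback.
 *
 *	buf->b_efunc = my_evict_func;
 *	buf->b_private = my_state;
 *	if (arc_buf_remove_ref(buf, tag) == 0)
 *		(buf remains cached; my_evict_func may run later)
 */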
997789Sahrens 
998789Sahrens int
999789Sahrens arc_buf_size(arc_buf_t *buf)
1000789Sahrens {
1001789Sahrens 	return (buf->b_hdr->b_size);
1002789Sahrens }
1003789Sahrens 
1004789Sahrens /*
1005789Sahrens  * Evict buffers from list until we've removed the specified number of
1006789Sahrens  * bytes.  Move the removed buffers to the appropriate evict state.
10072688Smaybee  * If the recycle flag is set, then attempt to "recycle" a buffer:
10082688Smaybee  * - look for a buffer to evict that is `bytes' long.
10092688Smaybee  * - return the data block from this buffer rather than freeing it.
10102688Smaybee  * This flag is used by callers that are trying to make space for a
10112688Smaybee  * new buffer in a full arc cache.
1012789Sahrens  */
10132688Smaybee static void *
10143290Sjohansen arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
10153290Sjohansen     arc_buf_contents_t type)
1016789Sahrens {
1017789Sahrens 	arc_state_t *evicted_state;
10182688Smaybee 	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
10192918Smaybee 	arc_buf_hdr_t *ab, *ab_prev = NULL;
1020789Sahrens 	kmutex_t *hash_lock;
10212688Smaybee 	boolean_t have_lock;
10222918Smaybee 	void *stolen = NULL;
1023789Sahrens 
10241544Seschrock 	ASSERT(state == arc.mru || state == arc.mfu);
1025789Sahrens 
10261544Seschrock 	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
1027789Sahrens 
1028789Sahrens 	mutex_enter(&state->mtx);
1029789Sahrens 	mutex_enter(&evicted_state->mtx);
1030789Sahrens 
1031789Sahrens 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
1032789Sahrens 		ab_prev = list_prev(&state->list, ab);
10332391Smaybee 		/* prefetch buffers have a minimum lifespan */
10342688Smaybee 		if (HDR_IO_IN_PROGRESS(ab) ||
10352688Smaybee 		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
10362688Smaybee 		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
10372391Smaybee 			skipped++;
10382391Smaybee 			continue;
10392391Smaybee 		}
10402918Smaybee 		/* "lookahead" for better eviction candidate */
10412918Smaybee 		if (recycle && ab->b_size != bytes &&
10422918Smaybee 		    ab_prev && ab_prev->b_size == bytes)
10432688Smaybee 			continue;
1044789Sahrens 		hash_lock = HDR_LOCK(ab);
10452688Smaybee 		have_lock = MUTEX_HELD(hash_lock);
10462688Smaybee 		if (have_lock || mutex_tryenter(hash_lock)) {
1047789Sahrens 			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
10481544Seschrock 			ASSERT(ab->b_datacnt > 0);
10491544Seschrock 			while (ab->b_buf) {
10501544Seschrock 				arc_buf_t *buf = ab->b_buf;
10512688Smaybee 				if (buf->b_data) {
10521544Seschrock 					bytes_evicted += ab->b_size;
10533290Sjohansen 					if (recycle && ab->b_type == type &&
10543290Sjohansen 					    ab->b_size == bytes) {
10552918Smaybee 						stolen = buf->b_data;
10562918Smaybee 						recycle = FALSE;
10572918Smaybee 					}
10582688Smaybee 				}
10591544Seschrock 				if (buf->b_efunc) {
10601544Seschrock 					mutex_enter(&arc_eviction_mtx);
10612918Smaybee 					arc_buf_destroy(buf,
10622918Smaybee 					    buf->b_data == stolen, FALSE);
10631544Seschrock 					ab->b_buf = buf->b_next;
10642887Smaybee 					buf->b_hdr = &arc_eviction_hdr;
10651544Seschrock 					buf->b_next = arc_eviction_list;
10661544Seschrock 					arc_eviction_list = buf;
10671544Seschrock 					mutex_exit(&arc_eviction_mtx);
10681544Seschrock 				} else {
10692918Smaybee 					arc_buf_destroy(buf,
10702918Smaybee 					    buf->b_data == stolen, TRUE);
10711544Seschrock 				}
10721544Seschrock 			}
10731544Seschrock 			ASSERT(ab->b_datacnt == 0);
1074789Sahrens 			arc_change_state(evicted_state, ab, hash_lock);
10751544Seschrock 			ASSERT(HDR_IN_HASH_TABLE(ab));
10761544Seschrock 			ab->b_flags = ARC_IN_HASH_TABLE;
1077789Sahrens 			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
10782688Smaybee 			if (!have_lock)
10792688Smaybee 				mutex_exit(hash_lock);
10801544Seschrock 			if (bytes >= 0 && bytes_evicted >= bytes)
1081789Sahrens 				break;
1082789Sahrens 		} else {
10832688Smaybee 			missed += 1;
1084789Sahrens 		}
1085789Sahrens 	}
1086789Sahrens 	mutex_exit(&evicted_state->mtx);
1087789Sahrens 	mutex_exit(&state->mtx);
1088789Sahrens 
1089789Sahrens 	if (bytes_evicted < bytes)
1090789Sahrens 		dprintf("only evicted %lld bytes from %p",
1091789Sahrens 		    (longlong_t)bytes_evicted, state);
1092789Sahrens 
10932688Smaybee 	if (skipped)
10942688Smaybee 		atomic_add_64(&arc.evict_skip, skipped);
10952688Smaybee 	if (missed)
10962688Smaybee 		atomic_add_64(&arc.mutex_miss, missed);
10972918Smaybee 	return (stolen);
1098789Sahrens }
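
/*
 * Illustrative use of the recycle flag described above (a sketch, not
 * from the original file; compare arc_get_data_buf()): a caller needing
 * a `size'-byte block may steal one from the eviction candidates rather
 * than allocating fresh memory.
 *
 *	void *data = arc_evict(arc.mru, size, TRUE, type);
 *	if (data == NULL)
 *		data = zio_buf_alloc(size);	(nothing recyclable found)
 */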
1099789Sahrens 
1100789Sahrens /*
1101789Sahrens  * Remove buffers from list until we've removed the specified number of
1102789Sahrens  * bytes.  Destroy the buffers that are removed.
1103789Sahrens  */
1104789Sahrens static void
11051544Seschrock arc_evict_ghost(arc_state_t *state, int64_t bytes)
1106789Sahrens {
1107789Sahrens 	arc_buf_hdr_t *ab, *ab_prev;
1108789Sahrens 	kmutex_t *hash_lock;
11091544Seschrock 	uint64_t bytes_deleted = 0;
11101544Seschrock 	uint_t bufs_skipped = 0;
1111789Sahrens 
11121544Seschrock 	ASSERT(GHOST_STATE(state));
1113789Sahrens top:
1114789Sahrens 	mutex_enter(&state->mtx);
1115789Sahrens 	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
1116789Sahrens 		ab_prev = list_prev(&state->list, ab);
1117789Sahrens 		hash_lock = HDR_LOCK(ab);
1118789Sahrens 		if (mutex_tryenter(hash_lock)) {
11192391Smaybee 			ASSERT(!HDR_IO_IN_PROGRESS(ab));
11201544Seschrock 			ASSERT(ab->b_buf == NULL);
1121789Sahrens 			arc_change_state(arc.anon, ab, hash_lock);
1122789Sahrens 			mutex_exit(hash_lock);
1123789Sahrens 			atomic_add_64(&arc.deleted, 1);
11241544Seschrock 			bytes_deleted += ab->b_size;
11251544Seschrock 			arc_hdr_destroy(ab);
1126789Sahrens 			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1127789Sahrens 			if (bytes >= 0 && bytes_deleted >= bytes)
1128789Sahrens 				break;
1129789Sahrens 		} else {
1130789Sahrens 			if (bytes < 0) {
1131789Sahrens 				mutex_exit(&state->mtx);
1132789Sahrens 				mutex_enter(hash_lock);
1133789Sahrens 				mutex_exit(hash_lock);
1134789Sahrens 				goto top;
1135789Sahrens 			}
1136789Sahrens 			bufs_skipped += 1;
1137789Sahrens 		}
1138789Sahrens 	}
1139789Sahrens 	mutex_exit(&state->mtx);
1140789Sahrens 
1141789Sahrens 	if (bufs_skipped) {
11422688Smaybee 		atomic_add_64(&arc.mutex_miss, bufs_skipped);
1143789Sahrens 		ASSERT(bytes >= 0);
1144789Sahrens 	}
1145789Sahrens 
1146789Sahrens 	if (bytes_deleted < bytes)
1147789Sahrens 		dprintf("only deleted %lld bytes from %p",
1148789Sahrens 		    (longlong_t)bytes_deleted, state);
1149789Sahrens }
1150789Sahrens 
1151789Sahrens static void
1152789Sahrens arc_adjust(void)
1153789Sahrens {
1154789Sahrens 	int64_t top_sz, mru_over, arc_over;
1155789Sahrens 
11561544Seschrock 	top_sz = arc.anon->size + arc.mru->size;
1157789Sahrens 
11581544Seschrock 	if (top_sz > arc.p && arc.mru->lsize > 0) {
11591544Seschrock 		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
11603290Sjohansen 		(void) arc_evict(arc.mru, toevict, FALSE, ARC_BUFC_UNDEF);
11611544Seschrock 		top_sz = arc.anon->size + arc.mru->size;
1162789Sahrens 	}
1163789Sahrens 
11641544Seschrock 	mru_over = top_sz + arc.mru_ghost->size - arc.c;
1165789Sahrens 
1166789Sahrens 	if (mru_over > 0) {
11671544Seschrock 		if (arc.mru_ghost->lsize > 0) {
11681544Seschrock 			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
11691544Seschrock 			arc_evict_ghost(arc.mru_ghost, todelete);
1170789Sahrens 		}
1171789Sahrens 	}
1172789Sahrens 
1173789Sahrens 	if ((arc_over = arc.size - arc.c) > 0) {
11741544Seschrock 		int64_t tbl_over;
1175789Sahrens 
11761544Seschrock 		if (arc.mfu->lsize > 0) {
11771544Seschrock 			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
11783290Sjohansen 			(void) arc_evict(arc.mfu, toevict, FALSE,
11793290Sjohansen 			    ARC_BUFC_UNDEF);
1180789Sahrens 		}
1181789Sahrens 
11821544Seschrock 		tbl_over = arc.size + arc.mru_ghost->lsize +
11831544Seschrock 		    arc.mfu_ghost->lsize - arc.c*2;
1184789Sahrens 
11851544Seschrock 		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
11861544Seschrock 			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
11871544Seschrock 			arc_evict_ghost(arc.mfu_ghost, todelete);
1188789Sahrens 		}
1189789Sahrens 	}
1190789Sahrens }
1191789Sahrens 
11921544Seschrock static void
11931544Seschrock arc_do_user_evicts(void)
11941544Seschrock {
11951544Seschrock 	mutex_enter(&arc_eviction_mtx);
11961544Seschrock 	while (arc_eviction_list != NULL) {
11971544Seschrock 		arc_buf_t *buf = arc_eviction_list;
11981544Seschrock 		arc_eviction_list = buf->b_next;
11991544Seschrock 		buf->b_hdr = NULL;
12001544Seschrock 		mutex_exit(&arc_eviction_mtx);
12011544Seschrock 
12021819Smaybee 		if (buf->b_efunc != NULL)
12031819Smaybee 			VERIFY(buf->b_efunc(buf) == 0);
12041544Seschrock 
12051544Seschrock 		buf->b_efunc = NULL;
12061544Seschrock 		buf->b_private = NULL;
12071544Seschrock 		kmem_cache_free(buf_cache, buf);
12081544Seschrock 		mutex_enter(&arc_eviction_mtx);
12091544Seschrock 	}
12101544Seschrock 	mutex_exit(&arc_eviction_mtx);
12111544Seschrock }
12121544Seschrock 
1213789Sahrens /*
1214789Sahrens  * Flush all *evictable* data from the cache.
1215789Sahrens  * NOTE: this will not touch "active" (i.e. referenced) data.
1216789Sahrens  */
1217789Sahrens void
1218789Sahrens arc_flush(void)
1219789Sahrens {
12202688Smaybee 	while (list_head(&arc.mru->list))
12213290Sjohansen 		(void) arc_evict(arc.mru, -1, FALSE, ARC_BUFC_UNDEF);
12222688Smaybee 	while (list_head(&arc.mfu->list))
12233290Sjohansen 		(void) arc_evict(arc.mfu, -1, FALSE, ARC_BUFC_UNDEF);
1224789Sahrens 
12251544Seschrock 	arc_evict_ghost(arc.mru_ghost, -1);
12261544Seschrock 	arc_evict_ghost(arc.mfu_ghost, -1);
12271544Seschrock 
12281544Seschrock 	mutex_enter(&arc_reclaim_thr_lock);
12291544Seschrock 	arc_do_user_evicts();
12301544Seschrock 	mutex_exit(&arc_reclaim_thr_lock);
12311544Seschrock 	ASSERT(arc_eviction_list == NULL);
1232789Sahrens }
1233789Sahrens 
12343158Smaybee int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */
12352391Smaybee 
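/*
 * Illustrative arithmetic (not from the original file): with
 * arc_shrink_shift = 5, each arc_shrink() call below reduces the target
 * size by arc.c >> 5, i.e. 1/32nd of the current target (or by
 * ptob(needfree) in the kernel, whichever is larger).
 */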
1236789Sahrens void
12373158Smaybee arc_shrink(void)
1238789Sahrens {
12393158Smaybee 	if (arc.c > arc.c_min) {
12403158Smaybee 		uint64_t to_free;
1241789Sahrens 
12422048Sstans #ifdef _KERNEL
12433158Smaybee 		to_free = MAX(arc.c >> arc_shrink_shift, ptob(needfree));
12442048Sstans #else
12453158Smaybee 		to_free = arc.c >> arc_shrink_shift;
12462048Sstans #endif
12473158Smaybee 		if (arc.c > arc.c_min + to_free)
12483158Smaybee 			atomic_add_64(&arc.c, -to_free);
12493158Smaybee 		else
12503158Smaybee 			arc.c = arc.c_min;
12512048Sstans 
12523158Smaybee 		atomic_add_64(&arc.p, -(arc.p >> arc_shrink_shift));
12533158Smaybee 		if (arc.c > arc.size)
12543158Smaybee 			arc.c = MAX(arc.size, arc.c_min);
12553158Smaybee 		if (arc.p > arc.c)
12563158Smaybee 			arc.p = (arc.c >> 1);
12573158Smaybee 		ASSERT(arc.c >= arc.c_min);
12583158Smaybee 		ASSERT((int64_t)arc.p >= 0);
12593158Smaybee 	}
1260789Sahrens 
12613158Smaybee 	if (arc.size > arc.c)
12623158Smaybee 		arc_adjust();
1263789Sahrens }
1264789Sahrens 
1265789Sahrens static int
1266789Sahrens arc_reclaim_needed(void)
1267789Sahrens {
1268789Sahrens 	uint64_t extra;
1269789Sahrens 
1270789Sahrens #ifdef _KERNEL
12712048Sstans 
12722048Sstans 	if (needfree)
12732048Sstans 		return (1);
12742048Sstans 
1275789Sahrens 	/*
1276789Sahrens 	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1277789Sahrens 	 */
1278789Sahrens 	extra = desfree;
1279789Sahrens 
1280789Sahrens 	/*
1281789Sahrens 	 * check that we're out of range of the pageout scanner.  It starts to
1282789Sahrens 	 * schedule paging if freemem is less than lotsfree and needfree.
1283789Sahrens 	 * lotsfree is the high-water mark for pageout, and needfree is the
1284789Sahrens 	 * number of needed free pages.  We add extra pages here to make sure
1285789Sahrens 	 * the scanner doesn't start up while we're freeing memory.
1286789Sahrens 	 */
1287789Sahrens 	if (freemem < lotsfree + needfree + extra)
1288789Sahrens 		return (1);
1289789Sahrens 
1290789Sahrens 	/*
1291789Sahrens 	 * check to make sure that swapfs has enough space so that anon
1292789Sahrens 	 * reservations can still succeed.  anon_resvmem() checks that the
1293789Sahrens 	 * availrmem is greater than swapfs_minfree, and the number of reserved
1294789Sahrens 	 * swap pages.  We also add a bit of extra here just to prevent
1295789Sahrens 	 * circumstances from getting really dire.
1296789Sahrens 	 */
1297789Sahrens 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1298789Sahrens 		return (1);
1299789Sahrens 
1300*3307Sjohansen 	/*
1301*3307Sjohansen 	 * If zio data pages are being allocated out of a separate heap segment,
1302*3307Sjohansen 	 * then check that the size of available vmem for this area remains
1303*3307Sjohansen 	 * above 1/4th free.  This needs to be done since the size of the
1304*3307Sjohansen 	 * non-default segment is smaller than physical memory, so we could
1305*3307Sjohansen 	 * conceivably run out of VA in that segment before running out of
1306*3307Sjohansen 	 * physical memory.
1307*3307Sjohansen 	 */
1308*3307Sjohansen 	if ((zio_arena != NULL) && (btop(vmem_size(zio_arena, VMEM_FREE)) <
1309*3307Sjohansen 	    (btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)))
1310*3307Sjohansen 		return (1);
1311*3307Sjohansen 
13121936Smaybee #if defined(__i386)
1313789Sahrens 	/*
1314789Sahrens 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1315789Sahrens 	 * kernel heap space before we ever run out of available physical
1316789Sahrens 	 * memory.  Most checks of the size of the heap_area compare against
1317789Sahrens 	 * tune.t_minarmem, which is the minimum available real memory that we
1318789Sahrens 	 * can have in the system.  However, this is generally fixed at 25 pages
1319789Sahrens 	 * which is so low that it's useless.  In this comparison, we seek to
1320789Sahrens 	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1321789Sahrens 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
1322789Sahrens 	 * free)
1323789Sahrens 	 */
1324789Sahrens 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1325789Sahrens 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1326789Sahrens 		return (1);
1327789Sahrens #endif
1328789Sahrens 
1329789Sahrens #else
1330789Sahrens 	if (spa_get_random(100) == 0)
1331789Sahrens 		return (1);
1332789Sahrens #endif
1333789Sahrens 	return (0);
1334789Sahrens }
1335789Sahrens 
1336789Sahrens static void
1337789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1338789Sahrens {
1339789Sahrens 	size_t			i;
1340789Sahrens 	kmem_cache_t		*prev_cache = NULL;
13413290Sjohansen 	kmem_cache_t		*prev_data_cache = NULL;
1342789Sahrens 	extern kmem_cache_t	*zio_buf_cache[];
13433290Sjohansen 	extern kmem_cache_t	*zio_data_buf_cache[];
1344789Sahrens 
13451484Sek110237 #ifdef _KERNEL
13461484Sek110237 	/*
13471484Sek110237 	 * First purge some DNLC entries, in case the DNLC is using
13481484Sek110237 	 * up too much memory.
13491484Sek110237 	 */
13501505Sek110237 	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
13511936Smaybee 
13521936Smaybee #if defined(__i386)
13531936Smaybee 	/*
13541936Smaybee 	 * Reclaim unused memory from all kmem caches.
13551936Smaybee 	 */
13561936Smaybee 	kmem_reap();
13571936Smaybee #endif
13581484Sek110237 #endif
13591484Sek110237 
1360789Sahrens 	/*
13611544Seschrock 	 * An aggressive reclamation will shrink the cache size as well as
13621544Seschrock 	 * reap free buffers from the arc kmem caches.
1363789Sahrens 	 */
1364789Sahrens 	if (strat == ARC_RECLAIM_AGGR)
13653158Smaybee 		arc_shrink();
1366789Sahrens 
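	/*
	 * Note: several consecutive sizes in zio_buf_cache[] (and
	 * zio_data_buf_cache[]) can alias the same kmem cache; the
	 * prev_cache pointers keep us from reaping a cache twice.
	 */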
1367789Sahrens 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1368789Sahrens 		if (zio_buf_cache[i] != prev_cache) {
1369789Sahrens 			prev_cache = zio_buf_cache[i];
1370789Sahrens 			kmem_cache_reap_now(zio_buf_cache[i]);
1371789Sahrens 		}
13723290Sjohansen 		if (zio_data_buf_cache[i] != prev_data_cache) {
13733290Sjohansen 			prev_data_cache = zio_data_buf_cache[i];
13743290Sjohansen 			kmem_cache_reap_now(zio_data_buf_cache[i]);
13753290Sjohansen 		}
1376789Sahrens 	}
13771544Seschrock 	kmem_cache_reap_now(buf_cache);
13781544Seschrock 	kmem_cache_reap_now(hdr_cache);
1379789Sahrens }
1380789Sahrens 
1381789Sahrens static void
1382789Sahrens arc_reclaim_thread(void)
1383789Sahrens {
1384789Sahrens 	clock_t			growtime = 0;
1385789Sahrens 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1386789Sahrens 	callb_cpr_t		cpr;
1387789Sahrens 
1388789Sahrens 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1389789Sahrens 
1390789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
1391789Sahrens 	while (arc_thread_exit == 0) {
1392789Sahrens 		if (arc_reclaim_needed()) {
1393789Sahrens 
1394789Sahrens 			if (arc.no_grow) {
1395789Sahrens 				if (last_reclaim == ARC_RECLAIM_CONS) {
1396789Sahrens 					last_reclaim = ARC_RECLAIM_AGGR;
1397789Sahrens 				} else {
1398789Sahrens 					last_reclaim = ARC_RECLAIM_CONS;
1399789Sahrens 				}
1400789Sahrens 			} else {
1401789Sahrens 				arc.no_grow = TRUE;
1402789Sahrens 				last_reclaim = ARC_RECLAIM_AGGR;
1403789Sahrens 				membar_producer();
1404789Sahrens 			}
1405789Sahrens 
1406789Sahrens 			/* reset the growth delay for every reclaim */
1407789Sahrens 			growtime = lbolt + (arc_grow_retry * hz);
14082856Snd150628 			ASSERT(growtime > 0);
1409789Sahrens 
1410789Sahrens 			arc_kmem_reap_now(last_reclaim);
1411789Sahrens 
1412789Sahrens 		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
1413789Sahrens 			arc.no_grow = FALSE;
1414789Sahrens 		}
1415789Sahrens 
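		/*
		 * If the resident cache plus both ghost lists have
		 * grown past twice the target size, trim them back.
		 */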
14163298Smaybee 		if (2 * arc.c <
14173298Smaybee 		    arc.size + arc.mru_ghost->size + arc.mfu_ghost->size)
14183298Smaybee 			arc_adjust();
14193298Smaybee 
14201544Seschrock 		if (arc_eviction_list != NULL)
14211544Seschrock 			arc_do_user_evicts();
14221544Seschrock 
1423789Sahrens 		/* block until needed, or one second, whichever is shorter */
1424789Sahrens 		CALLB_CPR_SAFE_BEGIN(&cpr);
1425789Sahrens 		(void) cv_timedwait(&arc_reclaim_thr_cv,
1426789Sahrens 		    &arc_reclaim_thr_lock, (lbolt + hz));
1427789Sahrens 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1428789Sahrens 	}
1429789Sahrens 
1430789Sahrens 	arc_thread_exit = 0;
1431789Sahrens 	cv_broadcast(&arc_reclaim_thr_cv);
1432789Sahrens 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1433789Sahrens 	thread_exit();
1434789Sahrens }
1435789Sahrens 
14361544Seschrock /*
14371544Seschrock  * Adapt arc info given the number of bytes we are trying to add and
14381544Seschrock  * the state that we are coming from.  This function is only called
14391544Seschrock  * when we are adding new content to the cache.
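 *
 * For example (hypothetical sizes): a hit in the MRU ghost list with
 * mru_ghost->size = 100MB and mfu_ghost->size = 300MB yields mult = 3,
 * so arc.p grows by 3 * bytes (capped at arc.c); ghost hits on the
 * smaller list push the target harder in its favor.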
14401544Seschrock  */
1441789Sahrens static void
14421544Seschrock arc_adapt(int bytes, arc_state_t *state)
1443789Sahrens {
14441544Seschrock 	int mult;
14451544Seschrock 
14461544Seschrock 	ASSERT(bytes > 0);
1447789Sahrens 	/*
14481544Seschrock 	 * Adapt the target size of the MRU list:
14491544Seschrock 	 *	- if we just hit in the MRU ghost list, then increase
14501544Seschrock 	 *	  the target size of the MRU list.
14511544Seschrock 	 *	- if we just hit in the MFU ghost list, then increase
14521544Seschrock 	 *	  the target size of the MFU list by decreasing the
14531544Seschrock 	 *	  target size of the MRU list.
1454789Sahrens 	 */
14551544Seschrock 	if (state == arc.mru_ghost) {
14561544Seschrock 		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
14571544Seschrock 		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));
14581544Seschrock 
14591544Seschrock 		arc.p = MIN(arc.c, arc.p + bytes * mult);
14601544Seschrock 	} else if (state == arc.mfu_ghost) {
14611544Seschrock 		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
14621544Seschrock 		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));
14631544Seschrock 
14641544Seschrock 		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
14651544Seschrock 	}
14661544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1467789Sahrens 
1468789Sahrens 	if (arc_reclaim_needed()) {
1469789Sahrens 		cv_signal(&arc_reclaim_thr_cv);
1470789Sahrens 		return;
1471789Sahrens 	}
1472789Sahrens 
1473789Sahrens 	if (arc.no_grow)
1474789Sahrens 		return;
1475789Sahrens 
14761544Seschrock 	if (arc.c >= arc.c_max)
14771544Seschrock 		return;
14781544Seschrock 
1479789Sahrens 	/*
14801544Seschrock 	 * If we're within (2 * maxblocksize) bytes of the target
14811544Seschrock 	 * cache size, increment the target cache size
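	 * (with 128KB maximum blocks, 2ULL << SPA_MAXBLOCKSHIFT is 256KB)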
1482789Sahrens 	 */
14831544Seschrock 	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
14841544Seschrock 		atomic_add_64(&arc.c, (int64_t)bytes);
1485789Sahrens 		if (arc.c > arc.c_max)
1486789Sahrens 			arc.c = arc.c_max;
14871544Seschrock 		else if (state == arc.anon)
14881544Seschrock 			atomic_add_64(&arc.p, (int64_t)bytes);
14891544Seschrock 		if (arc.p > arc.c)
14901544Seschrock 			arc.p = arc.c;
1491789Sahrens 	}
14921544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1493789Sahrens }
1494789Sahrens 
1495789Sahrens /*
14961544Seschrock  * Check if the cache has reached its limits and eviction is required
14971544Seschrock  * prior to insert.
1498789Sahrens  */
1499789Sahrens static int
1500789Sahrens arc_evict_needed()
1501789Sahrens {
1502789Sahrens 	if (arc_reclaim_needed())
1503789Sahrens 		return (1);
1504789Sahrens 
15051544Seschrock 	return (arc.size > arc.c);
1506789Sahrens }
1507789Sahrens 
1508789Sahrens /*
15092688Smaybee  * The buffer, supplied as the first argument, needs a data block.
15102688Smaybee  * So, if we are at cache max, determine which cache should be victimized.
15112688Smaybee  * We have the following cases:
1512789Sahrens  *
15131544Seschrock  * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
1514789Sahrens  * In this situation if we're out of space, but the resident size of the MFU is
1515789Sahrens  * under the limit, victimize the MFU cache to satisfy this insertion request.
1516789Sahrens  *
15171544Seschrock  * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
1518789Sahrens  * Here, we've used up all of the available space for the MRU, so we need to
1519789Sahrens  * evict from our own cache instead.  Evict from the set of resident MRU
1520789Sahrens  * entries.
1521789Sahrens  *
15221544Seschrock  * 3. Insert for MFU (c - p) > sizeof(arc.mfu) ->
1523789Sahrens  * c minus p represents the MFU space in the cache, since p is the size of the
1524789Sahrens  * cache that is dedicated to the MRU.  In this situation there's still space on
1525789Sahrens  * the MFU side, so the MRU side needs to be victimized.
1526789Sahrens  *
15271544Seschrock  * 4. Insert for MFU (c - p) < sizeof(arc.mfu) ->
1528789Sahrens  * MFU's resident set is consuming more space than it has been allotted.  In
1529789Sahrens  * this situation, we must victimize our own cache, the MFU, for this insertion.
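 *
 * For example (hypothetical sizes): with c = 1GB and p = 600MB, an
 * insert for the MFU sees (c - p) = 400MB of MFU space.  While
 * arc.mfu->size is under 400MB we victimize the MRU (case 3); once the
 * MFU resident set reaches 400MB we evict from the MFU itself (case 4).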
1530789Sahrens  */
1531789Sahrens static void
15322688Smaybee arc_get_data_buf(arc_buf_t *buf)
1533789Sahrens {
15343290Sjohansen 	arc_state_t		*state = buf->b_hdr->b_state;
15353290Sjohansen 	uint64_t		size = buf->b_hdr->b_size;
15363290Sjohansen 	arc_buf_contents_t	type = buf->b_hdr->b_type;
15372688Smaybee 
15382688Smaybee 	arc_adapt(size, state);
1539789Sahrens 
15402688Smaybee 	/*
15412688Smaybee 	 * We have not yet reached cache maximum size,
15422688Smaybee 	 * just allocate a new buffer.
15432688Smaybee 	 */
15442688Smaybee 	if (!arc_evict_needed()) {
15453290Sjohansen 		if (type == ARC_BUFC_METADATA) {
15463290Sjohansen 			buf->b_data = zio_buf_alloc(size);
15473290Sjohansen 		} else {
15483290Sjohansen 			ASSERT(type == ARC_BUFC_DATA);
15493290Sjohansen 			buf->b_data = zio_data_buf_alloc(size);
15503290Sjohansen 		}
15512688Smaybee 		atomic_add_64(&arc.size, size);
15522688Smaybee 		goto out;
15532688Smaybee 	}
15542688Smaybee 
15552688Smaybee 	/*
15562688Smaybee 	 * If we are prefetching from the mfu ghost list, this buffer
15572688Smaybee 	 * will end up on the mru list; so steal space from there.
15582688Smaybee 	 */
15592688Smaybee 	if (state == arc.mfu_ghost)
15602688Smaybee 		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu;
15612688Smaybee 	else if (state == arc.mru_ghost)
15622688Smaybee 		state = arc.mru;
1563789Sahrens 
15642688Smaybee 	if (state == arc.mru || state == arc.anon) {
15652688Smaybee 		uint64_t mru_used = arc.anon->size + arc.mru->size;
15662688Smaybee 		state = (arc.p > mru_used) ? arc.mfu : arc.mru;
1567789Sahrens 	} else {
15682688Smaybee 		/* MFU cases */
15692688Smaybee 		uint64_t mfu_space = arc.c - arc.p;
15702688Smaybee 		state =  (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu;
15712688Smaybee 	}
15723290Sjohansen 	if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) {
15733290Sjohansen 		if (type == ARC_BUFC_METADATA) {
15743290Sjohansen 			buf->b_data = zio_buf_alloc(size);
15753290Sjohansen 		} else {
15763290Sjohansen 			ASSERT(type == ARC_BUFC_DATA);
15773290Sjohansen 			buf->b_data = zio_data_buf_alloc(size);
15783290Sjohansen 		}
15792688Smaybee 		atomic_add_64(&arc.size, size);
15802688Smaybee 		atomic_add_64(&arc.recycle_miss, 1);
15812688Smaybee 	}
15822688Smaybee 	ASSERT(buf->b_data != NULL);
15832688Smaybee out:
15842688Smaybee 	/*
15852688Smaybee 	 * Update the state size.  Note that ghost states have a
15862688Smaybee 	 * "ghost size" and so don't need to be updated.
15872688Smaybee 	 */
15882688Smaybee 	if (!GHOST_STATE(buf->b_hdr->b_state)) {
15892688Smaybee 		arc_buf_hdr_t *hdr = buf->b_hdr;
15902688Smaybee 
15912688Smaybee 		atomic_add_64(&hdr->b_state->size, size);
15922688Smaybee 		if (list_link_active(&hdr->b_arc_node)) {
15932688Smaybee 			ASSERT(refcount_is_zero(&hdr->b_refcnt));
15942688Smaybee 			atomic_add_64(&hdr->b_state->lsize, size);
1595789Sahrens 		}
15963298Smaybee 		/*
15973298Smaybee 		 * If we are growing the cache, and we are adding anonymous
15983298Smaybee 		 * data, and we have outgrown arc.p, update arc.p
15993298Smaybee 		 */
16003298Smaybee 		if (arc.size < arc.c && hdr->b_state == arc.anon &&
16013298Smaybee 		    arc.anon->size + arc.mru->size > arc.p)
16023298Smaybee 			arc.p = MIN(arc.c, arc.p + size);
1603789Sahrens 	}
1604789Sahrens }
1605789Sahrens 
1606789Sahrens /*
1607789Sahrens  * This routine is called whenever a buffer is accessed.
16081544Seschrock  * NOTE: the hash lock is NOT dropped here; the caller must release it.
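 *
 * In outline: anonymous buffers enter the MRU; a second access after
 * ARC_MINTIME promotes MRU to MFU; ghost-list hits resurrect a buffer
 * into the MRU or MFU depending on whether it was a prefetch.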
1609789Sahrens  */
1610789Sahrens static void
16112688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1612789Sahrens {
1613789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
1614789Sahrens 
1615789Sahrens 	if (buf->b_state == arc.anon) {
1616789Sahrens 		/*
1617789Sahrens 		 * This buffer is not in the cache, and does not
1618789Sahrens 		 * appear in our "ghost" lists.  Add the new buffer
1619789Sahrens 		 * to the MRU state.
1620789Sahrens 		 */
1621789Sahrens 
1622789Sahrens 		ASSERT(buf->b_arc_access == 0);
1623789Sahrens 		buf->b_arc_access = lbolt;
16241544Seschrock 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
16251544Seschrock 		arc_change_state(arc.mru, buf, hash_lock);
1626789Sahrens 
16271544Seschrock 	} else if (buf->b_state == arc.mru) {
1628789Sahrens 		/*
16292391Smaybee 		 * If this buffer is here because of a prefetch, then either:
16302391Smaybee 		 * - clear the flag if this is a "referencing" read
16312391Smaybee 		 *   (any subsequent access will bump this into the MFU state).
16322391Smaybee 		 * or
16332391Smaybee 		 * - move the buffer to the head of the list if this is
16342391Smaybee 		 *   another prefetch (to make it less likely to be evicted).
1635789Sahrens 		 */
1636789Sahrens 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
16372391Smaybee 			if (refcount_count(&buf->b_refcnt) == 0) {
16382391Smaybee 				ASSERT(list_link_active(&buf->b_arc_node));
16392391Smaybee 				mutex_enter(&arc.mru->mtx);
16402391Smaybee 				list_remove(&arc.mru->list, buf);
16412391Smaybee 				list_insert_head(&arc.mru->list, buf);
16422391Smaybee 				mutex_exit(&arc.mru->mtx);
16432391Smaybee 			} else {
16442391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
16452391Smaybee 				atomic_add_64(&arc.mru->hits, 1);
16462391Smaybee 			}
16472391Smaybee 			buf->b_arc_access = lbolt;
1648789Sahrens 			return;
1649789Sahrens 		}
1650789Sahrens 
1651789Sahrens 		/*
1652789Sahrens 		 * This buffer has been "accessed" only once so far,
1653789Sahrens 		 * but it is still in the cache. Move it to the MFU
1654789Sahrens 		 * state.
1655789Sahrens 		 */
1656789Sahrens 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
1657789Sahrens 			/*
1658789Sahrens 			 * At least ARC_MINTIME ticks have passed since we
1659789Sahrens 			 * instantiated this buffer.  Move it to the
1660789Sahrens 			 * most frequently used state.
1661789Sahrens 			 */
1662789Sahrens 			buf->b_arc_access = lbolt;
16631544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
16641544Seschrock 			arc_change_state(arc.mfu, buf, hash_lock);
1665789Sahrens 		}
16661544Seschrock 		atomic_add_64(&arc.mru->hits, 1);
16671544Seschrock 	} else if (buf->b_state == arc.mru_ghost) {
1668789Sahrens 		arc_state_t	*new_state;
1669789Sahrens 		/*
1670789Sahrens 		 * This buffer has been "accessed" recently, but
1671789Sahrens 		 * was evicted from the cache.  Move it to the
1672789Sahrens 		 * MFU state.
1673789Sahrens 		 */
1674789Sahrens 
1675789Sahrens 		if (buf->b_flags & ARC_PREFETCH) {
16761544Seschrock 			new_state = arc.mru;
16772391Smaybee 			if (refcount_count(&buf->b_refcnt) > 0)
16782391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
16791544Seschrock 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1680789Sahrens 		} else {
16811544Seschrock 			new_state = arc.mfu;
16821544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1683789Sahrens 		}
1684789Sahrens 
1685789Sahrens 		buf->b_arc_access = lbolt;
1686789Sahrens 		arc_change_state(new_state, buf, hash_lock);
1687789Sahrens 
16881544Seschrock 		atomic_add_64(&arc.mru_ghost->hits, 1);
16891544Seschrock 	} else if (buf->b_state == arc.mfu) {
1690789Sahrens 		/*
1691789Sahrens 		 * This buffer has been accessed more than once and is
1692789Sahrens 		 * still in the cache.  Keep it in the MFU state.
1693789Sahrens 		 *
16942391Smaybee 		 * NOTE: an add_reference() that occurred when we did
16952391Smaybee 		 * the arc_read() will have kicked this off the list.
16962391Smaybee 		 * If it was a prefetch, we will explicitly move it to
16972391Smaybee 		 * the head of the list now.
1698789Sahrens 		 */
16992391Smaybee 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
17002391Smaybee 			ASSERT(refcount_count(&buf->b_refcnt) == 0);
17012391Smaybee 			ASSERT(list_link_active(&buf->b_arc_node));
17022391Smaybee 			mutex_enter(&arc.mfu->mtx);
17032391Smaybee 			list_remove(&arc.mfu->list, buf);
17042391Smaybee 			list_insert_head(&arc.mfu->list, buf);
17052391Smaybee 			mutex_exit(&arc.mfu->mtx);
17062391Smaybee 		}
17071544Seschrock 		atomic_add_64(&arc.mfu->hits, 1);
17082391Smaybee 		buf->b_arc_access = lbolt;
17091544Seschrock 	} else if (buf->b_state == arc.mfu_ghost) {
17102391Smaybee 		arc_state_t	*new_state = arc.mfu;
1711789Sahrens 		/*
1712789Sahrens 		 * This buffer has been accessed more than once but has
1713789Sahrens 		 * been evicted from the cache.  Move it back to the
1714789Sahrens 		 * MFU state.
1715789Sahrens 		 */
1716789Sahrens 
17172391Smaybee 		if (buf->b_flags & ARC_PREFETCH) {
17182391Smaybee 			/*
17192391Smaybee 			 * This is a prefetch access...
17202391Smaybee 			 * move this block back to the MRU state.
17212391Smaybee 			 */
17222391Smaybee 			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
17232391Smaybee 			new_state = arc.mru;
17242391Smaybee 		}
17252391Smaybee 
1726789Sahrens 		buf->b_arc_access = lbolt;
17271544Seschrock 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
17282391Smaybee 		arc_change_state(new_state, buf, hash_lock);
1729789Sahrens 
17301544Seschrock 		atomic_add_64(&arc.mfu_ghost->hits, 1);
1731789Sahrens 	} else {
1732789Sahrens 		ASSERT(!"invalid arc state");
1733789Sahrens 	}
1734789Sahrens }
1735789Sahrens 
1736789Sahrens /* a generic arc_done_func_t which you can use */
1737789Sahrens /* ARGSUSED */
1738789Sahrens void
1739789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1740789Sahrens {
1741789Sahrens 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
17421544Seschrock 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1743789Sahrens }
1744789Sahrens 
1745789Sahrens /* a generic arc_done_func_t which you can use */
1746789Sahrens void
1747789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1748789Sahrens {
1749789Sahrens 	arc_buf_t **bufp = arg;
1750789Sahrens 	if (zio && zio->io_error) {
17511544Seschrock 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1752789Sahrens 		*bufp = NULL;
1753789Sahrens 	} else {
1754789Sahrens 		*bufp = buf;
1755789Sahrens 	}
1756789Sahrens }
1757789Sahrens 
1758789Sahrens static void
1759789Sahrens arc_read_done(zio_t *zio)
1760789Sahrens {
17611589Smaybee 	arc_buf_hdr_t	*hdr, *found;
1762789Sahrens 	arc_buf_t	*buf;
1763789Sahrens 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1764789Sahrens 	kmutex_t	*hash_lock;
1765789Sahrens 	arc_callback_t	*callback_list, *acb;
1766789Sahrens 	int		freeable = FALSE;
1767789Sahrens 
1768789Sahrens 	buf = zio->io_private;
1769789Sahrens 	hdr = buf->b_hdr;
1770789Sahrens 
17711589Smaybee 	/*
17721589Smaybee 	 * The hdr was inserted into the hash table and removed from lists
17731589Smaybee 	 * prior to starting I/O.  We should find this header, since
17741589Smaybee 	 * it's in the hash table, and it should be legit since it's
17751589Smaybee 	 * not possible to evict it during the I/O.  The only possible
17761589Smaybee 	 * reason for it not to be found is if we were freed during the
17771589Smaybee 	 * read.
17781589Smaybee 	 */
17791589Smaybee 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
17803093Sahrens 	    &hash_lock);
1781789Sahrens 
17821589Smaybee 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
17831589Smaybee 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1784789Sahrens 
1785789Sahrens 	/* byteswap if necessary */
1786789Sahrens 	callback_list = hdr->b_acb;
1787789Sahrens 	ASSERT(callback_list != NULL);
1788789Sahrens 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1789789Sahrens 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1790789Sahrens 
17913093Sahrens 	arc_cksum_compute(buf);
17923093Sahrens 
1793789Sahrens 	/* create copies of the data buffer for the callers */
1794789Sahrens 	abuf = buf;
1795789Sahrens 	for (acb = callback_list; acb; acb = acb->acb_next) {
1796789Sahrens 		if (acb->acb_done) {
17972688Smaybee 			if (abuf == NULL)
17982688Smaybee 				abuf = arc_buf_clone(buf);
1799789Sahrens 			acb->acb_buf = abuf;
1800789Sahrens 			abuf = NULL;
1801789Sahrens 		}
1802789Sahrens 	}
1803789Sahrens 	hdr->b_acb = NULL;
1804789Sahrens 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
18051544Seschrock 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
18061544Seschrock 	if (abuf == buf)
18071544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1808789Sahrens 
1809789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1810789Sahrens 
1811789Sahrens 	if (zio->io_error != 0) {
1812789Sahrens 		hdr->b_flags |= ARC_IO_ERROR;
1813789Sahrens 		if (hdr->b_state != arc.anon)
1814789Sahrens 			arc_change_state(arc.anon, hdr, hash_lock);
18151544Seschrock 		if (HDR_IN_HASH_TABLE(hdr))
18161544Seschrock 			buf_hash_remove(hdr);
1817789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
18182391Smaybee 		/* convert checksum errors into IO errors */
18191544Seschrock 		if (zio->io_error == ECKSUM)
18201544Seschrock 			zio->io_error = EIO;
1821789Sahrens 	}
1822789Sahrens 
18231544Seschrock 	/*
18242391Smaybee 	 * Broadcast before we drop the hash_lock to avoid the possibility
18252391Smaybee 	 * that the hdr (and hence the cv) might be freed before we get to
18262391Smaybee 	 * the cv_broadcast().
18271544Seschrock 	 */
18281544Seschrock 	cv_broadcast(&hdr->b_cv);
18291544Seschrock 
18301589Smaybee 	if (hash_lock) {
1831789Sahrens 		/*
1832789Sahrens 		 * Only call arc_access on anonymous buffers.  This is because
1833789Sahrens 		 * if we've issued an I/O for an evicted buffer, we've already
1834789Sahrens 		 * called arc_access (to prevent any simultaneous readers from
1835789Sahrens 		 * getting confused).
1836789Sahrens 		 */
1837789Sahrens 		if (zio->io_error == 0 && hdr->b_state == arc.anon)
18382688Smaybee 			arc_access(hdr, hash_lock);
18392688Smaybee 		mutex_exit(hash_lock);
1840789Sahrens 	} else {
1841789Sahrens 		/*
1842789Sahrens 		 * This block was freed while we waited for the read to
1843789Sahrens 		 * complete.  It has been removed from the hash table and
1844789Sahrens 		 * moved to the anonymous state (so that it won't show up
1845789Sahrens 		 * in the cache).
1846789Sahrens 		 */
1847789Sahrens 		ASSERT3P(hdr->b_state, ==, arc.anon);
1848789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
1849789Sahrens 	}
1850789Sahrens 
1851789Sahrens 	/* execute each callback and free its structure */
1852789Sahrens 	while ((acb = callback_list) != NULL) {
1853789Sahrens 		if (acb->acb_done)
1854789Sahrens 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1855789Sahrens 
1856789Sahrens 		if (acb->acb_zio_dummy != NULL) {
1857789Sahrens 			acb->acb_zio_dummy->io_error = zio->io_error;
1858789Sahrens 			zio_nowait(acb->acb_zio_dummy);
1859789Sahrens 		}
1860789Sahrens 
1861789Sahrens 		callback_list = acb->acb_next;
1862789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
1863789Sahrens 	}
1864789Sahrens 
1865789Sahrens 	if (freeable)
18661544Seschrock 		arc_hdr_destroy(hdr);
1867789Sahrens }
1868789Sahrens 
1869789Sahrens /*
1870789Sahrens  * "Read" the block at the specified DVA (in bp) via the
1871789Sahrens  * cache.  If the block is found in the cache, invoke the provided
1872789Sahrens  * callback immediately and return.  Note that the `zio' parameter
1873789Sahrens  * in the callback will be NULL in this case, since no IO was
1874789Sahrens  * required.  If the block is not in the cache pass the read request
1875789Sahrens  * on to the spa with a substitute callback function, so that the
1876789Sahrens  * requested block will be added to the cache.
1877789Sahrens  *
1878789Sahrens  * If a read request arrives for a block that has a read in-progress,
1879789Sahrens  * either wait for the in-progress read to complete (and return the
1880789Sahrens  * results); or, if this is a read with a "done" func, add a record
1881789Sahrens  * to the read to invoke the "done" func when the read completes,
1882789Sahrens  * and return; or just return.
1883789Sahrens  *
1884789Sahrens  * arc_read_done() will invoke all the requested "done" functions
1885789Sahrens  * for readers of this block.
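 *
 * A minimal sketch of a synchronous caller (hypothetical; swapfunc,
 * spa, bp and zb stand in for the caller's context):
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	(void) arc_read(NULL, spa, bp, swapfunc, arc_getbuf_func,
 *	    &abuf, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL,
 *	    &aflags, zb);
 *	if (abuf != NULL) {
 *		... use abuf->b_data, then drop the hold with
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}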
1886789Sahrens  */
1887789Sahrens int
1888789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1889789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
18902391Smaybee     uint32_t *arc_flags, zbookmark_t *zb)
1891789Sahrens {
1892789Sahrens 	arc_buf_hdr_t *hdr;
1893789Sahrens 	arc_buf_t *buf;
1894789Sahrens 	kmutex_t *hash_lock;
1895789Sahrens 	zio_t	*rzio;
1896789Sahrens 
1897789Sahrens top:
1898789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
18991544Seschrock 	if (hdr && hdr->b_datacnt > 0) {
1900789Sahrens 
19012391Smaybee 		*arc_flags |= ARC_CACHED;
19022391Smaybee 
1903789Sahrens 		if (HDR_IO_IN_PROGRESS(hdr)) {
19042391Smaybee 
19052391Smaybee 			if (*arc_flags & ARC_WAIT) {
19062391Smaybee 				cv_wait(&hdr->b_cv, hash_lock);
19072391Smaybee 				mutex_exit(hash_lock);
19082391Smaybee 				goto top;
19092391Smaybee 			}
19102391Smaybee 			ASSERT(*arc_flags & ARC_NOWAIT);
19112391Smaybee 
19122391Smaybee 			if (done) {
1913789Sahrens 				arc_callback_t	*acb = NULL;
1914789Sahrens 
1915789Sahrens 				acb = kmem_zalloc(sizeof (arc_callback_t),
1916789Sahrens 				    KM_SLEEP);
1917789Sahrens 				acb->acb_done = done;
1918789Sahrens 				acb->acb_private = private;
1919789Sahrens 				acb->acb_byteswap = swap;
1920789Sahrens 				if (pio != NULL)
1921789Sahrens 					acb->acb_zio_dummy = zio_null(pio,
1922789Sahrens 					    spa, NULL, NULL, flags);
1923789Sahrens 
1924789Sahrens 				ASSERT(acb->acb_done != NULL);
1925789Sahrens 				acb->acb_next = hdr->b_acb;
1926789Sahrens 				hdr->b_acb = acb;
1927789Sahrens 				add_reference(hdr, hash_lock, private);
1928789Sahrens 				mutex_exit(hash_lock);
1929789Sahrens 				return (0);
1930789Sahrens 			}
1931789Sahrens 			mutex_exit(hash_lock);
1932789Sahrens 			return (0);
1933789Sahrens 		}
1934789Sahrens 
19351544Seschrock 		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
1936789Sahrens 
19371544Seschrock 		if (done) {
19382688Smaybee 			add_reference(hdr, hash_lock, private);
19391544Seschrock 			/*
19401544Seschrock 			 * If this block is already in use, create a new
19411544Seschrock 			 * copy of the data so that we will be guaranteed
19421544Seschrock 			 * that arc_release() will always succeed.
19431544Seschrock 			 */
19441544Seschrock 			buf = hdr->b_buf;
19451544Seschrock 			ASSERT(buf);
19461544Seschrock 			ASSERT(buf->b_data);
19472688Smaybee 			if (HDR_BUF_AVAILABLE(hdr)) {
19481544Seschrock 				ASSERT(buf->b_efunc == NULL);
19491544Seschrock 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
19502688Smaybee 			} else {
19512688Smaybee 				buf = arc_buf_clone(buf);
19521544Seschrock 			}
19532391Smaybee 		} else if (*arc_flags & ARC_PREFETCH &&
19542391Smaybee 		    refcount_count(&hdr->b_refcnt) == 0) {
19552391Smaybee 			hdr->b_flags |= ARC_PREFETCH;
1956789Sahrens 		}
1957789Sahrens 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
19582688Smaybee 		arc_access(hdr, hash_lock);
19592688Smaybee 		mutex_exit(hash_lock);
1960789Sahrens 		atomic_add_64(&arc.hits, 1);
1961789Sahrens 		if (done)
1962789Sahrens 			done(NULL, buf, private);
1963789Sahrens 	} else {
1964789Sahrens 		uint64_t size = BP_GET_LSIZE(bp);
1965789Sahrens 		arc_callback_t	*acb;
1966789Sahrens 
1967789Sahrens 		if (hdr == NULL) {
1968789Sahrens 			/* this block is not in the cache */
1969789Sahrens 			arc_buf_hdr_t	*exists;
19703290Sjohansen 			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
19713290Sjohansen 			buf = arc_buf_alloc(spa, size, private, type);
1972789Sahrens 			hdr = buf->b_hdr;
1973789Sahrens 			hdr->b_dva = *BP_IDENTITY(bp);
1974789Sahrens 			hdr->b_birth = bp->blk_birth;
1975789Sahrens 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
1976789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
1977789Sahrens 			if (exists) {
1978789Sahrens 				/* somebody beat us to the hash insert */
1979789Sahrens 				mutex_exit(hash_lock);
1980789Sahrens 				bzero(&hdr->b_dva, sizeof (dva_t));
1981789Sahrens 				hdr->b_birth = 0;
1982789Sahrens 				hdr->b_cksum0 = 0;
19831544Seschrock 				(void) arc_buf_remove_ref(buf, private);
1984789Sahrens 				goto top; /* restart the IO request */
1985789Sahrens 			}
19862391Smaybee 			/* if this is a prefetch, we don't have a reference */
19872391Smaybee 			if (*arc_flags & ARC_PREFETCH) {
19882391Smaybee 				(void) remove_reference(hdr, hash_lock,
19892391Smaybee 				    private);
19902391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
19912391Smaybee 			}
19922391Smaybee 			if (BP_GET_LEVEL(bp) > 0)
19932391Smaybee 				hdr->b_flags |= ARC_INDIRECT;
1994789Sahrens 		} else {
1995789Sahrens 			/* this block is in the ghost cache */
19961544Seschrock 			ASSERT(GHOST_STATE(hdr->b_state));
19971544Seschrock 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
19982391Smaybee 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
19992391Smaybee 			ASSERT(hdr->b_buf == NULL);
2000789Sahrens 
20012391Smaybee 			/* if this is a prefetch, we don't have a reference */
20022391Smaybee 			if (*arc_flags & ARC_PREFETCH)
20032391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
20042391Smaybee 			else
20052391Smaybee 				add_reference(hdr, hash_lock, private);
2006789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
20071544Seschrock 			buf->b_hdr = hdr;
20082688Smaybee 			buf->b_data = NULL;
20091544Seschrock 			buf->b_efunc = NULL;
20101544Seschrock 			buf->b_private = NULL;
20111544Seschrock 			buf->b_next = NULL;
20121544Seschrock 			hdr->b_buf = buf;
20132688Smaybee 			arc_get_data_buf(buf);
20141544Seschrock 			ASSERT(hdr->b_datacnt == 0);
20151544Seschrock 			hdr->b_datacnt = 1;
20162391Smaybee 
2017789Sahrens 		}
2018789Sahrens 
2019789Sahrens 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2020789Sahrens 		acb->acb_done = done;
2021789Sahrens 		acb->acb_private = private;
2022789Sahrens 		acb->acb_byteswap = swap;
2023789Sahrens 
2024789Sahrens 		ASSERT(hdr->b_acb == NULL);
2025789Sahrens 		hdr->b_acb = acb;
2026789Sahrens 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
2027789Sahrens 
2028789Sahrens 		/*
2029789Sahrens 		 * If the buffer has been evicted, migrate it to a present state
2030789Sahrens 		 * before issuing the I/O.  Once we drop the hash-table lock,
2031789Sahrens 		 * the header will be marked as I/O in progress and have an
2032789Sahrens 		 * attached buffer.  At this point, anybody who finds this
2033789Sahrens 		 * buffer ought to notice that it's legit but has a pending I/O.
2034789Sahrens 		 */
2035789Sahrens 
20361544Seschrock 		if (GHOST_STATE(hdr->b_state))
20372688Smaybee 			arc_access(hdr, hash_lock);
20382688Smaybee 		mutex_exit(hash_lock);
2039789Sahrens 
2040789Sahrens 		ASSERT3U(hdr->b_size, ==, size);
20411596Sahrens 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
20421596Sahrens 		    zbookmark_t *, zb);
2043789Sahrens 		atomic_add_64(&arc.misses, 1);
20441544Seschrock 
2045789Sahrens 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
20461544Seschrock 		    arc_read_done, buf, priority, flags, zb);
2047789Sahrens 
20482391Smaybee 		if (*arc_flags & ARC_WAIT)
2049789Sahrens 			return (zio_wait(rzio));
2050789Sahrens 
20512391Smaybee 		ASSERT(*arc_flags & ARC_NOWAIT);
2052789Sahrens 		zio_nowait(rzio);
2053789Sahrens 	}
2054789Sahrens 	return (0);
2055789Sahrens }
2056789Sahrens 
2057789Sahrens /*
2058789Sahrens  * arc_read() variant to support pool traversal.  If the block is already
2059789Sahrens  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2060789Sahrens  * The idea is that we don't want pool traversal filling up memory, but
2061789Sahrens  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
2062789Sahrens  */
2063789Sahrens int
2064789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2065789Sahrens {
2066789Sahrens 	arc_buf_hdr_t *hdr;
2067789Sahrens 	kmutex_t *hash_mtx;
2068789Sahrens 	int rc = 0;
2069789Sahrens 
2070789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2071789Sahrens 
20721544Seschrock 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
20731544Seschrock 		arc_buf_t *buf = hdr->b_buf;
20741544Seschrock 
20751544Seschrock 		ASSERT(buf);
20761544Seschrock 		while (buf->b_data == NULL) {
20771544Seschrock 			buf = buf->b_next;
20781544Seschrock 			ASSERT(buf);
20791544Seschrock 		}
20801544Seschrock 		bcopy(buf->b_data, data, hdr->b_size);
20811544Seschrock 	} else {
2082789Sahrens 		rc = ENOENT;
20831544Seschrock 	}
2084789Sahrens 
2085789Sahrens 	if (hash_mtx)
2086789Sahrens 		mutex_exit(hash_mtx);
2087789Sahrens 
2088789Sahrens 	return (rc);
2089789Sahrens }
2090789Sahrens 
20911544Seschrock void
20921544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
20931544Seschrock {
20941544Seschrock 	ASSERT(buf->b_hdr != NULL);
20951544Seschrock 	ASSERT(buf->b_hdr->b_state != arc.anon);
20961544Seschrock 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
20971544Seschrock 	buf->b_efunc = func;
20981544Seschrock 	buf->b_private = private;
20991544Seschrock }
21001544Seschrock 
21011544Seschrock /*
21021544Seschrock  * This is used by the DMU to let the ARC know that a buffer is
21031544Seschrock  * being evicted, so the ARC should clean up.  If this arc buf
21041544Seschrock  * is not yet in the evicted state, it will be put there.
21051544Seschrock  */
21061544Seschrock int
21071544Seschrock arc_buf_evict(arc_buf_t *buf)
21081544Seschrock {
21092887Smaybee 	arc_buf_hdr_t *hdr;
21101544Seschrock 	kmutex_t *hash_lock;
21111544Seschrock 	arc_buf_t **bufp;
21121544Seschrock 
21132887Smaybee 	mutex_enter(&arc_eviction_mtx);
21142887Smaybee 	hdr = buf->b_hdr;
21151544Seschrock 	if (hdr == NULL) {
21161544Seschrock 		/*
21171544Seschrock 		 * We are in arc_do_user_evicts().
21181544Seschrock 		 */
21191544Seschrock 		ASSERT(buf->b_data == NULL);
21202887Smaybee 		mutex_exit(&arc_eviction_mtx);
21211544Seschrock 		return (0);
21221544Seschrock 	}
21232887Smaybee 	hash_lock = HDR_LOCK(hdr);
21242887Smaybee 	mutex_exit(&arc_eviction_mtx);
21251544Seschrock 
21261544Seschrock 	mutex_enter(hash_lock);
21271544Seschrock 
21282724Smaybee 	if (buf->b_data == NULL) {
21292724Smaybee 		/*
21302724Smaybee 		 * We are on the eviction list.
21312724Smaybee 		 */
21322724Smaybee 		mutex_exit(hash_lock);
21332724Smaybee 		mutex_enter(&arc_eviction_mtx);
21342724Smaybee 		if (buf->b_hdr == NULL) {
21352724Smaybee 			/*
21362724Smaybee 			 * We are already in arc_do_user_evicts().
21372724Smaybee 			 */
21382724Smaybee 			mutex_exit(&arc_eviction_mtx);
21392724Smaybee 			return (0);
21402724Smaybee 		} else {
21412724Smaybee 			arc_buf_t copy = *buf; /* structure assignment */
21422724Smaybee 			/*
21432724Smaybee 			 * Process this buffer now
21442724Smaybee 			 * but let arc_do_user_evicts() do the reaping.
21452724Smaybee 			 */
21462724Smaybee 			buf->b_efunc = NULL;
21472724Smaybee 			mutex_exit(&arc_eviction_mtx);
21482724Smaybee 			VERIFY(copy.b_efunc(&copy) == 0);
21492724Smaybee 			return (1);
21502724Smaybee 		}
21512724Smaybee 	}
21522724Smaybee 
21532724Smaybee 	ASSERT(buf->b_hdr == hdr);
21542724Smaybee 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
21551544Seschrock 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
21561544Seschrock 
21571544Seschrock 	/*
21581544Seschrock 	 * Pull this buffer off of the hdr
21591544Seschrock 	 */
21601544Seschrock 	bufp = &hdr->b_buf;
21611544Seschrock 	while (*bufp != buf)
21621544Seschrock 		bufp = &(*bufp)->b_next;
21631544Seschrock 	*bufp = buf->b_next;
21641544Seschrock 
21651544Seschrock 	ASSERT(buf->b_data != NULL);
21662688Smaybee 	arc_buf_destroy(buf, FALSE, FALSE);
21671544Seschrock 
21681544Seschrock 	if (hdr->b_datacnt == 0) {
21691544Seschrock 		arc_state_t *old_state = hdr->b_state;
21701544Seschrock 		arc_state_t *evicted_state;
21711544Seschrock 
21721544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
21731544Seschrock 
21741544Seschrock 		evicted_state =
21751544Seschrock 		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
21761544Seschrock 
21771544Seschrock 		mutex_enter(&old_state->mtx);
21781544Seschrock 		mutex_enter(&evicted_state->mtx);
21791544Seschrock 
21801544Seschrock 		arc_change_state(evicted_state, hdr, hash_lock);
21811544Seschrock 		ASSERT(HDR_IN_HASH_TABLE(hdr));
21821544Seschrock 		hdr->b_flags = ARC_IN_HASH_TABLE;
21831544Seschrock 
21841544Seschrock 		mutex_exit(&evicted_state->mtx);
21851544Seschrock 		mutex_exit(&old_state->mtx);
21861544Seschrock 	}
21871544Seschrock 	mutex_exit(hash_lock);
21881819Smaybee 
21891544Seschrock 	VERIFY(buf->b_efunc(buf) == 0);
21901544Seschrock 	buf->b_efunc = NULL;
21911544Seschrock 	buf->b_private = NULL;
21921544Seschrock 	buf->b_hdr = NULL;
21931544Seschrock 	kmem_cache_free(buf_cache, buf);
21941544Seschrock 	return (1);
21951544Seschrock }
21961544Seschrock 
2197789Sahrens /*
2198789Sahrens  * Release this buffer from the cache.  This must be done
2199789Sahrens  * after a read and prior to modifying the buffer contents.
2200789Sahrens  * If the buffer has more than one reference, we must make
2201789Sahrens  * a new hdr for the buffer.
2202789Sahrens  */
2203789Sahrens void
2204789Sahrens arc_release(arc_buf_t *buf, void *tag)
2205789Sahrens {
2206789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2207789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2208789Sahrens 
2209789Sahrens 	/* this buffer is not on any list */
2210789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2211789Sahrens 
2212789Sahrens 	if (hdr->b_state == arc.anon) {
2213789Sahrens 		/* this buffer is already released */
2214789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2215789Sahrens 		ASSERT(BUF_EMPTY(hdr));
22161544Seschrock 		ASSERT(buf->b_efunc == NULL);
22173093Sahrens 		arc_buf_thaw(buf);
2218789Sahrens 		return;
2219789Sahrens 	}
2220789Sahrens 
2221789Sahrens 	mutex_enter(hash_lock);
2222789Sahrens 
22231544Seschrock 	/*
22241544Seschrock 	 * Do we have more than one buf?
22251544Seschrock 	 */
22261544Seschrock 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2227789Sahrens 		arc_buf_hdr_t *nhdr;
2228789Sahrens 		arc_buf_t **bufp;
2229789Sahrens 		uint64_t blksz = hdr->b_size;
2230789Sahrens 		spa_t *spa = hdr->b_spa;
22313290Sjohansen 		arc_buf_contents_t type = hdr->b_type;
2232789Sahrens 
22331544Seschrock 		ASSERT(hdr->b_datacnt > 1);
2234789Sahrens 		/*
2235789Sahrens 		 * Pull the data off of this buf and attach it to
2236789Sahrens 		 * a new anonymous buf.
2237789Sahrens 		 */
22381544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
2239789Sahrens 		bufp = &hdr->b_buf;
22401544Seschrock 		while (*bufp != buf)
2241789Sahrens 			bufp = &(*bufp)->b_next;
2242789Sahrens 		*bufp = (*bufp)->b_next;
22431544Seschrock 
2244789Sahrens 		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
2245789Sahrens 		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
22461544Seschrock 		if (refcount_is_zero(&hdr->b_refcnt)) {
22471544Seschrock 			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
22481544Seschrock 			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
22491544Seschrock 		}
22501544Seschrock 		hdr->b_datacnt -= 1;
22511544Seschrock 
2252789Sahrens 		mutex_exit(hash_lock);
2253789Sahrens 
2254789Sahrens 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2255789Sahrens 		nhdr->b_size = blksz;
2256789Sahrens 		nhdr->b_spa = spa;
22573290Sjohansen 		nhdr->b_type = type;
2258789Sahrens 		nhdr->b_buf = buf;
2259789Sahrens 		nhdr->b_state = arc.anon;
2260789Sahrens 		nhdr->b_arc_access = 0;
2261789Sahrens 		nhdr->b_flags = 0;
22621544Seschrock 		nhdr->b_datacnt = 1;
22633093Sahrens 		nhdr->b_freeze_cksum =
22643093Sahrens 		    kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
22653093Sahrens 		*nhdr->b_freeze_cksum = *hdr->b_freeze_cksum; /* struct copy */
2266789Sahrens 		buf->b_hdr = nhdr;
2267789Sahrens 		buf->b_next = NULL;
2268789Sahrens 		(void) refcount_add(&nhdr->b_refcnt, tag);
2269789Sahrens 		atomic_add_64(&arc.anon->size, blksz);
2270789Sahrens 
2271789Sahrens 		hdr = nhdr;
2272789Sahrens 	} else {
22731544Seschrock 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2274789Sahrens 		ASSERT(!list_link_active(&hdr->b_arc_node));
2275789Sahrens 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2276789Sahrens 		arc_change_state(arc.anon, hdr, hash_lock);
2277789Sahrens 		hdr->b_arc_access = 0;
2278789Sahrens 		mutex_exit(hash_lock);
2279789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
2280789Sahrens 		hdr->b_birth = 0;
2281789Sahrens 		hdr->b_cksum0 = 0;
2282789Sahrens 	}
22831544Seschrock 	buf->b_efunc = NULL;
22841544Seschrock 	buf->b_private = NULL;
22853093Sahrens 	arc_buf_thaw(buf);
2286789Sahrens }
2287789Sahrens 
2288789Sahrens int
2289789Sahrens arc_released(arc_buf_t *buf)
2290789Sahrens {
22911544Seschrock 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
22921544Seschrock }
22931544Seschrock 
22941544Seschrock int
22951544Seschrock arc_has_callback(arc_buf_t *buf)
22961544Seschrock {
22971544Seschrock 	return (buf->b_efunc != NULL);
2298789Sahrens }
2299789Sahrens 
23001544Seschrock #ifdef ZFS_DEBUG
23011544Seschrock int
23021544Seschrock arc_referenced(arc_buf_t *buf)
23031544Seschrock {
23041544Seschrock 	return (refcount_count(&buf->b_hdr->b_refcnt));
23051544Seschrock }
23061544Seschrock #endif
23071544Seschrock 
2308789Sahrens static void
2309789Sahrens arc_write_done(zio_t *zio)
2310789Sahrens {
2311789Sahrens 	arc_buf_t *buf;
2312789Sahrens 	arc_buf_hdr_t *hdr;
2313789Sahrens 	arc_callback_t *acb;
2314789Sahrens 
2315789Sahrens 	buf = zio->io_private;
2316789Sahrens 	hdr = buf->b_hdr;
2317789Sahrens 	acb = hdr->b_acb;
2318789Sahrens 	hdr->b_acb = NULL;
23191544Seschrock 	ASSERT(acb != NULL);
2320789Sahrens 
2321789Sahrens 	/* this buffer is on no lists and is not in the hash table */
2322789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2323789Sahrens 
2324789Sahrens 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2325789Sahrens 	hdr->b_birth = zio->io_bp->blk_birth;
2326789Sahrens 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
23271544Seschrock 	/*
23281544Seschrock 	 * If the block to be written was all-zero, we may have
23291544Seschrock 	 * compressed it away.  In this case no write was performed
23301544Seschrock 	 * so there will be no dva/birth-date/checksum.  The buffer
23311544Seschrock 	 * must therefore remain anonymous (and uncached).
23321544Seschrock 	 */
2333789Sahrens 	if (!BUF_EMPTY(hdr)) {
2334789Sahrens 		arc_buf_hdr_t *exists;
2335789Sahrens 		kmutex_t *hash_lock;
2336789Sahrens 
23373093Sahrens 		arc_cksum_verify(buf);
23383093Sahrens 
2339789Sahrens 		exists = buf_hash_insert(hdr, &hash_lock);
2340789Sahrens 		if (exists) {
2341789Sahrens 			/*
2342789Sahrens 			 * This can only happen if we overwrite for
2343789Sahrens 			 * sync-to-convergence, because we remove
2344789Sahrens 			 * buffers from the hash table when we arc_free().
2345789Sahrens 			 */
2346789Sahrens 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2347789Sahrens 			    BP_IDENTITY(zio->io_bp)));
2348789Sahrens 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2349789Sahrens 			    zio->io_bp->blk_birth);
2350789Sahrens 
2351789Sahrens 			ASSERT(refcount_is_zero(&exists->b_refcnt));
2352789Sahrens 			arc_change_state(arc.anon, exists, hash_lock);
2353789Sahrens 			mutex_exit(hash_lock);
23541544Seschrock 			arc_hdr_destroy(exists);
2355789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2356789Sahrens 			ASSERT3P(exists, ==, NULL);
2357789Sahrens 		}
23581544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
23592688Smaybee 		arc_access(hdr, hash_lock);
23602688Smaybee 		mutex_exit(hash_lock);
23611544Seschrock 	} else if (acb->acb_done == NULL) {
23621544Seschrock 		int destroy_hdr;
23631544Seschrock 		/*
23641544Seschrock 		 * This is an anonymous buffer with no user callback,
23651544Seschrock 		 * destroy it if there are no active references.
23661544Seschrock 		 */
23671544Seschrock 		mutex_enter(&arc_eviction_mtx);
23681544Seschrock 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
23691544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
23701544Seschrock 		mutex_exit(&arc_eviction_mtx);
23711544Seschrock 		if (destroy_hdr)
23721544Seschrock 			arc_hdr_destroy(hdr);
23731544Seschrock 	} else {
23741544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2375789Sahrens 	}
23761544Seschrock 
23771544Seschrock 	if (acb->acb_done) {
2378789Sahrens 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2379789Sahrens 		acb->acb_done(zio, buf, acb->acb_private);
2380789Sahrens 	}
2381789Sahrens 
23821544Seschrock 	kmem_free(acb, sizeof (arc_callback_t));
2383789Sahrens }
2384789Sahrens 
2385789Sahrens int
23861775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2387789Sahrens     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2388789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
23891544Seschrock     uint32_t arc_flags, zbookmark_t *zb)
2390789Sahrens {
2391789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2392789Sahrens 	arc_callback_t	*acb;
2393789Sahrens 	zio_t	*rzio;
2394789Sahrens 
2395789Sahrens 	/* this is a private buffer - no locking required */
2396789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2397789Sahrens 	ASSERT(BUF_EMPTY(hdr));
2398789Sahrens 	ASSERT(!HDR_IO_ERROR(hdr));
23992237Smaybee 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
24002237Smaybee 	ASSERT(hdr->b_acb == 0);
2401789Sahrens 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2402789Sahrens 	acb->acb_done = done;
2403789Sahrens 	acb->acb_private = private;
2404789Sahrens 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
2405789Sahrens 	hdr->b_acb = acb;
24061544Seschrock 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
24073093Sahrens 	arc_cksum_compute(buf);
24081775Sbillm 	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
24091544Seschrock 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);
2410789Sahrens 
2411789Sahrens 	if (arc_flags & ARC_WAIT)
2412789Sahrens 		return (zio_wait(rzio));
2413789Sahrens 
2414789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2415789Sahrens 	zio_nowait(rzio);
2416789Sahrens 
2417789Sahrens 	return (0);
2418789Sahrens }
2419789Sahrens 
2420789Sahrens int
2421789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2422789Sahrens     zio_done_func_t *done, void *private, uint32_t arc_flags)
2423789Sahrens {
2424789Sahrens 	arc_buf_hdr_t *ab;
2425789Sahrens 	kmutex_t *hash_lock;
2426789Sahrens 	zio_t	*zio;
2427789Sahrens 
2428789Sahrens 	/*
2429789Sahrens 	 * If this buffer is in the cache, release it, so it
2430789Sahrens 	 * can be re-used.
2431789Sahrens 	 */
2432789Sahrens 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2433789Sahrens 	if (ab != NULL) {
2434789Sahrens 		/*
2435789Sahrens 		 * The checksum of blocks to free is not always
2436789Sahrens 		 * preserved (e.g. on the deadlist).  However, if it is
2437789Sahrens 		 * nonzero, it should match what we have in the cache.
2438789Sahrens 		 */
2439789Sahrens 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2440789Sahrens 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
24411990Smaybee 		if (ab->b_state != arc.anon)
24421990Smaybee 			arc_change_state(arc.anon, ab, hash_lock);
24432391Smaybee 		if (HDR_IO_IN_PROGRESS(ab)) {
24442391Smaybee 			/*
24452391Smaybee 			 * This should only happen when we prefetch.
24462391Smaybee 			 */
24472391Smaybee 			ASSERT(ab->b_flags & ARC_PREFETCH);
24482391Smaybee 			ASSERT3U(ab->b_datacnt, ==, 1);
24492391Smaybee 			ab->b_flags |= ARC_FREED_IN_READ;
24502391Smaybee 			if (HDR_IN_HASH_TABLE(ab))
24512391Smaybee 				buf_hash_remove(ab);
24522391Smaybee 			ab->b_arc_access = 0;
24532391Smaybee 			bzero(&ab->b_dva, sizeof (dva_t));
24542391Smaybee 			ab->b_birth = 0;
24552391Smaybee 			ab->b_cksum0 = 0;
24562391Smaybee 			ab->b_buf->b_efunc = NULL;
24572391Smaybee 			ab->b_buf->b_private = NULL;
24582391Smaybee 			mutex_exit(hash_lock);
24592391Smaybee 		} else if (refcount_is_zero(&ab->b_refcnt)) {
2460789Sahrens 			mutex_exit(hash_lock);
24611544Seschrock 			arc_hdr_destroy(ab);
2462789Sahrens 			atomic_add_64(&arc.deleted, 1);
2463789Sahrens 		} else {
24641589Smaybee 			/*
24652391Smaybee 			 * We still have an active reference on this
24662391Smaybee 			 * buffer.  This can happen, e.g., from
24672391Smaybee 			 * dbuf_unoverride().
24681589Smaybee 			 */
24692391Smaybee 			ASSERT(!HDR_IN_HASH_TABLE(ab));
2470789Sahrens 			ab->b_arc_access = 0;
2471789Sahrens 			bzero(&ab->b_dva, sizeof (dva_t));
2472789Sahrens 			ab->b_birth = 0;
2473789Sahrens 			ab->b_cksum0 = 0;
24741544Seschrock 			ab->b_buf->b_efunc = NULL;
24751544Seschrock 			ab->b_buf->b_private = NULL;
2476789Sahrens 			mutex_exit(hash_lock);
2477789Sahrens 		}
2478789Sahrens 	}
2479789Sahrens 
2480789Sahrens 	zio = zio_free(pio, spa, txg, bp, done, private);
2481789Sahrens 
2482789Sahrens 	if (arc_flags & ARC_WAIT)
2483789Sahrens 		return (zio_wait(zio));
2484789Sahrens 
2485789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2486789Sahrens 	zio_nowait(zio);
2487789Sahrens 
2488789Sahrens 	return (0);
2489789Sahrens }
2490789Sahrens 
2491789Sahrens void
2492789Sahrens arc_tempreserve_clear(uint64_t tempreserve)
2493789Sahrens {
2494789Sahrens 	atomic_add_64(&arc_tempreserve, -tempreserve);
2495789Sahrens 	ASSERT((int64_t)arc_tempreserve >= 0);
2496789Sahrens }
2497789Sahrens 
2498789Sahrens int
2499789Sahrens arc_tempreserve_space(uint64_t tempreserve)
2500789Sahrens {
2501789Sahrens #ifdef ZFS_DEBUG
2502789Sahrens 	/*
2503789Sahrens 	 * Once in a while, fail for no reason.  Everything should cope.
2504789Sahrens 	 */
2505789Sahrens 	if (spa_get_random(10000) == 0) {
2506789Sahrens 		dprintf("forcing random failure\n");
2507789Sahrens 		return (ERESTART);
2508789Sahrens 	}
2509789Sahrens #endif
2510982Smaybee 	if (tempreserve > arc.c/4 && !arc.no_grow)
2511982Smaybee 		arc.c = MIN(arc.c_max, tempreserve * 4);
2512982Smaybee 	if (tempreserve > arc.c)
2513982Smaybee 		return (ENOMEM);
2514982Smaybee 
2515789Sahrens 	/*
2516982Smaybee 	 * Throttle writes when the amount of dirty data in the cache
2517982Smaybee 	 * gets too large.  We try to keep the cache less than half full
2518982Smaybee 	 * of dirty blocks so that our sync times don't grow too large.
2519982Smaybee 	 * Note: if two requests come in concurrently, we might let them
2520982Smaybee 	 * both succeed, when one of them should fail.  Not a huge deal.
2521982Smaybee 	 *
2522982Smaybee 	 * XXX The limit should be adjusted dynamically to keep the time
2523982Smaybee 	 * to sync a dataset fixed (around 1-5 seconds?).
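	 *
	 * For example (hypothetical sizes): with arc.c = 1GB, a new
	 * reservation draws ERESTART once it would push the dirty
	 * footprint past 512MB while existing reservations plus
	 * anonymous data already exceed 256MB.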
2524789Sahrens 	 */
2525789Sahrens 
2526982Smaybee 	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
2527982Smaybee 	    arc_tempreserve + arc.anon->size > arc.c / 4) {
2528789Sahrens 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2529789Sahrens 		    "tempreserve=%lluK arc.c=%lluK\n",
2530789Sahrens 		    arc_tempreserve>>10, arc.anon->lsize>>10,
2531789Sahrens 		    tempreserve>>10, arc.c>>10);
2532789Sahrens 		return (ERESTART);
2533789Sahrens 	}
2534789Sahrens 	atomic_add_64(&arc_tempreserve, tempreserve);
2535789Sahrens 	return (0);
2536789Sahrens }
2537789Sahrens 
2538789Sahrens void
2539789Sahrens arc_init(void)
2540789Sahrens {
2541789Sahrens 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2542789Sahrens 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2543789Sahrens 
25442391Smaybee 	/* Convert seconds to clock ticks */
25452638Sperrin 	arc_min_prefetch_lifespan = 1 * hz;
25462391Smaybee 
2547789Sahrens 	/* Start out with 1/8 of all memory */
2548789Sahrens 	arc.c = physmem * PAGESIZE / 8;
2549789Sahrens 
2550789Sahrens #ifdef _KERNEL
2551789Sahrens 	/*
2552789Sahrens 	 * On architectures where the physical memory can be larger
2553789Sahrens 	 * than the addressable space (intel in 32-bit mode), we may
2554789Sahrens 	 * need to limit the cache to 1/8 of VM size.
2555789Sahrens 	 */
2556789Sahrens 	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2557789Sahrens #endif
2558789Sahrens 
2559982Smaybee 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2560789Sahrens 	arc.c_min = MAX(arc.c / 4, 64<<20);
2561982Smaybee 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2562789Sahrens 	if (arc.c * 8 >= 1<<30)
2563789Sahrens 		arc.c_max = (arc.c * 8) - (1<<30);
2564789Sahrens 	else
2565789Sahrens 		arc.c_max = arc.c_min;
2566789Sahrens 	arc.c_max = MAX(arc.c * 6, arc.c_max);
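	/*
	 * For example (hypothetical): on a 4GB machine arc.c starts at
	 * 512MB, giving c_min = 128MB and c_max = 3GB (before any
	 * tunable overrides below).
	 */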
25672885Sahrens 
25682885Sahrens 	/*
25692885Sahrens 	 * Allow the tunables to override our calculations if they are
25702885Sahrens 	 * reasonable (ie. over 64MB)
25712885Sahrens 	 * reasonable (i.e. over 64MB)
25722885Sahrens 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
25732885Sahrens 		arc.c_max = zfs_arc_max;
25742885Sahrens 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc.c_max)
25752885Sahrens 		arc.c_min = zfs_arc_min;
25762885Sahrens 
2577789Sahrens 	arc.c = arc.c_max;
2578789Sahrens 	arc.p = (arc.c >> 1);
2579789Sahrens 
2580789Sahrens 	/* if kmem_flags are set, lets try to use less memory */
2581789Sahrens 	if (kmem_debugging())
2582789Sahrens 		arc.c = arc.c / 2;
2583789Sahrens 	if (arc.c < arc.c_min)
2584789Sahrens 		arc.c = arc.c_min;
2585789Sahrens 
2586789Sahrens 	arc.anon = &ARC_anon;
25871544Seschrock 	arc.mru = &ARC_mru;
25881544Seschrock 	arc.mru_ghost = &ARC_mru_ghost;
25891544Seschrock 	arc.mfu = &ARC_mfu;
25901544Seschrock 	arc.mfu_ghost = &ARC_mfu_ghost;
25911544Seschrock 	arc.size = 0;
2592789Sahrens 
25932688Smaybee 	arc.hits = 0;
25942688Smaybee 	arc.recycle_miss = 0;
25952688Smaybee 	arc.evict_skip = 0;
25962688Smaybee 	arc.mutex_miss = 0;
25972688Smaybee 
25982856Snd150628 	mutex_init(&arc.anon->mtx, NULL, MUTEX_DEFAULT, NULL);
25992856Snd150628 	mutex_init(&arc.mru->mtx, NULL, MUTEX_DEFAULT, NULL);
26002856Snd150628 	mutex_init(&arc.mru_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
26012856Snd150628 	mutex_init(&arc.mfu->mtx, NULL, MUTEX_DEFAULT, NULL);
26022856Snd150628 	mutex_init(&arc.mfu_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
26032856Snd150628 
26041544Seschrock 	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
2605789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
26061544Seschrock 	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
2607789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
26081544Seschrock 	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
2609789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
26101544Seschrock 	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
2611789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
2612789Sahrens 
2613789Sahrens 	buf_init();
2614789Sahrens 
2615789Sahrens 	arc_thread_exit = 0;
26161544Seschrock 	arc_eviction_list = NULL;
26171544Seschrock 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
26182887Smaybee 	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
2619789Sahrens 
2620789Sahrens 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2621789Sahrens 	    TS_RUN, minclsyspri);
26223158Smaybee 
26233158Smaybee 	arc_dead = FALSE;
2624789Sahrens }
2625789Sahrens 
2626789Sahrens void
2627789Sahrens arc_fini(void)
2628789Sahrens {
2629789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
2630789Sahrens 	arc_thread_exit = 1;
2631789Sahrens 	while (arc_thread_exit != 0)
2632789Sahrens 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2633789Sahrens 	mutex_exit(&arc_reclaim_thr_lock);
2634789Sahrens 
2635789Sahrens 	arc_flush();
2636789Sahrens 
2637789Sahrens 	arc_dead = TRUE;
2638789Sahrens 
26391544Seschrock 	mutex_destroy(&arc_eviction_mtx);
2640789Sahrens 	mutex_destroy(&arc_reclaim_thr_lock);
2641789Sahrens 	cv_destroy(&arc_reclaim_thr_cv);
2642789Sahrens 
26431544Seschrock 	list_destroy(&arc.mru->list);
26441544Seschrock 	list_destroy(&arc.mru_ghost->list);
26451544Seschrock 	list_destroy(&arc.mfu->list);
26461544Seschrock 	list_destroy(&arc.mfu_ghost->list);
2647789Sahrens 
26482856Snd150628 	mutex_destroy(&arc.anon->mtx);
26492856Snd150628 	mutex_destroy(&arc.mru->mtx);
26502856Snd150628 	mutex_destroy(&arc.mru_ghost->mtx);
26512856Snd150628 	mutex_destroy(&arc.mfu->mtx);
26522856Snd150628 	mutex_destroy(&arc.mfu_ghost->mtx);
26532856Snd150628 
2654789Sahrens 	buf_fini();
2655789Sahrens }
2656