xref: /onnv-gate/usr/src/uts/common/fs/zfs/arc.c (revision 3158)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
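
/*
 * To make point 3 concrete (sizes here are illustrative only): a miss
 * on a 128K block may be paid for by evicting a single 128K buffer or
 * several smaller ones; when a buffer of exactly the right size is
 * evictable, arc_evict() can even recycle its data block directly for
 * the new buffer.
 */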

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes; rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */
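
/*
 * To illustrate the mutex_tryenter() rule above, code that walks an
 * arc list and then needs a hash lock follows roughly this pattern
 * (a simplified sketch of arc_evict() below, not the exact code):
 *
 *	mutex_enter(&state->mtx);
 *	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
 *		ab_prev = list_prev(&state->list, ab);
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;		(skip it; never block here)
 *		(... evict the buffer ...)
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->mtx);
 */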

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to a buffer, it is linked
 * onto one of the lists in arc.  These are the only buffers that
 * can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 */

typedef struct arc_state {
	list_t	list;	/* linked list of evictable buffers in state */
	uint64_t lsize;	/* total size of buffers in the linked list */
	uint64_t size;	/* total size of all buffers in this state */
	uint64_t hits;
	kmutex_t mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

static struct arc {
	arc_state_t	*anon;
	arc_state_t	*mru;
	arc_state_t	*mru_ghost;
	arc_state_t	*mfu;
	arc_state_t	*mfu_ghost;
	uint64_t	size;		/* Actual total arc size */
	uint64_t	p;		/* Target size (in bytes) of mru */
	uint64_t	c;		/* Target size of cache (in bytes) */
	uint64_t	c_min;		/* Minimum target cache size */
	uint64_t	c_max;		/* Maximum target cache size */

	/* performance stats */
	uint64_t	hits;
	uint64_t	misses;
	uint64_t	deleted;
	uint64_t	recycle_miss;
	uint64_t	mutex_miss;
	uint64_t	evict_skip;
	uint64_t	hash_elements;
	uint64_t	hash_elements_max;
	uint64_t	hash_collisions;
	uint64_t	hash_chains;
	uint32_t	hash_chain_max;

	int		no_grow;	/* Don't try to grow cache size */
} arc;

static uint64_t arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	void			*acb_private;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

struct arc_buf_hdr {
	/* immutable */
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	kcondvar_t		b_cv;
	arc_callback_t		*b_acb;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

#define	GHOST_STATE(state)	\
	((state) == arc.mru_ghost || (state) == arc.mfu_ghost)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

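/*
 * Hash a (spa, dva, birth) tuple down to a 64-bit value using the
 * ZFS CRC64 table (initialized in buf_init()).
 */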
static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to the new one in the hash table, then the already existing
 * element will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t max, i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		atomic_add_64(&arc.hash_collisions, 1);
		if (i == 1)
			atomic_add_64(&arc.hash_chains, 1);
	}
	while (i > (max = arc.hash_chain_max) &&
	    max != atomic_cas_32(&arc.hash_chain_max, max, i)) {
		continue;
	}
	atomic_add_64(&arc.hash_elements, 1);
	if (arc.hash_elements > arc.hash_elements_max)
		atomic_add_64(&arc.hash_elements_max, 1);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	atomic_add_64(&arc.hash_elements, -1);
	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		atomic_add_64(&arc.hash_chains, -1);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem * sizeof (void *) / 64K (e.g. 128KB/GB with 8-byte
	 * pointers).
	 */
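	/*
	 * Worked example (numbers are illustrative): with 4GB of
	 * physical memory, the loop below grows hsize to
	 * 4GB/64K = 65536 buckets, so the table occupies
	 * 65536 * 8 = 512KB.
	 */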
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

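/*
 * With ZFS_DEBUG_MODIFY set, verify that a buffer's contents still
 * match the checksum recorded when it was last frozen.
 */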
static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static void
arc_cksum_compute(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	if (buf->b_hdr->b_state != arc.anon)
		panic("modifying non-anon buffer!");
	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
		panic("modifying buffer while i/o in progress!");
	arc_cksum_verify(buf);
	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc.anon);
	arc_cksum_compute(buf);
}

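/*
 * Take a reference on a header.  The first reference makes the buffer
 * un-evictable, so remove it from its state's evictable list and
 * deduct its size from that list's accounting.
 */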
static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc.anon)) {
		int delta = ab->b_size * ab->b_datacnt;

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(&ab->b_state->list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(ab->b_state->lsize, >=, delta);
		atomic_add_64(&ab->b_state->lsize, -delta);
		mutex_exit(&ab->b_state->mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

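/*
 * Drop a reference on a header.  When the last reference goes away the
 * buffer becomes evictable again and is put back on its state's list.
 * Returns the remaining reference count.
 */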
static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;

	ASSERT(ab->b_state == arc.anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(ab->b_state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (ab->b_state != arc.anon)) {

		ASSERT(!MUTEX_HELD(&ab->b_state->mtx));
		mutex_enter(&ab->b_state->mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&ab->b_state->list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(&ab->b_state->lsize, ab->b_size * ab->b_datacnt);
		ASSERT3U(ab->b_state->size, >=, ab->b_state->lsize);
		mutex_exit(&ab->b_state->mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int refcnt = refcount_count(&ab->b_refcnt);
	int from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc.anon) {
			int use_mutex = !MUTEX_HELD(&old_state->mtx);

			if (use_mutex)
				mutex_enter(&old_state->mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(old_state->lsize, >=, from_delta);
			atomic_add_64(&old_state->lsize, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->mtx);
		}
		if (new_state != arc.anon) {
			int use_mutex = !MUTEX_HELD(&new_state->mtx);

			if (use_mutex)
				mutex_enter(&new_state->mtx);

			list_insert_head(&new_state->list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(&new_state->lsize, to_delta);
			ASSERT3U(new_state->size + to_delta, >=,
			    new_state->lsize);

			if (use_mutex)
				mutex_exit(&new_state->mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc.anon && old_state != arc.anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->size, >=, from_delta);
		atomic_add_64(&old_state->size, -from_delta);
	}
	ab->b_state = new_state;
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_spa = spa;
	hdr->b_state = arc.anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

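/*
 * Allocate an additional arc_buf_t for an existing header and give it
 * its own copy of the data.
 */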
static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	atomic_add_64(&arc.hits, 1);
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;

		arc_cksum_verify(buf);
		if (!recycle) {
			zio_buf_free(buf->b_data, size);
			atomic_add_64(&arc.size, -size);
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc.anon);
			ASSERT3U(state->lsize, >=, size);
			atomic_add_64(&state->lsize, -size);
		}
		ASSERT3U(state->size, >=, size);
		atomic_add_64(&state->size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

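/*
 * Free an anonymous, unreferenced header and all of its buffers.
 * Buffers with an eviction callback pending are queued on
 * arc_eviction_list instead of being freed here.
 */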
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc.anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc.anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc.anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc.anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 */
static void *
arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc.mru || state == arc.mfu);

	evicted_state = (state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;

	mutex_enter(&state->mtx);
	mutex_enter(&evicted_state->mtx);

	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for a better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_size == bytes) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags = ARC_IN_HASH_TABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}
	mutex_exit(&evicted_state->mtx);
	mutex_exit(&state->mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %p",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		atomic_add_64(&arc.evict_skip, skipped);
	if (missed)
		atomic_add_64(&arc.mutex_miss, missed);
	return (stolen);
}

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->mtx);
	for (ab = list_tail(&state->list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			arc_change_state(arc.anon, ab, hash_lock);
			mutex_exit(hash_lock);
			atomic_add_64(&arc.deleted, 1);
			bytes_deleted += ab->b_size;
			arc_hdr_destroy(ab);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->mtx);

	if (bufs_skipped) {
		atomic_add_64(&arc.mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

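/*
 * Evict and delete buffers until each list is back within its target:
 * anon + mru within arc.p; anon + mru + mru_ghost within arc.c; the
 * total cache (arc.size) within arc.c; and everything, ghosts
 * included, within 2 * arc.c.
 */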
static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over;

	top_sz = arc.anon->size + arc.mru->size;

	if (top_sz > arc.p && arc.mru->lsize > 0) {
		int64_t toevict = MIN(arc.mru->lsize, top_sz-arc.p);
		(void) arc_evict(arc.mru, toevict, FALSE);
		top_sz = arc.anon->size + arc.mru->size;
	}

	mru_over = top_sz + arc.mru_ghost->size - arc.c;

	if (mru_over > 0) {
		if (arc.mru_ghost->lsize > 0) {
			int64_t todelete = MIN(arc.mru_ghost->lsize, mru_over);
			arc_evict_ghost(arc.mru_ghost, todelete);
		}
	}

	if ((arc_over = arc.size - arc.c) > 0) {
		int64_t tbl_over;

		if (arc.mfu->lsize > 0) {
			int64_t toevict = MIN(arc.mfu->lsize, arc_over);
			(void) arc_evict(arc.mfu, toevict, FALSE);
		}

		tbl_over = arc.size + arc.mru_ghost->lsize +
		    arc.mfu_ghost->lsize - arc.c*2;

		if (tbl_over > 0 && arc.mfu_ghost->lsize > 0) {
			int64_t todelete = MIN(arc.mfu_ghost->lsize, tbl_over);
			arc_evict_ghost(arc.mfu_ghost, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(void)
{
	while (list_head(&arc.mru->list))
		(void) arc_evict(arc.mru, -1, FALSE);
	while (list_head(&arc.mfu->list))
		(void) arc_evict(arc.mfu, -1, FALSE);

	arc_evict_ghost(arc.mru_ghost, -1);
	arc_evict_ghost(arc.mfu_ghost, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(arc_eviction_list == NULL);
}

int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */

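/*
 * In response to memory pressure, cut the cache targets arc.c and
 * arc.p by 1/2^arc_shrink_shift (but never below arc.c_min), then
 * evict down to the new target.
 */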
void
arc_shrink(void)
{
	if (arc.c > arc.c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = MAX(arc.c >> arc_shrink_shift, ptob(needfree));
#else
		to_free = arc.c >> arc_shrink_shift;
#endif
		if (arc.c > arc.c_min + to_free)
			atomic_add_64(&arc.c, -to_free);
		else
			arc.c = arc.c_min;

		atomic_add_64(&arc.p, -(arc.p >> arc_shrink_shift));
		if (arc.c > arc.size)
			arc.c = MAX(arc.size, arc.c_min);
		if (arc.p > arc.c)
			arc.p = (arc.c >> 1);
		ASSERT(arc.c >= arc.c_min);
		ASSERT((int64_t)arc.p >= 0);
	}

	if (arc.size > arc.c)
		arc_adjust();
}

static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL

	if (needfree)
		return (1);

	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}

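/*
 * Return memory to the system: trim the DNLC, shrink the cache targets
 * when the reclaim is aggressive, and reap the free buffers held by
 * the zio, buf, and hdr kmem caches.
 */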
static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
	size_t			i;
	kmem_cache_t		*prev_cache = NULL;
	extern kmem_cache_t	*zio_buf_cache[];

#ifdef _KERNEL
	/*
	 * First purge some DNLC entries, in case the DNLC is using
	 * up too much memory.
	 */
	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);

#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_shrink();

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}

1351789Sahrens static void
1352789Sahrens arc_reclaim_thread(void)
1353789Sahrens {
1354789Sahrens 	clock_t			growtime = 0;
1355789Sahrens 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1356789Sahrens 	callb_cpr_t		cpr;
1357789Sahrens 
1358789Sahrens 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1359789Sahrens 
1360789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
1361789Sahrens 	while (arc_thread_exit == 0) {
1362789Sahrens 		if (arc_reclaim_needed()) {
1363789Sahrens 
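			/*
			 * On the first pass, disable cache growth and
			 * reclaim aggressively; on later passes (while
			 * reclaim is still needed), alternate between
			 * conservative and aggressive strategies.
			 */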
1364789Sahrens 			if (arc.no_grow) {
1365789Sahrens 				if (last_reclaim == ARC_RECLAIM_CONS) {
1366789Sahrens 					last_reclaim = ARC_RECLAIM_AGGR;
1367789Sahrens 				} else {
1368789Sahrens 					last_reclaim = ARC_RECLAIM_CONS;
1369789Sahrens 				}
1370789Sahrens 			} else {
1371789Sahrens 				arc.no_grow = TRUE;
1372789Sahrens 				last_reclaim = ARC_RECLAIM_AGGR;
1373789Sahrens 				membar_producer();
1374789Sahrens 			}
1375789Sahrens 
1376789Sahrens 			/* reset the growth delay for every reclaim */
1377789Sahrens 			growtime = lbolt + (arc_grow_retry * hz);
13782856Snd150628 			ASSERT(growtime > 0);
1379789Sahrens 
1380789Sahrens 			arc_kmem_reap_now(last_reclaim);
1381789Sahrens 
1382789Sahrens 		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
1383789Sahrens 			arc.no_grow = FALSE;
1384789Sahrens 		}
1385789Sahrens 
13861544Seschrock 		if (arc_eviction_list != NULL)
13871544Seschrock 			arc_do_user_evicts();
13881544Seschrock 
1389789Sahrens 		/* block until needed, or one second, whichever is shorter */
1390789Sahrens 		CALLB_CPR_SAFE_BEGIN(&cpr);
1391789Sahrens 		(void) cv_timedwait(&arc_reclaim_thr_cv,
1392789Sahrens 		    &arc_reclaim_thr_lock, (lbolt + hz));
1393789Sahrens 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1394789Sahrens 	}
1395789Sahrens 
1396789Sahrens 	arc_thread_exit = 0;
1397789Sahrens 	cv_broadcast(&arc_reclaim_thr_cv);
1398789Sahrens 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1399789Sahrens 	thread_exit();
1400789Sahrens }
1401789Sahrens 
14021544Seschrock /*
14031544Seschrock  * Adapt arc info given the number of bytes we are trying to add and
14041544Seschrock  * the state that we are coming from.  This function is only called
14051544Seschrock  * when we are adding new content to the cache.
14061544Seschrock  */
1407789Sahrens static void
14081544Seschrock arc_adapt(int bytes, arc_state_t *state)
1409789Sahrens {
14101544Seschrock 	int mult;
14111544Seschrock 
14121544Seschrock 	ASSERT(bytes > 0);
1413789Sahrens 	/*
14141544Seschrock 	 * Adapt the target size of the MRU list:
14151544Seschrock 	 *	- if we just hit in the MRU ghost list, then increase
14161544Seschrock 	 *	  the target size of the MRU list.
14171544Seschrock 	 *	- if we just hit in the MFU ghost list, then increase
14181544Seschrock 	 *	  the target size of the MFU list by decreasing the
14191544Seschrock 	 *	  target size of the MRU list.
1420789Sahrens 	 */
14211544Seschrock 	if (state == arc.mru_ghost) {
14221544Seschrock 		mult = ((arc.mru_ghost->size >= arc.mfu_ghost->size) ?
14231544Seschrock 		    1 : (arc.mfu_ghost->size/arc.mru_ghost->size));
14241544Seschrock 
14251544Seschrock 		arc.p = MIN(arc.c, arc.p + bytes * mult);
14261544Seschrock 	} else if (state == arc.mfu_ghost) {
14271544Seschrock 		mult = ((arc.mfu_ghost->size >= arc.mru_ghost->size) ?
14281544Seschrock 		    1 : (arc.mru_ghost->size/arc.mfu_ghost->size));
14291544Seschrock 
14301544Seschrock 		arc.p = MAX(0, (int64_t)arc.p - bytes * mult);
14311544Seschrock 	}
14321544Seschrock 	ASSERT((int64_t)arc.p >= 0);
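	/*
	 * For example (illustrative numbers): with arc.mru_ghost->size at
	 * 10MB and arc.mfu_ghost->size at 30MB, a hit in the MRU ghost
	 * list uses mult = 30/10 = 3 and grows arc.p by 3 * bytes; a hit
	 * in the smaller ghost list is stronger evidence that its live
	 * list is undersized.
	 */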
1433789Sahrens 
1434789Sahrens 	if (arc_reclaim_needed()) {
1435789Sahrens 		cv_signal(&arc_reclaim_thr_cv);
1436789Sahrens 		return;
1437789Sahrens 	}
1438789Sahrens 
1439789Sahrens 	if (arc.no_grow)
1440789Sahrens 		return;
1441789Sahrens 
14421544Seschrock 	if (arc.c >= arc.c_max)
14431544Seschrock 		return;
14441544Seschrock 
1445789Sahrens 	/*
14461544Seschrock 	 * If we're within (2 * maxblocksize) bytes of the target
14471544Seschrock 	 * cache size, increment the target cache size
1448789Sahrens 	 */
14491544Seschrock 	if (arc.size > arc.c - (2ULL << SPA_MAXBLOCKSHIFT)) {
14501544Seschrock 		atomic_add_64(&arc.c, (int64_t)bytes);
1451789Sahrens 		if (arc.c > arc.c_max)
1452789Sahrens 			arc.c = arc.c_max;
14531544Seschrock 		else if (state == arc.anon)
14541544Seschrock 			atomic_add_64(&arc.p, (int64_t)bytes);
14551544Seschrock 		if (arc.p > arc.c)
14561544Seschrock 			arc.p = arc.c;
1457789Sahrens 	}
14581544Seschrock 	ASSERT((int64_t)arc.p >= 0);
1459789Sahrens }
1460789Sahrens 
1461789Sahrens /*
14621544Seschrock  * Check if the cache has reached its limits and eviction is required
14631544Seschrock  * prior to insert.
1464789Sahrens  */
1465789Sahrens static int
1466789Sahrens arc_evict_needed()
1467789Sahrens {
1468789Sahrens 	if (arc_reclaim_needed())
1469789Sahrens 		return (1);
1470789Sahrens 
14711544Seschrock 	return (arc.size > arc.c);
1472789Sahrens }
1473789Sahrens 
1474789Sahrens /*
14752688Smaybee  * The buffer, supplied as the first argument, needs a data block.
14762688Smaybee  * So, if we are at cache max, determine which cache should be victimized.
14772688Smaybee  * We have the following cases:
1478789Sahrens  *
14791544Seschrock  * 1. Insert for MRU, p > sizeof(arc.anon + arc.mru) ->
1480789Sahrens  * In this situation if we're out of space, but the resident size of the MFU is
1481789Sahrens  * under the limit, victimize the MFU cache to satisfy this insertion request.
1482789Sahrens  *
14831544Seschrock  * 2. Insert for MRU, p <= sizeof(arc.anon + arc.mru) ->
1484789Sahrens  * Here, we've used up all of the available space for the MRU, so we need to
1485789Sahrens  * evict from our own cache instead.  Evict from the set of resident MRU
1486789Sahrens  * entries.
1487789Sahrens  *
14881544Seschrock  * 3. Insert for MFU (c - p) > sizeof(arc.mfu) ->
1489789Sahrens  * c minus p represents the MFU space in the cache, since p is the size of the
1490789Sahrens  * cache that is dedicated to the MRU.  In this situation there's still space on
1491789Sahrens  * the MFU side, so the MRU side needs to be victimized.
1492789Sahrens  *
14931544Seschrock  * 4. Insert for MFU (c - p) < sizeof(arc.mfu) ->
1494789Sahrens  * MFU's resident set is consuming more space than it has been allotted.  In
1495789Sahrens  * this situation, we must victimize our own cache, the MFU, for this insertion.
1496789Sahrens  */
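/*
 * Distilled, the four cases above reduce to the following victim choice
 * (a sketch mirroring the state-selection logic in the function below):
 *
 *	if (inserting for MRU or anon)
 *		victim = (arc.p > anon + mru) ? mfu : mru;	(cases 1, 2)
 *	else
 *		victim = ((arc.c - arc.p) > mfu) ? mru : mfu;	(cases 3, 4)
 */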
1497789Sahrens static void
14982688Smaybee arc_get_data_buf(arc_buf_t *buf)
1499789Sahrens {
15002688Smaybee 	arc_state_t	*state = buf->b_hdr->b_state;
15012688Smaybee 	uint64_t	size = buf->b_hdr->b_size;
15022688Smaybee 
15032688Smaybee 	arc_adapt(size, state);
1504789Sahrens 
15052688Smaybee 	/*
15062688Smaybee 	 * If we have not yet reached the maximum cache size,
15072688Smaybee 	 * just allocate a new buffer.
15082688Smaybee 	 */
15092688Smaybee 	if (!arc_evict_needed()) {
15102688Smaybee 		buf->b_data = zio_buf_alloc(size);
15112688Smaybee 		atomic_add_64(&arc.size, size);
15122688Smaybee 		goto out;
15132688Smaybee 	}
15142688Smaybee 
15152688Smaybee 	/*
15162688Smaybee 	 * If we are prefetching from the mfu ghost list, this buffer
15172688Smaybee 	 * will end up on the mru list, so steal space from there.
15182688Smaybee 	 */
15192688Smaybee 	if (state == arc.mfu_ghost)
15202688Smaybee 		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc.mru : arc.mfu;
15212688Smaybee 	else if (state == arc.mru_ghost)
15222688Smaybee 		state = arc.mru;
1523789Sahrens 
15242688Smaybee 	if (state == arc.mru || state == arc.anon) {
15252688Smaybee 		uint64_t mru_used = arc.anon->size + arc.mru->size;
15262688Smaybee 		state = (arc.p > mru_used) ? arc.mfu : arc.mru;
1527789Sahrens 	} else {
15282688Smaybee 		/* MFU cases */
15292688Smaybee 		uint64_t mfu_space = arc.c - arc.p;
15302688Smaybee 		state = (mfu_space > arc.mfu->size) ? arc.mru : arc.mfu;
15312688Smaybee 	}
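	/*
	 * Try to evict (and recycle) a data block of the right size from
	 * the chosen state; if nothing suitable can be evicted, fall back
	 * to a fresh allocation and count a recycle miss.
	 */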
15322688Smaybee 	if ((buf->b_data = arc_evict(state, size, TRUE)) == NULL) {
15332688Smaybee 		buf->b_data = zio_buf_alloc(size);
15342688Smaybee 		atomic_add_64(&arc.size, size);
15352688Smaybee 		atomic_add_64(&arc.recycle_miss, 1);
15362688Smaybee 	}
15372688Smaybee 	ASSERT(buf->b_data != NULL);
15382688Smaybee out:
15392688Smaybee 	/*
15402688Smaybee 	 * Update the state size.  Note that ghost states have a
15412688Smaybee 	 * "ghost size" and so don't need to be updated.
15422688Smaybee 	 */
15432688Smaybee 	if (!GHOST_STATE(buf->b_hdr->b_state)) {
15442688Smaybee 		arc_buf_hdr_t *hdr = buf->b_hdr;
15452688Smaybee 
15462688Smaybee 		atomic_add_64(&hdr->b_state->size, size);
15472688Smaybee 		if (list_link_active(&hdr->b_arc_node)) {
15482688Smaybee 			ASSERT(refcount_is_zero(&hdr->b_refcnt));
15492688Smaybee 			atomic_add_64(&hdr->b_state->lsize, size);
1550789Sahrens 		}
1551789Sahrens 	}
1552789Sahrens }
1553789Sahrens 
1554789Sahrens /*
1555789Sahrens  * This routine is called whenever a buffer is accessed.
15561544Seschrock  * NOTE: the hash lock must be held by the caller and is not dropped here.
1557789Sahrens  */
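/*
 * In summary, the transitions performed below are (sketch):
 *
 *	anon      -> mru	on first insertion into the cache
 *	mru       -> mfu	on a second access at least ARC_MINTIME later
 *	mru_ghost -> mfu	on re-access after eviction (mru if prefetch)
 *	mfu_ghost -> mfu	on re-access after eviction (mru if prefetch)
 *	mfu       -> mfu	stays put; prefetches move to the list head
 */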
1558789Sahrens static void
15592688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1560789Sahrens {
1561789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
1562789Sahrens 
1563789Sahrens 	if (buf->b_state == arc.anon) {
1564789Sahrens 		/*
1565789Sahrens 		 * This buffer is not in the cache, and does not
1566789Sahrens 		 * appear in our "ghost" list.  Add the new buffer
1567789Sahrens 		 * to the MRU state.
1568789Sahrens 		 */
1569789Sahrens 
1570789Sahrens 		ASSERT(buf->b_arc_access == 0);
1571789Sahrens 		buf->b_arc_access = lbolt;
15721544Seschrock 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
15731544Seschrock 		arc_change_state(arc.mru, buf, hash_lock);
1574789Sahrens 
15751544Seschrock 	} else if (buf->b_state == arc.mru) {
1576789Sahrens 		/*
15772391Smaybee 		 * If this buffer is here because of a prefetch, then either:
15782391Smaybee 		 * - clear the flag if this is a "referencing" read
15792391Smaybee 		 *   (any subsequent access will bump this into the MFU state).
15802391Smaybee 		 * or
15812391Smaybee 		 * - move the buffer to the head of the list if this is
15822391Smaybee 		 *   another prefetch (to make it less likely to be evicted).
1583789Sahrens 		 */
1584789Sahrens 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
15852391Smaybee 			if (refcount_count(&buf->b_refcnt) == 0) {
15862391Smaybee 				ASSERT(list_link_active(&buf->b_arc_node));
15872391Smaybee 				mutex_enter(&arc.mru->mtx);
15882391Smaybee 				list_remove(&arc.mru->list, buf);
15892391Smaybee 				list_insert_head(&arc.mru->list, buf);
15902391Smaybee 				mutex_exit(&arc.mru->mtx);
15912391Smaybee 			} else {
15922391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
15932391Smaybee 				atomic_add_64(&arc.mru->hits, 1);
15942391Smaybee 			}
15952391Smaybee 			buf->b_arc_access = lbolt;
1596789Sahrens 			return;
1597789Sahrens 		}
1598789Sahrens 
1599789Sahrens 		/*
1600789Sahrens 		 * This buffer has been "accessed" only once so far,
1601789Sahrens 		 * but it is still in the cache.  If enough time
1602789Sahrens 		 * (ARC_MINTIME) has passed, move it to the MFU state.
1603789Sahrens 		 */
1604789Sahrens 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
1605789Sahrens 			/*
1606789Sahrens 			 * More than 125ms have passed since we
1607789Sahrens 			 * instantiated this buffer.  Move it to the
1608789Sahrens 			 * most frequently used state.
1609789Sahrens 			 */
1610789Sahrens 			buf->b_arc_access = lbolt;
16111544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
16121544Seschrock 			arc_change_state(arc.mfu, buf, hash_lock);
1613789Sahrens 		}
16141544Seschrock 		atomic_add_64(&arc.mru->hits, 1);
16151544Seschrock 	} else if (buf->b_state == arc.mru_ghost) {
1616789Sahrens 		arc_state_t	*new_state;
1617789Sahrens 		/*
1618789Sahrens 		 * This buffer has been "accessed" recently, but
1619789Sahrens 		 * was evicted from the cache.  Move it to the
1620789Sahrens 		 * MFU state.
1621789Sahrens 		 */
1622789Sahrens 
1623789Sahrens 		if (buf->b_flags & ARC_PREFETCH) {
16241544Seschrock 			new_state = arc.mru;
16252391Smaybee 			if (refcount_count(&buf->b_refcnt) > 0)
16262391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
16271544Seschrock 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1628789Sahrens 		} else {
16291544Seschrock 			new_state = arc.mfu;
16301544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1631789Sahrens 		}
1632789Sahrens 
1633789Sahrens 		buf->b_arc_access = lbolt;
1634789Sahrens 		arc_change_state(new_state, buf, hash_lock);
1635789Sahrens 
16361544Seschrock 		atomic_add_64(&arc.mru_ghost->hits, 1);
16371544Seschrock 	} else if (buf->b_state == arc.mfu) {
1638789Sahrens 		/*
1639789Sahrens 		 * This buffer has been accessed more than once and is
1640789Sahrens 		 * still in the cache.  Keep it in the MFU state.
1641789Sahrens 		 *
16422391Smaybee 		 * NOTE: an add_reference() that occurred when we did
16432391Smaybee 		 * the arc_read() will have kicked this off the list.
16442391Smaybee 		 * If it was a prefetch, we will explicitly move it to
16452391Smaybee 		 * the head of the list now.
1646789Sahrens 		 */
16472391Smaybee 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
16482391Smaybee 			ASSERT(refcount_count(&buf->b_refcnt) == 0);
16492391Smaybee 			ASSERT(list_link_active(&buf->b_arc_node));
16502391Smaybee 			mutex_enter(&arc.mfu->mtx);
16512391Smaybee 			list_remove(&arc.mfu->list, buf);
16522391Smaybee 			list_insert_head(&arc.mfu->list, buf);
16532391Smaybee 			mutex_exit(&arc.mfu->mtx);
16542391Smaybee 		}
16551544Seschrock 		atomic_add_64(&arc.mfu->hits, 1);
16562391Smaybee 		buf->b_arc_access = lbolt;
16571544Seschrock 	} else if (buf->b_state == arc.mfu_ghost) {
16582391Smaybee 		arc_state_t	*new_state = arc.mfu;
1659789Sahrens 		/*
1660789Sahrens 		 * This buffer has been accessed more than once but has
1661789Sahrens 		 * been evicted from the cache.  Move it back to the
1662789Sahrens 		 * MFU state.
1663789Sahrens 		 */
1664789Sahrens 
16652391Smaybee 		if (buf->b_flags & ARC_PREFETCH) {
16662391Smaybee 			/*
16672391Smaybee 			 * This is a prefetch access...
16682391Smaybee 			 * move this block back to the MRU state.
16692391Smaybee 			 */
16702391Smaybee 			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
16712391Smaybee 			new_state = arc.mru;
16722391Smaybee 		}
16732391Smaybee 
1674789Sahrens 		buf->b_arc_access = lbolt;
16751544Seschrock 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
16762391Smaybee 		arc_change_state(new_state, buf, hash_lock);
1677789Sahrens 
16781544Seschrock 		atomic_add_64(&arc.mfu_ghost->hits, 1);
1679789Sahrens 	} else {
1680789Sahrens 		ASSERT(!"invalid arc state");
1681789Sahrens 	}
1682789Sahrens }
1683789Sahrens 
1684789Sahrens /* a generic arc_done_func_t which you can use */
1685789Sahrens /* ARGSUSED */
1686789Sahrens void
1687789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1688789Sahrens {
1689789Sahrens 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
16901544Seschrock 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1691789Sahrens }
1692789Sahrens 
1693789Sahrens /* a generic arc_done_func_t which you can use */
1694789Sahrens void
1695789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1696789Sahrens {
1697789Sahrens 	arc_buf_t **bufp = arg;
1698789Sahrens 	if (zio && zio->io_error) {
16991544Seschrock 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1700789Sahrens 		*bufp = NULL;
1701789Sahrens 	} else {
1702789Sahrens 		*bufp = buf;
1703789Sahrens 	}
1704789Sahrens }
1705789Sahrens 
1706789Sahrens static void
1707789Sahrens arc_read_done(zio_t *zio)
1708789Sahrens {
17091589Smaybee 	arc_buf_hdr_t	*hdr, *found;
1710789Sahrens 	arc_buf_t	*buf;
1711789Sahrens 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1712789Sahrens 	kmutex_t	*hash_lock;
1713789Sahrens 	arc_callback_t	*callback_list, *acb;
1714789Sahrens 	int		freeable = FALSE;
1715789Sahrens 
1716789Sahrens 	buf = zio->io_private;
1717789Sahrens 	hdr = buf->b_hdr;
1718789Sahrens 
17191589Smaybee 	/*
17201589Smaybee 	 * The hdr was inserted into hash-table and removed from lists
17211589Smaybee 	 * prior to starting I/O.  We should find this header, since
17221589Smaybee 	 * it's in the hash table, and it should be legit since it's
17231589Smaybee 	 * not possible to evict it during the I/O.  The only possible
17241589Smaybee 	 * reason for it not to be found is if the block was freed during
17251589Smaybee 	 * the read.
17261589Smaybee 	 */
17271589Smaybee 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
17283093Sahrens 	    &hash_lock);
1729789Sahrens 
17301589Smaybee 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
17311589Smaybee 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1732789Sahrens 
1733789Sahrens 	/* byteswap if necessary */
1734789Sahrens 	callback_list = hdr->b_acb;
1735789Sahrens 	ASSERT(callback_list != NULL);
1736789Sahrens 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1737789Sahrens 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1738789Sahrens 
17393093Sahrens 	arc_cksum_compute(buf);
17403093Sahrens 
1741789Sahrens 	/* create copies of the data buffer for the callers */
1742789Sahrens 	abuf = buf;
1743789Sahrens 	for (acb = callback_list; acb; acb = acb->acb_next) {
1744789Sahrens 		if (acb->acb_done) {
17452688Smaybee 			if (abuf == NULL)
17462688Smaybee 				abuf = arc_buf_clone(buf);
1747789Sahrens 			acb->acb_buf = abuf;
1748789Sahrens 			abuf = NULL;
1749789Sahrens 		}
1750789Sahrens 	}
1751789Sahrens 	hdr->b_acb = NULL;
1752789Sahrens 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
17531544Seschrock 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
17541544Seschrock 	if (abuf == buf)
17551544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1756789Sahrens 
1757789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1758789Sahrens 
1759789Sahrens 	if (zio->io_error != 0) {
1760789Sahrens 		hdr->b_flags |= ARC_IO_ERROR;
1761789Sahrens 		if (hdr->b_state != arc.anon)
1762789Sahrens 			arc_change_state(arc.anon, hdr, hash_lock);
17631544Seschrock 		if (HDR_IN_HASH_TABLE(hdr))
17641544Seschrock 			buf_hash_remove(hdr);
1765789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
17662391Smaybee 		/* convert checksum errors into IO errors */
17671544Seschrock 		if (zio->io_error == ECKSUM)
17681544Seschrock 			zio->io_error = EIO;
1769789Sahrens 	}
1770789Sahrens 
17711544Seschrock 	/*
17722391Smaybee 	 * Broadcast before we drop the hash_lock to avoid the possibility
17732391Smaybee 	 * that the hdr (and hence the cv) might be freed before we get to
17742391Smaybee 	 * the cv_broadcast().
17751544Seschrock 	 */
17761544Seschrock 	cv_broadcast(&hdr->b_cv);
17771544Seschrock 
17781589Smaybee 	if (hash_lock) {
1779789Sahrens 		/*
1780789Sahrens 		 * Only call arc_access on anonymous buffers.  This is because
1781789Sahrens 		 * if we've issued an I/O for an evicted buffer, we've already
1782789Sahrens 		 * called arc_access (to prevent any simultaneous readers from
1783789Sahrens 		 * getting confused).
1784789Sahrens 		 */
1785789Sahrens 		if (zio->io_error == 0 && hdr->b_state == arc.anon)
17862688Smaybee 			arc_access(hdr, hash_lock);
17872688Smaybee 		mutex_exit(hash_lock);
1788789Sahrens 	} else {
1789789Sahrens 		/*
1790789Sahrens 		 * This block was freed while we waited for the read to
1791789Sahrens 		 * complete.  It has been removed from the hash table and
1792789Sahrens 		 * moved to the anonymous state (so that it won't show up
1793789Sahrens 		 * in the cache).
1794789Sahrens 		 */
1795789Sahrens 		ASSERT3P(hdr->b_state, ==, arc.anon);
1796789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
1797789Sahrens 	}
1798789Sahrens 
1799789Sahrens 	/* execute each callback and free its structure */
1800789Sahrens 	while ((acb = callback_list) != NULL) {
1801789Sahrens 		if (acb->acb_done)
1802789Sahrens 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1803789Sahrens 
1804789Sahrens 		if (acb->acb_zio_dummy != NULL) {
1805789Sahrens 			acb->acb_zio_dummy->io_error = zio->io_error;
1806789Sahrens 			zio_nowait(acb->acb_zio_dummy);
1807789Sahrens 		}
1808789Sahrens 
1809789Sahrens 		callback_list = acb->acb_next;
1810789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
1811789Sahrens 	}
1812789Sahrens 
1813789Sahrens 	if (freeable)
18141544Seschrock 		arc_hdr_destroy(hdr);
1815789Sahrens }
1816789Sahrens 
1817789Sahrens /*
1818789Sahrens  * "Read" the block at the specified DVA (in bp) via the
1819789Sahrens  * cache.  If the block is found in the cache, invoke the provided
1820789Sahrens  * callback immediately and return.  Note that the `zio' parameter
1821789Sahrens  * in the callback will be NULL in this case, since no IO was
1822789Sahrens  * required.  If the block is not in the cache pass the read request
1823789Sahrens  * on to the spa with a substitute callback function, so that the
1824789Sahrens  * requested block will be added to the cache.
1825789Sahrens  *
1826789Sahrens  * If a read request arrives for a block that has a read in-progress,
1827789Sahrens  * either wait for the in-progress read to complete (and return the
1828789Sahrens  * results); or, if this is a read with a "done" func, add a record
1829789Sahrens  * to the read to invoke the "done" func when the read completes,
1830789Sahrens  * and return; or just return.
1831789Sahrens  *
1832789Sahrens  * arc_read_done() will invoke all the requested "done" functions
1833789Sahrens  * for readers of this block.
1834789Sahrens  */
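/*
 * For illustration, a synchronous cached read might look like the
 * following (a minimal sketch; the byteswap function, bookmark, and
 * flag values depend on the caller's context):
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *buf = NULL;
 *
 *	(void) arc_read(NULL, spa, bp, byteswap_uint64_array,
 *	    arc_getbuf_func, &buf, ZIO_PRIORITY_SYNC_READ,
 *	    ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (buf != NULL) {
 *		...examine buf->b_data...
 *		VERIFY(arc_buf_remove_ref(buf, &buf) == 1);
 *	}
 */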
1835789Sahrens int
1836789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1837789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
18382391Smaybee     uint32_t *arc_flags, zbookmark_t *zb)
1839789Sahrens {
1840789Sahrens 	arc_buf_hdr_t *hdr;
1841789Sahrens 	arc_buf_t *buf;
1842789Sahrens 	kmutex_t *hash_lock;
1843789Sahrens 	zio_t	*rzio;
1844789Sahrens 
1845789Sahrens top:
1846789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
18471544Seschrock 	if (hdr && hdr->b_datacnt > 0) {
1848789Sahrens 
18492391Smaybee 		*arc_flags |= ARC_CACHED;
18502391Smaybee 
1851789Sahrens 		if (HDR_IO_IN_PROGRESS(hdr)) {
18522391Smaybee 
18532391Smaybee 			if (*arc_flags & ARC_WAIT) {
18542391Smaybee 				cv_wait(&hdr->b_cv, hash_lock);
18552391Smaybee 				mutex_exit(hash_lock);
18562391Smaybee 				goto top;
18572391Smaybee 			}
18582391Smaybee 			ASSERT(*arc_flags & ARC_NOWAIT);
18592391Smaybee 
18602391Smaybee 			if (done) {
1861789Sahrens 				arc_callback_t	*acb = NULL;
1862789Sahrens 
1863789Sahrens 				acb = kmem_zalloc(sizeof (arc_callback_t),
1864789Sahrens 				    KM_SLEEP);
1865789Sahrens 				acb->acb_done = done;
1866789Sahrens 				acb->acb_private = private;
1867789Sahrens 				acb->acb_byteswap = swap;
1868789Sahrens 				if (pio != NULL)
1869789Sahrens 					acb->acb_zio_dummy = zio_null(pio,
1870789Sahrens 					    spa, NULL, NULL, flags);
1871789Sahrens 
1872789Sahrens 				ASSERT(acb->acb_done != NULL);
1873789Sahrens 				acb->acb_next = hdr->b_acb;
1874789Sahrens 				hdr->b_acb = acb;
1875789Sahrens 				add_reference(hdr, hash_lock, private);
1876789Sahrens 				mutex_exit(hash_lock);
1877789Sahrens 				return (0);
1878789Sahrens 			}
1879789Sahrens 			mutex_exit(hash_lock);
1880789Sahrens 			return (0);
1881789Sahrens 		}
1882789Sahrens 
18831544Seschrock 		ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
1884789Sahrens 
18851544Seschrock 		if (done) {
18862688Smaybee 			add_reference(hdr, hash_lock, private);
18871544Seschrock 			/*
18881544Seschrock 			 * If this block is already in use, create a new
18891544Seschrock 			 * copy of the data so that we will be guaranteed
18901544Seschrock 			 * that arc_release() will always succeed.
18911544Seschrock 			 */
18921544Seschrock 			buf = hdr->b_buf;
18931544Seschrock 			ASSERT(buf);
18941544Seschrock 			ASSERT(buf->b_data);
18952688Smaybee 			if (HDR_BUF_AVAILABLE(hdr)) {
18961544Seschrock 				ASSERT(buf->b_efunc == NULL);
18971544Seschrock 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
18982688Smaybee 			} else {
18992688Smaybee 				buf = arc_buf_clone(buf);
19001544Seschrock 			}
19012391Smaybee 		} else if (*arc_flags & ARC_PREFETCH &&
19022391Smaybee 		    refcount_count(&hdr->b_refcnt) == 0) {
19032391Smaybee 			hdr->b_flags |= ARC_PREFETCH;
1904789Sahrens 		}
1905789Sahrens 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
19062688Smaybee 		arc_access(hdr, hash_lock);
19072688Smaybee 		mutex_exit(hash_lock);
1908789Sahrens 		atomic_add_64(&arc.hits, 1);
1909789Sahrens 		if (done)
1910789Sahrens 			done(NULL, buf, private);
1911789Sahrens 	} else {
1912789Sahrens 		uint64_t size = BP_GET_LSIZE(bp);
1913789Sahrens 		arc_callback_t	*acb;
1914789Sahrens 
1915789Sahrens 		if (hdr == NULL) {
1916789Sahrens 			/* this block is not in the cache */
1917789Sahrens 			arc_buf_hdr_t	*exists;
1918789Sahrens 
1919789Sahrens 			buf = arc_buf_alloc(spa, size, private);
1920789Sahrens 			hdr = buf->b_hdr;
1921789Sahrens 			hdr->b_dva = *BP_IDENTITY(bp);
1922789Sahrens 			hdr->b_birth = bp->blk_birth;
1923789Sahrens 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
1924789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
1925789Sahrens 			if (exists) {
1926789Sahrens 				/* somebody beat us to the hash insert */
1927789Sahrens 				mutex_exit(hash_lock);
1928789Sahrens 				bzero(&hdr->b_dva, sizeof (dva_t));
1929789Sahrens 				hdr->b_birth = 0;
1930789Sahrens 				hdr->b_cksum0 = 0;
19311544Seschrock 				(void) arc_buf_remove_ref(buf, private);
1932789Sahrens 				goto top; /* restart the IO request */
1933789Sahrens 			}
19342391Smaybee 			/* if this is a prefetch, we don't have a reference */
19352391Smaybee 			if (*arc_flags & ARC_PREFETCH) {
19362391Smaybee 				(void) remove_reference(hdr, hash_lock,
19372391Smaybee 				    private);
19382391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
19392391Smaybee 			}
19402391Smaybee 			if (BP_GET_LEVEL(bp) > 0)
19412391Smaybee 				hdr->b_flags |= ARC_INDIRECT;
1942789Sahrens 		} else {
1943789Sahrens 			/* this block is in the ghost cache */
19441544Seschrock 			ASSERT(GHOST_STATE(hdr->b_state));
19451544Seschrock 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
19462391Smaybee 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
19472391Smaybee 			ASSERT(hdr->b_buf == NULL);
1948789Sahrens 
19492391Smaybee 			/* if this is a prefetch, we don't have a reference */
19502391Smaybee 			if (*arc_flags & ARC_PREFETCH)
19512391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
19522391Smaybee 			else
19532391Smaybee 				add_reference(hdr, hash_lock, private);
1954789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
19551544Seschrock 			buf->b_hdr = hdr;
19562688Smaybee 			buf->b_data = NULL;
19571544Seschrock 			buf->b_efunc = NULL;
19581544Seschrock 			buf->b_private = NULL;
19591544Seschrock 			buf->b_next = NULL;
19601544Seschrock 			hdr->b_buf = buf;
19612688Smaybee 			arc_get_data_buf(buf);
19621544Seschrock 			ASSERT(hdr->b_datacnt == 0);
19631544Seschrock 			hdr->b_datacnt = 1;
19642391Smaybee 
1965789Sahrens 		}
1966789Sahrens 
1967789Sahrens 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
1968789Sahrens 		acb->acb_done = done;
1969789Sahrens 		acb->acb_private = private;
1970789Sahrens 		acb->acb_byteswap = swap;
1971789Sahrens 
1972789Sahrens 		ASSERT(hdr->b_acb == NULL);
1973789Sahrens 		hdr->b_acb = acb;
1974789Sahrens 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
1975789Sahrens 
1976789Sahrens 		/*
1977789Sahrens 		 * If the buffer has been evicted, migrate it to a present state
1978789Sahrens 		 * before issuing the I/O.  Once we drop the hash-table lock,
1979789Sahrens 		 * the header will be marked as I/O in progress and have an
1980789Sahrens 		 * attached buffer.  At this point, anybody who finds this
1981789Sahrens 		 * buffer ought to notice that it's legit but has a pending I/O.
1982789Sahrens 		 */
1983789Sahrens 
19841544Seschrock 		if (GHOST_STATE(hdr->b_state))
19852688Smaybee 			arc_access(hdr, hash_lock);
19862688Smaybee 		mutex_exit(hash_lock);
1987789Sahrens 
1988789Sahrens 		ASSERT3U(hdr->b_size, ==, size);
19891596Sahrens 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
19901596Sahrens 		    zbookmark_t *, zb);
1991789Sahrens 		atomic_add_64(&arc.misses, 1);
19921544Seschrock 
1993789Sahrens 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
19941544Seschrock 		    arc_read_done, buf, priority, flags, zb);
1995789Sahrens 
19962391Smaybee 		if (*arc_flags & ARC_WAIT)
1997789Sahrens 			return (zio_wait(rzio));
1998789Sahrens 
19992391Smaybee 		ASSERT(*arc_flags & ARC_NOWAIT);
2000789Sahrens 		zio_nowait(rzio);
2001789Sahrens 	}
2002789Sahrens 	return (0);
2003789Sahrens }
2004789Sahrens 
2005789Sahrens /*
2006789Sahrens  * arc_read() variant to support pool traversal.  If the block is already
2007789Sahrens  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2008789Sahrens  * The idea is that we don't want pool traversal filling up memory, but
2009789Sahrens  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
2010789Sahrens  */
2011789Sahrens int
2012789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2013789Sahrens {
2014789Sahrens 	arc_buf_hdr_t *hdr;
2015789Sahrens 	kmutex_t *hash_mtx;
2016789Sahrens 	int rc = 0;
2017789Sahrens 
2018789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2019789Sahrens 
20201544Seschrock 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
20211544Seschrock 		arc_buf_t *buf = hdr->b_buf;
20221544Seschrock 
20231544Seschrock 		ASSERT(buf);
20241544Seschrock 		while (buf->b_data == NULL) {
20251544Seschrock 			buf = buf->b_next;
20261544Seschrock 			ASSERT(buf);
20271544Seschrock 		}
20281544Seschrock 		bcopy(buf->b_data, data, hdr->b_size);
20291544Seschrock 	} else {
2030789Sahrens 		rc = ENOENT;
20311544Seschrock 	}
2032789Sahrens 
2033789Sahrens 	if (hash_mtx)
2034789Sahrens 		mutex_exit(hash_mtx);
2035789Sahrens 
2036789Sahrens 	return (rc);
2037789Sahrens }
2038789Sahrens 
20391544Seschrock void
20401544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
20411544Seschrock {
20421544Seschrock 	ASSERT(buf->b_hdr != NULL);
20431544Seschrock 	ASSERT(buf->b_hdr->b_state != arc.anon);
20441544Seschrock 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
20451544Seschrock 	buf->b_efunc = func;
20461544Seschrock 	buf->b_private = private;
20471544Seschrock }
20481544Seschrock 
20491544Seschrock /*
20501544Seschrock  * This is used by the DMU to let the ARC know that a buffer is
20511544Seschrock  * being evicted, so the ARC should clean up.  If this arc buf
20521544Seschrock  * is not yet in the evicted state, it will be put there.
20531544Seschrock  */
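/*
 * For illustration (a sketch; "db_evict" is a hypothetical eviction
 * function supplied by the buffer's owner):
 *
 *	arc_set_callback(buf, db_evict, db);
 *
 * Once registered, the ARC invokes db_evict(buf) when it evicts the
 * buffer, so the owner can release its own state instead of pinning
 * the data in the cache.
 */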
20541544Seschrock int
20551544Seschrock arc_buf_evict(arc_buf_t *buf)
20561544Seschrock {
20572887Smaybee 	arc_buf_hdr_t *hdr;
20581544Seschrock 	kmutex_t *hash_lock;
20591544Seschrock 	arc_buf_t **bufp;
20601544Seschrock 
20612887Smaybee 	mutex_enter(&arc_eviction_mtx);
20622887Smaybee 	hdr = buf->b_hdr;
20631544Seschrock 	if (hdr == NULL) {
20641544Seschrock 		/*
20651544Seschrock 		 * We are in arc_do_user_evicts().
20661544Seschrock 		 */
20671544Seschrock 		ASSERT(buf->b_data == NULL);
20682887Smaybee 		mutex_exit(&arc_eviction_mtx);
20691544Seschrock 		return (0);
20701544Seschrock 	}
20712887Smaybee 	hash_lock = HDR_LOCK(hdr);
20722887Smaybee 	mutex_exit(&arc_eviction_mtx);
20731544Seschrock 
20741544Seschrock 	mutex_enter(hash_lock);
20751544Seschrock 
20762724Smaybee 	if (buf->b_data == NULL) {
20772724Smaybee 		/*
20782724Smaybee 		 * We are on the eviction list.
20792724Smaybee 		 */
20802724Smaybee 		mutex_exit(hash_lock);
20812724Smaybee 		mutex_enter(&arc_eviction_mtx);
20822724Smaybee 		if (buf->b_hdr == NULL) {
20832724Smaybee 			/*
20842724Smaybee 			 * We are already in arc_do_user_evicts().
20852724Smaybee 			 */
20862724Smaybee 			mutex_exit(&arc_eviction_mtx);
20872724Smaybee 			return (0);
20882724Smaybee 		} else {
20892724Smaybee 			arc_buf_t copy = *buf; /* structure assignment */
20902724Smaybee 			/*
20912724Smaybee 			 * Process this buffer now
20922724Smaybee 			 * but let arc_do_user_evicts() do the reaping.
20932724Smaybee 			 */
20942724Smaybee 			buf->b_efunc = NULL;
20952724Smaybee 			mutex_exit(&arc_eviction_mtx);
20962724Smaybee 			VERIFY(copy.b_efunc(&copy) == 0);
20972724Smaybee 			return (1);
20982724Smaybee 		}
20992724Smaybee 	}
21002724Smaybee 
21012724Smaybee 	ASSERT(buf->b_hdr == hdr);
21022724Smaybee 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
21031544Seschrock 	ASSERT(hdr->b_state == arc.mru || hdr->b_state == arc.mfu);
21041544Seschrock 
21051544Seschrock 	/*
21061544Seschrock 	 * Pull this buffer off of the hdr
21071544Seschrock 	 */
21081544Seschrock 	bufp = &hdr->b_buf;
21091544Seschrock 	while (*bufp != buf)
21101544Seschrock 		bufp = &(*bufp)->b_next;
21111544Seschrock 	*bufp = buf->b_next;
21121544Seschrock 
21131544Seschrock 	ASSERT(buf->b_data != NULL);
21142688Smaybee 	arc_buf_destroy(buf, FALSE, FALSE);
21151544Seschrock 
21161544Seschrock 	if (hdr->b_datacnt == 0) {
21171544Seschrock 		arc_state_t *old_state = hdr->b_state;
21181544Seschrock 		arc_state_t *evicted_state;
21191544Seschrock 
21201544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
21211544Seschrock 
21221544Seschrock 		evicted_state =
21231544Seschrock 		    (old_state == arc.mru) ? arc.mru_ghost : arc.mfu_ghost;
21241544Seschrock 
21251544Seschrock 		mutex_enter(&old_state->mtx);
21261544Seschrock 		mutex_enter(&evicted_state->mtx);
21271544Seschrock 
21281544Seschrock 		arc_change_state(evicted_state, hdr, hash_lock);
21291544Seschrock 		ASSERT(HDR_IN_HASH_TABLE(hdr));
21301544Seschrock 		hdr->b_flags = ARC_IN_HASH_TABLE;
21311544Seschrock 
21321544Seschrock 		mutex_exit(&evicted_state->mtx);
21331544Seschrock 		mutex_exit(&old_state->mtx);
21341544Seschrock 	}
21351544Seschrock 	mutex_exit(hash_lock);
21361819Smaybee 
21371544Seschrock 	VERIFY(buf->b_efunc(buf) == 0);
21381544Seschrock 	buf->b_efunc = NULL;
21391544Seschrock 	buf->b_private = NULL;
21401544Seschrock 	buf->b_hdr = NULL;
21411544Seschrock 	kmem_cache_free(buf_cache, buf);
21421544Seschrock 	return (1);
21431544Seschrock }
21441544Seschrock 
2145789Sahrens /*
2146789Sahrens  * Release this buffer from the cache.  This must be done
2147789Sahrens  * after a read and prior to modifying the buffer contents.
2148789Sahrens  * If the buffer has more than one reference, we must make
2149789Sahrens  * a new hdr for the buffer.
2150789Sahrens  */
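/*
 * Two outcomes are possible below (sketch): if the hdr is shared with
 * other bufs, this buf is peeled off onto a newly allocated anonymous
 * hdr; if this buf is the hdr's only buf, the hdr itself is moved to
 * the anonymous state.  Either way the caller is left holding a
 * private, writable copy of the data.
 */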
2151789Sahrens void
2152789Sahrens arc_release(arc_buf_t *buf, void *tag)
2153789Sahrens {
2154789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2155789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2156789Sahrens 
2157789Sahrens 	/* this buffer is not on any list */
2158789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2159789Sahrens 
2160789Sahrens 	if (hdr->b_state == arc.anon) {
2161789Sahrens 		/* this buffer is already released */
2162789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2163789Sahrens 		ASSERT(BUF_EMPTY(hdr));
21641544Seschrock 		ASSERT(buf->b_efunc == NULL);
21653093Sahrens 		arc_buf_thaw(buf);
2166789Sahrens 		return;
2167789Sahrens 	}
2168789Sahrens 
2169789Sahrens 	mutex_enter(hash_lock);
2170789Sahrens 
21711544Seschrock 	/*
21721544Seschrock 	 * Do we have more than one buf?
21731544Seschrock 	 */
21741544Seschrock 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2175789Sahrens 		arc_buf_hdr_t *nhdr;
2176789Sahrens 		arc_buf_t **bufp;
2177789Sahrens 		uint64_t blksz = hdr->b_size;
2178789Sahrens 		spa_t *spa = hdr->b_spa;
2179789Sahrens 
21801544Seschrock 		ASSERT(hdr->b_datacnt > 1);
2181789Sahrens 		/*
2182789Sahrens 		 * Pull the data off of this buf and attach it to
2183789Sahrens 		 * a new anonymous buf.
2184789Sahrens 		 */
21851544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
2186789Sahrens 		bufp = &hdr->b_buf;
21871544Seschrock 		while (*bufp != buf)
2188789Sahrens 			bufp = &(*bufp)->b_next;
2189789Sahrens 		*bufp = (*bufp)->b_next;
21901544Seschrock 
2191789Sahrens 		ASSERT3U(hdr->b_state->size, >=, hdr->b_size);
2192789Sahrens 		atomic_add_64(&hdr->b_state->size, -hdr->b_size);
21931544Seschrock 		if (refcount_is_zero(&hdr->b_refcnt)) {
21941544Seschrock 			ASSERT3U(hdr->b_state->lsize, >=, hdr->b_size);
21951544Seschrock 			atomic_add_64(&hdr->b_state->lsize, -hdr->b_size);
21961544Seschrock 		}
21971544Seschrock 		hdr->b_datacnt -= 1;
21981544Seschrock 
2199789Sahrens 		mutex_exit(hash_lock);
2200789Sahrens 
2201789Sahrens 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2202789Sahrens 		nhdr->b_size = blksz;
2203789Sahrens 		nhdr->b_spa = spa;
2204789Sahrens 		nhdr->b_buf = buf;
2205789Sahrens 		nhdr->b_state = arc.anon;
2206789Sahrens 		nhdr->b_arc_access = 0;
2207789Sahrens 		nhdr->b_flags = 0;
22081544Seschrock 		nhdr->b_datacnt = 1;
22093093Sahrens 		nhdr->b_freeze_cksum =
22103093Sahrens 		    kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
22113093Sahrens 		*nhdr->b_freeze_cksum = *hdr->b_freeze_cksum; /* struct copy */
2212789Sahrens 		buf->b_hdr = nhdr;
2213789Sahrens 		buf->b_next = NULL;
2214789Sahrens 		(void) refcount_add(&nhdr->b_refcnt, tag);
2215789Sahrens 		atomic_add_64(&arc.anon->size, blksz);
2216789Sahrens 
2217789Sahrens 		hdr = nhdr;
2218789Sahrens 	} else {
22191544Seschrock 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2220789Sahrens 		ASSERT(!list_link_active(&hdr->b_arc_node));
2221789Sahrens 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2222789Sahrens 		arc_change_state(arc.anon, hdr, hash_lock);
2223789Sahrens 		hdr->b_arc_access = 0;
2224789Sahrens 		mutex_exit(hash_lock);
2225789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
2226789Sahrens 		hdr->b_birth = 0;
2227789Sahrens 		hdr->b_cksum0 = 0;
2228789Sahrens 	}
22291544Seschrock 	buf->b_efunc = NULL;
22301544Seschrock 	buf->b_private = NULL;
22313093Sahrens 	arc_buf_thaw(buf);
2232789Sahrens }
2233789Sahrens 
2234789Sahrens int
2235789Sahrens arc_released(arc_buf_t *buf)
2236789Sahrens {
22371544Seschrock 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc.anon);
22381544Seschrock }
22391544Seschrock 
22401544Seschrock int
22411544Seschrock arc_has_callback(arc_buf_t *buf)
22421544Seschrock {
22431544Seschrock 	return (buf->b_efunc != NULL);
2244789Sahrens }
2245789Sahrens 
22461544Seschrock #ifdef ZFS_DEBUG
22471544Seschrock int
22481544Seschrock arc_referenced(arc_buf_t *buf)
22491544Seschrock {
22501544Seschrock 	return (refcount_count(&buf->b_hdr->b_refcnt));
22511544Seschrock }
22521544Seschrock #endif
22531544Seschrock 
2254789Sahrens static void
2255789Sahrens arc_write_done(zio_t *zio)
2256789Sahrens {
2257789Sahrens 	arc_buf_t *buf;
2258789Sahrens 	arc_buf_hdr_t *hdr;
2259789Sahrens 	arc_callback_t *acb;
2260789Sahrens 
2261789Sahrens 	buf = zio->io_private;
2262789Sahrens 	hdr = buf->b_hdr;
2263789Sahrens 	acb = hdr->b_acb;
2264789Sahrens 	hdr->b_acb = NULL;
22651544Seschrock 	ASSERT(acb != NULL);
2266789Sahrens 
2267789Sahrens 	/* this buffer is on no lists and is not in the hash table */
2268789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2269789Sahrens 
2270789Sahrens 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2271789Sahrens 	hdr->b_birth = zio->io_bp->blk_birth;
2272789Sahrens 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
22731544Seschrock 	/*
22741544Seschrock 	 * If the block to be written was all-zero, we may have
22751544Seschrock 	 * compressed it away.  In this case no write was performed
22761544Seschrock 	 * so there will be no dva/birth-date/checksum.  The buffer
22771544Seschrock 	 * must therefore remain anonymous (and uncached).
22781544Seschrock 	 */
2279789Sahrens 	if (!BUF_EMPTY(hdr)) {
2280789Sahrens 		arc_buf_hdr_t *exists;
2281789Sahrens 		kmutex_t *hash_lock;
2282789Sahrens 
22833093Sahrens 		arc_cksum_verify(buf);
22843093Sahrens 
2285789Sahrens 		exists = buf_hash_insert(hdr, &hash_lock);
2286789Sahrens 		if (exists) {
2287789Sahrens 			/*
2288789Sahrens 			 * This can only happen if we overwrite for
2289789Sahrens 			 * sync-to-convergence, because we remove
2290789Sahrens 			 * buffers from the hash table when we arc_free().
2291789Sahrens 			 */
2292789Sahrens 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2293789Sahrens 			    BP_IDENTITY(zio->io_bp)));
2294789Sahrens 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2295789Sahrens 			    zio->io_bp->blk_birth);
2296789Sahrens 
2297789Sahrens 			ASSERT(refcount_is_zero(&exists->b_refcnt));
2298789Sahrens 			arc_change_state(arc.anon, exists, hash_lock);
2299789Sahrens 			mutex_exit(hash_lock);
23001544Seschrock 			arc_hdr_destroy(exists);
2301789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2302789Sahrens 			ASSERT3P(exists, ==, NULL);
2303789Sahrens 		}
23041544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
23052688Smaybee 		arc_access(hdr, hash_lock);
23062688Smaybee 		mutex_exit(hash_lock);
23071544Seschrock 	} else if (acb->acb_done == NULL) {
23081544Seschrock 		int destroy_hdr;
23091544Seschrock 		/*
23101544Seschrock 		 * This is an anonymous buffer with no user callback,
23111544Seschrock 		 * destroy it if there are no active references.
23121544Seschrock 		 */
23131544Seschrock 		mutex_enter(&arc_eviction_mtx);
23141544Seschrock 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
23151544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
23161544Seschrock 		mutex_exit(&arc_eviction_mtx);
23171544Seschrock 		if (destroy_hdr)
23181544Seschrock 			arc_hdr_destroy(hdr);
23191544Seschrock 	} else {
23201544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2321789Sahrens 	}
23221544Seschrock 
23231544Seschrock 	if (acb->acb_done) {
2324789Sahrens 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2325789Sahrens 		acb->acb_done(zio, buf, acb->acb_private);
2326789Sahrens 	}
2327789Sahrens 
23281544Seschrock 	kmem_free(acb, sizeof (arc_callback_t));
2329789Sahrens }
2330789Sahrens 
2331789Sahrens int
23321775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2333789Sahrens     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2334789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
23351544Seschrock     uint32_t arc_flags, zbookmark_t *zb)
2336789Sahrens {
2337789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2338789Sahrens 	arc_callback_t	*acb;
2339789Sahrens 	zio_t	*rzio;
2340789Sahrens 
2341789Sahrens 	/* this is a private buffer - no locking required */
2342789Sahrens 	ASSERT3P(hdr->b_state, ==, arc.anon);
2343789Sahrens 	ASSERT(BUF_EMPTY(hdr));
2344789Sahrens 	ASSERT(!HDR_IO_ERROR(hdr));
23452237Smaybee 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
23462237Smaybee 	ASSERT(hdr->b_acb == 0);
2347789Sahrens 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2348789Sahrens 	acb->acb_done = done;
2349789Sahrens 	acb->acb_private = private;
2350789Sahrens 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
2351789Sahrens 	hdr->b_acb = acb;
23521544Seschrock 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
23533093Sahrens 	arc_cksum_compute(buf);
23541775Sbillm 	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
23551544Seschrock 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);
2356789Sahrens 
2357789Sahrens 	if (arc_flags & ARC_WAIT)
2358789Sahrens 		return (zio_wait(rzio));
2359789Sahrens 
2360789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2361789Sahrens 	zio_nowait(rzio);
2362789Sahrens 
2363789Sahrens 	return (0);
2364789Sahrens }
2365789Sahrens 
2366789Sahrens int
2367789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2368789Sahrens     zio_done_func_t *done, void *private, uint32_t arc_flags)
2369789Sahrens {
2370789Sahrens 	arc_buf_hdr_t *ab;
2371789Sahrens 	kmutex_t *hash_lock;
2372789Sahrens 	zio_t	*zio;
2373789Sahrens 
2374789Sahrens 	/*
2375789Sahrens 	 * If this buffer is in the cache, release it, so it
2376789Sahrens 	 * can be re-used.
2377789Sahrens 	 */
2378789Sahrens 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2379789Sahrens 	if (ab != NULL) {
2380789Sahrens 		/*
2381789Sahrens 		 * The checksum of blocks to free is not always
2382789Sahrens 		 * preserved (e.g. on the deadlist).  However, if it is
2383789Sahrens 		 * nonzero, it should match what we have in the cache.
2384789Sahrens 		 */
2385789Sahrens 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2386789Sahrens 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
23871990Smaybee 		if (ab->b_state != arc.anon)
23881990Smaybee 			arc_change_state(arc.anon, ab, hash_lock);
23892391Smaybee 		if (HDR_IO_IN_PROGRESS(ab)) {
23902391Smaybee 			/*
23912391Smaybee 			 * This should only happen when we prefetch.
23922391Smaybee 			 */
23932391Smaybee 			ASSERT(ab->b_flags & ARC_PREFETCH);
23942391Smaybee 			ASSERT3U(ab->b_datacnt, ==, 1);
23952391Smaybee 			ab->b_flags |= ARC_FREED_IN_READ;
23962391Smaybee 			if (HDR_IN_HASH_TABLE(ab))
23972391Smaybee 				buf_hash_remove(ab);
23982391Smaybee 			ab->b_arc_access = 0;
23992391Smaybee 			bzero(&ab->b_dva, sizeof (dva_t));
24002391Smaybee 			ab->b_birth = 0;
24012391Smaybee 			ab->b_cksum0 = 0;
24022391Smaybee 			ab->b_buf->b_efunc = NULL;
24032391Smaybee 			ab->b_buf->b_private = NULL;
24042391Smaybee 			mutex_exit(hash_lock);
24052391Smaybee 		} else if (refcount_is_zero(&ab->b_refcnt)) {
2406789Sahrens 			mutex_exit(hash_lock);
24071544Seschrock 			arc_hdr_destroy(ab);
2408789Sahrens 			atomic_add_64(&arc.deleted, 1);
2409789Sahrens 		} else {
24101589Smaybee 			/*
24112391Smaybee 			 * We still have an active reference on this
24122391Smaybee 			 * buffer.  This can happen, e.g., from
24132391Smaybee 			 * dbuf_unoverride().
24141589Smaybee 			 */
24152391Smaybee 			ASSERT(!HDR_IN_HASH_TABLE(ab));
2416789Sahrens 			ab->b_arc_access = 0;
2417789Sahrens 			bzero(&ab->b_dva, sizeof (dva_t));
2418789Sahrens 			ab->b_birth = 0;
2419789Sahrens 			ab->b_cksum0 = 0;
24201544Seschrock 			ab->b_buf->b_efunc = NULL;
24211544Seschrock 			ab->b_buf->b_private = NULL;
2422789Sahrens 			mutex_exit(hash_lock);
2423789Sahrens 		}
2424789Sahrens 	}
2425789Sahrens 
2426789Sahrens 	zio = zio_free(pio, spa, txg, bp, done, private);
2427789Sahrens 
2428789Sahrens 	if (arc_flags & ARC_WAIT)
2429789Sahrens 		return (zio_wait(zio));
2430789Sahrens 
2431789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2432789Sahrens 	zio_nowait(zio);
2433789Sahrens 
2434789Sahrens 	return (0);
2435789Sahrens }
2436789Sahrens 
2437789Sahrens void
2438789Sahrens arc_tempreserve_clear(uint64_t tempreserve)
2439789Sahrens {
2440789Sahrens 	atomic_add_64(&arc_tempreserve, -tempreserve);
2441789Sahrens 	ASSERT((int64_t)arc_tempreserve >= 0);
2442789Sahrens }
2443789Sahrens 
2444789Sahrens int
2445789Sahrens arc_tempreserve_space(uint64_t tempreserve)
2446789Sahrens {
2447789Sahrens #ifdef ZFS_DEBUG
2448789Sahrens 	/*
2449789Sahrens 	 * Once in a while, fail for no reason.  Everything should cope.
2450789Sahrens 	 */
2451789Sahrens 	if (spa_get_random(10000) == 0) {
2452789Sahrens 		dprintf("forcing random failure\n");
2453789Sahrens 		return (ERESTART);
2454789Sahrens 	}
2455789Sahrens #endif
2456982Smaybee 	if (tempreserve > arc.c/4 && !arc.no_grow)
2457982Smaybee 		arc.c = MIN(arc.c_max, tempreserve * 4);
2458982Smaybee 	if (tempreserve > arc.c)
2459982Smaybee 		return (ENOMEM);
2460982Smaybee 
2461789Sahrens 	/*
2462982Smaybee 	 * Throttle writes when the amount of dirty data in the cache
2463982Smaybee 	 * gets too large.  We try to keep the cache less than half full
2464982Smaybee 	 * of dirty blocks so that our sync times don't grow too large.
2465982Smaybee 	 * Note: if two requests come in concurrently, we might let them
2466982Smaybee 	 * both succeed, when one of them should fail.  Not a huge deal.
2467982Smaybee 	 *
2468982Smaybee 	 * XXX The limit should be adjusted dynamically to keep the time
2469982Smaybee 	 * to sync a dataset fixed (around 1-5 seconds?).
2470789Sahrens 	 */
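	/*
	 * For example (illustrative numbers): with arc.c at 1GB, a new
	 * reservation fails with ERESTART once existing dirty data
	 * (arc_tempreserve + arc.anon->size) already exceeds 256MB (c/4)
	 * and granting the request would push the total past 512MB (c/2).
	 */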
2471789Sahrens 
2472982Smaybee 	if (tempreserve + arc_tempreserve + arc.anon->size > arc.c / 2 &&
2473982Smaybee 	    arc_tempreserve + arc.anon->size > arc.c / 4) {
2474789Sahrens 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2475789Sahrens 		    "tempreserve=%lluK arc.c=%lluK\n",
2476789Sahrens 		    arc_tempreserve>>10, arc.anon->lsize>>10,
2477789Sahrens 		    tempreserve>>10, arc.c>>10);
2478789Sahrens 		return (ERESTART);
2479789Sahrens 	}
2480789Sahrens 	atomic_add_64(&arc_tempreserve, tempreserve);
2481789Sahrens 	return (0);
2482789Sahrens }
2483789Sahrens 
2484789Sahrens void
2485789Sahrens arc_init(void)
2486789Sahrens {
2487789Sahrens 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2488789Sahrens 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2489789Sahrens 
24902391Smaybee 	/* Convert seconds to clock ticks */
24912638Sperrin 	arc_min_prefetch_lifespan = 1 * hz;
24922391Smaybee 
2493789Sahrens 	/* Start out with 1/8 of all memory */
2494789Sahrens 	arc.c = physmem * PAGESIZE / 8;
2495789Sahrens 
2496789Sahrens #ifdef _KERNEL
2497789Sahrens 	/*
2498789Sahrens 	 * On architectures where the physical memory can be larger
2499789Sahrens 	 * than the addressable space (Intel in 32-bit mode), we may
2500789Sahrens 	 * need to limit the cache to 1/8 of VM size.
2501789Sahrens 	 */
2502789Sahrens 	arc.c = MIN(arc.c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2503789Sahrens #endif
2504789Sahrens 
2505982Smaybee 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2506789Sahrens 	arc.c_min = MAX(arc.c / 4, 64<<20);
2507982Smaybee 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2508789Sahrens 	if (arc.c * 8 >= 1<<30)
2509789Sahrens 		arc.c_max = (arc.c * 8) - (1<<30);
2510789Sahrens 	else
2511789Sahrens 		arc.c_max = arc.c_min;
2512789Sahrens 	arc.c_max = MAX(arc.c * 6, arc.c_max);
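	/*
	 * For example (illustrative numbers): on a machine with 8GB of
	 * physical memory, arc.c starts at 1GB, arc.c_min becomes
	 * MAX(256MB, 64MB) = 256MB, and arc.c_max becomes
	 * MAX(6GB, 8GB - 1GB) = 7GB.
	 */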
25132885Sahrens 
25142885Sahrens 	/*
25152885Sahrens 	 * Allow the tunables to override our calculations if they are
25162885Sahrens 	 * reasonable (i.e. over 64MB).
25172885Sahrens 	 */
25182885Sahrens 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
25192885Sahrens 		arc.c_max = zfs_arc_max;
25202885Sahrens 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc.c_max)
25212885Sahrens 		arc.c_min = zfs_arc_min;
25222885Sahrens 
2523789Sahrens 	arc.c = arc.c_max;
2524789Sahrens 	arc.p = (arc.c >> 1);
2525789Sahrens 
2526789Sahrens 	/* if kmem_flags are set, let's try to use less memory */
2527789Sahrens 	if (kmem_debugging())
2528789Sahrens 		arc.c = arc.c / 2;
2529789Sahrens 	if (arc.c < arc.c_min)
2530789Sahrens 		arc.c = arc.c_min;
2531789Sahrens 
2532789Sahrens 	arc.anon = &ARC_anon;
25331544Seschrock 	arc.mru = &ARC_mru;
25341544Seschrock 	arc.mru_ghost = &ARC_mru_ghost;
25351544Seschrock 	arc.mfu = &ARC_mfu;
25361544Seschrock 	arc.mfu_ghost = &ARC_mfu_ghost;
25371544Seschrock 	arc.size = 0;
2538789Sahrens 
25392688Smaybee 	arc.hits = 0;
25402688Smaybee 	arc.recycle_miss = 0;
25412688Smaybee 	arc.evict_skip = 0;
25422688Smaybee 	arc.mutex_miss = 0;
25432688Smaybee 
25442856Snd150628 	mutex_init(&arc.anon->mtx, NULL, MUTEX_DEFAULT, NULL);
25452856Snd150628 	mutex_init(&arc.mru->mtx, NULL, MUTEX_DEFAULT, NULL);
25462856Snd150628 	mutex_init(&arc.mru_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
25472856Snd150628 	mutex_init(&arc.mfu->mtx, NULL, MUTEX_DEFAULT, NULL);
25482856Snd150628 	mutex_init(&arc.mfu_ghost->mtx, NULL, MUTEX_DEFAULT, NULL);
25492856Snd150628 
25501544Seschrock 	list_create(&arc.mru->list, sizeof (arc_buf_hdr_t),
2551789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
25521544Seschrock 	list_create(&arc.mru_ghost->list, sizeof (arc_buf_hdr_t),
2553789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
25541544Seschrock 	list_create(&arc.mfu->list, sizeof (arc_buf_hdr_t),
2555789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
25561544Seschrock 	list_create(&arc.mfu_ghost->list, sizeof (arc_buf_hdr_t),
2557789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
2558789Sahrens 
2559789Sahrens 	buf_init();
2560789Sahrens 
2561789Sahrens 	arc_thread_exit = 0;
25621544Seschrock 	arc_eviction_list = NULL;
25631544Seschrock 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
25642887Smaybee 	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
2565789Sahrens 
2566789Sahrens 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2567789Sahrens 	    TS_RUN, minclsyspri);
2568*3158Smaybee 
2569*3158Smaybee 	arc_dead = FALSE;
2570789Sahrens }
2571789Sahrens 
2572789Sahrens void
2573789Sahrens arc_fini(void)
2574789Sahrens {
2575789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
2576789Sahrens 	arc_thread_exit = 1;
2577789Sahrens 	while (arc_thread_exit != 0)
2578789Sahrens 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2579789Sahrens 	mutex_exit(&arc_reclaim_thr_lock);
2580789Sahrens 
2581789Sahrens 	arc_flush();
2582789Sahrens 
2583789Sahrens 	arc_dead = TRUE;
2584789Sahrens 
25851544Seschrock 	mutex_destroy(&arc_eviction_mtx);
2586789Sahrens 	mutex_destroy(&arc_reclaim_thr_lock);
2587789Sahrens 	cv_destroy(&arc_reclaim_thr_cv);
2588789Sahrens 
25891544Seschrock 	list_destroy(&arc.mru->list);
25901544Seschrock 	list_destroy(&arc.mru_ghost->list);
25911544Seschrock 	list_destroy(&arc.mfu->list);
25921544Seschrock 	list_destroy(&arc.mfu_ghost->list);
2593789Sahrens 
25942856Snd150628 	mutex_destroy(&arc.anon->mtx);
25952856Snd150628 	mutex_destroy(&arc.mru->mtx);
25962856Snd150628 	mutex_destroy(&arc.mru_ghost->mtx);
25972856Snd150628 	mutex_destroy(&arc.mfu->mtx);
25982856Snd150628 	mutex_destroy(&arc.mfu_ghost->mtx);
25992856Snd150628 
2600789Sahrens 	buf_fini();
2601789Sahrens }
2602