xref: /onnv-gate/usr/src/uts/common/fs/zfs/arc.c (revision 3403)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51484Sek110237  * Common Development and Distribution License (the "License").
61484Sek110237  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
22*3403Sbmc  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
26789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27789Sahrens 
28789Sahrens /*
29*3403Sbmc  * DVA-based Adjustable Replacement Cache
30789Sahrens  *
311544Seschrock  * While much of the theory of operation used here is
321544Seschrock  * based on the self-tuning, low overhead replacement cache
33789Sahrens  * presented by Megiddo and Modha at FAST 2003, there are some
34789Sahrens  * significant differences:
35789Sahrens  *
36789Sahrens  * 1. The Megiddo and Modha model assumes any page is evictable.
37789Sahrens  * Pages in its cache cannot be "locked" into memory.  This makes
38789Sahrens  * the eviction algorithm simple: evict the last page in the list.
39789Sahrens  * This also makes the performance characteristics easy to reason
40789Sahrens  * about.  Our cache is not so simple.  At any given moment, some
41789Sahrens  * subset of the blocks in the cache are un-evictable because we
42789Sahrens  * have handed out a reference to them.  Blocks are only evictable
43789Sahrens  * when there are no external references active.  This makes
44789Sahrens  * eviction far more problematic:  we choose to evict the evictable
45789Sahrens  * blocks that are the "lowest" in the list.
46789Sahrens  *
47789Sahrens  * There are times when it is not possible to evict the requested
48789Sahrens  * space.  In these circumstances we are unable to adjust the cache
49789Sahrens  * size.  To prevent the cache from growing unbounded at these times we
50789Sahrens  * implement a "cache throttle" that slows the flow of new data
51789Sahrens  * into the cache until we can make space available.
52789Sahrens  *
53789Sahrens  * 2. The Megiddo and Modha model assumes a fixed cache size.
54789Sahrens  * Pages are evicted when the cache is full and there is a cache
55789Sahrens  * miss.  Our model has a variable sized cache.  It grows with
56789Sahrens  * high use, but also tries to react to memory pressure from the
57789Sahrens  * operating system: decreasing its size when system memory is
58789Sahrens  * tight.
59789Sahrens  *
60789Sahrens  * 3. The Megiddo and Modha model assumes a fixed page size. All
61789Sahrens  * elements of the cache are therefore exactly the same size.  So
62789Sahrens  * when adjusting the cache size following a cache miss, it's simply
63789Sahrens  * a matter of choosing a single page to evict.  In our model, we
64789Sahrens  * have variable-sized cache blocks (ranging from 512 bytes to
65789Sahrens  * 128K bytes).  We therefore choose a set of blocks to evict to make
66789Sahrens  * space for a cache miss that approximates as closely as possible
67789Sahrens  * the space used by the new block.
68789Sahrens  *
69789Sahrens  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70789Sahrens  * by N. Megiddo & D. Modha, FAST 2003
71789Sahrens  */
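/*
 * A concrete illustration of point 3 above (not part of the original
 * comment): if a cache miss brings in a 128K block while the cache is at
 * its target size, the ARC might evict, say, one 64K, one 32K and two 16K
 * evictable buffers from the tail of a list to free approximately the same
 * amount of space, rather than evicting exactly one fixed-size page as in
 * the original algorithm.  The sizes here are only an example; see
 * arc_evict() below for the actual selection logic.
 */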
72789Sahrens 
73789Sahrens /*
74789Sahrens  * The locking model:
75789Sahrens  *
76789Sahrens  * A new reference to a cache buffer can be obtained in two
77789Sahrens  * ways: 1) via a hash table lookup using the DVA as a key,
78789Sahrens  * or 2) via one of the ARC lists.  The arc_read() interface
79789Sahrens  * uses method 1, while the internal arc algorithms for
80789Sahrens  * adjusting the cache use method 2.  We therefore provide two
81789Sahrens  * types of locks: 1) the hash table lock array, and 2) the
82789Sahrens  * arc list locks.
83789Sahrens  *
84789Sahrens  * Buffers do not have their own mutexes; rather, they rely on the
85789Sahrens  * hash table mutexes for the bulk of their protection (i.e. most
86789Sahrens  * fields in the arc_buf_hdr_t are protected by these mutexes).
87789Sahrens  *
88789Sahrens  * buf_hash_find() returns the appropriate mutex (held) when it
89789Sahrens  * locates the requested buffer in the hash table.  It returns
90789Sahrens  * NULL for the mutex if the buffer was not in the table.
91789Sahrens  *
92789Sahrens  * buf_hash_remove() expects the appropriate hash mutex to be
93789Sahrens  * already held before it is invoked.
94789Sahrens  *
95789Sahrens  * Each arc state also has a mutex which is used to protect the
96789Sahrens  * buffer list associated with the state.  When attempting to
97789Sahrens  * obtain a hash table lock while holding an arc list lock you
98789Sahrens  * must use mutex_tryenter() to avoid deadlock.  Also note that
992688Smaybee  * the active state mutex must be held before the ghost state mutex.
100789Sahrens  *
1011544Seschrock  * Arc buffers may have an associated eviction callback function.
1021544Seschrock  * This function will be invoked prior to removing the buffer (e.g.
1031544Seschrock  * in arc_do_user_evicts()).  Note however that the data associated
1041544Seschrock  * with the buffer may be evicted prior to the callback.  The callback
1051544Seschrock  * must be made with *no locks held* (to prevent deadlock).  Additionally,
1061544Seschrock  * the users of callbacks must ensure that their private data is
1071544Seschrock  * protected from simultaneous callbacks from arc_buf_evict()
1081544Seschrock  * and arc_do_user_evicts().
1091544Seschrock  *
110789Sahrens  * Note that the majority of the performance stats are manipulated
111789Sahrens  * with atomic operations.
112789Sahrens  */
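/*
 * Illustration of the lock-ordering rule above (example only, not part of
 * the original comment).  Code that already holds an arc list (state) lock
 * may take a hash table lock only conditionally, skipping the buffer on
 * contention instead of blocking -- this is the pattern arc_evict() uses:
 *
 *	mutex_enter(&state->arcs_mtx);		(arc list lock first)
 *	if (mutex_tryenter(hash_lock)) {	(never block on the hash lock)
 *		... operate on the buffer ...
 *		mutex_exit(hash_lock);
 *	} else {
 *		... count a mutex miss and skip this buffer ...
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */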
113789Sahrens 
114789Sahrens #include <sys/spa.h>
115789Sahrens #include <sys/zio.h>
1163093Sahrens #include <sys/zio_checksum.h>
117789Sahrens #include <sys/zfs_context.h>
118789Sahrens #include <sys/arc.h>
119789Sahrens #include <sys/refcount.h>
120789Sahrens #ifdef _KERNEL
121789Sahrens #include <sys/vmsystm.h>
122789Sahrens #include <vm/anon.h>
123789Sahrens #include <sys/fs/swapnode.h>
1241484Sek110237 #include <sys/dnlc.h>
125789Sahrens #endif
126789Sahrens #include <sys/callb.h>
127*3403Sbmc #include <sys/kstat.h>
128789Sahrens 
129789Sahrens static kmutex_t		arc_reclaim_thr_lock;
130789Sahrens static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
131789Sahrens static uint8_t		arc_thread_exit;
132789Sahrens 
1331484Sek110237 #define	ARC_REDUCE_DNLC_PERCENT	3
1341484Sek110237 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
1351484Sek110237 
136789Sahrens typedef enum arc_reclaim_strategy {
137789Sahrens 	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
138789Sahrens 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
139789Sahrens } arc_reclaim_strategy_t;
140789Sahrens 
141789Sahrens /* number of seconds before growing cache again */
142789Sahrens static int		arc_grow_retry = 60;
143789Sahrens 
1442391Smaybee /*
1452638Sperrin  * minimum lifespan of a prefetch block in clock ticks
1462638Sperrin  * (initialized in arc_init())
1472391Smaybee  */
1482638Sperrin static int		arc_min_prefetch_lifespan;
1492391Smaybee 
150789Sahrens static int arc_dead;
151789Sahrens 
152789Sahrens /*
1532885Sahrens  * These tunables are for performance analysis.
1542885Sahrens  */
1552885Sahrens uint64_t zfs_arc_max;
1562885Sahrens uint64_t zfs_arc_min;
1572885Sahrens 
1582885Sahrens /*
159789Sahrens  * Note that buffers can be in one of 5 states:
160789Sahrens  *	ARC_anon	- anonymous (discussed below)
1611544Seschrock  *	ARC_mru		- recently used, currently cached
1621544Seschrock  *	ARC_mru_ghost	- recently used, no longer in cache
1631544Seschrock  *	ARC_mfu		- frequently used, currently cached
1641544Seschrock  *	ARC_mfu_ghost	- frequently used, no longer in cache
165789Sahrens  * When there are no active references to a buffer, it
166789Sahrens  * is linked onto one of the lists in arc.  These are the
167789Sahrens  * only buffers that can be evicted or deleted.
168789Sahrens  *
169789Sahrens  * Anonymous buffers are buffers that are not associated with
170789Sahrens  * a DVA.  These are buffers that hold dirty block copies
171789Sahrens  * before they are written to stable storage.  By definition,
1721544Seschrock  * they are "ref'd" and are considered part of arc_mru
173789Sahrens  * that cannot be freed.  Generally, they will acquire a DVA
1741544Seschrock  * as they are written and migrate onto the arc_mru list.
175789Sahrens  */
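/*
 * An illustrative (approximate) lifecycle, for orientation only: an
 * anonymous buffer acquires a DVA as it is written and lands on arc_mru;
 * if it is referenced again it is promoted to arc_mfu (see arc_access());
 * when an unreferenced buffer is evicted from arc_mru or arc_mfu, only its
 * header is retained on the corresponding ghost list (see arc_evict(),
 * which moves evicted buffers to arc_mru_ghost/arc_mfu_ghost), and later
 * hits on those ghost entries are used to re-balance the cache.
 */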
176789Sahrens 
177789Sahrens typedef struct arc_state {
178*3403Sbmc 	list_t	arcs_list;	/* linked list of evictable buffers in state */
179*3403Sbmc 	uint64_t arcs_lsize;	/* total size of buffers in the linked list */
180*3403Sbmc 	uint64_t arcs_size;	/* total size of all buffers in this state */
181*3403Sbmc 	kmutex_t arcs_mtx;
182789Sahrens } arc_state_t;
183789Sahrens 
184789Sahrens /* The 5 states: */
185789Sahrens static arc_state_t ARC_anon;
1861544Seschrock static arc_state_t ARC_mru;
1871544Seschrock static arc_state_t ARC_mru_ghost;
1881544Seschrock static arc_state_t ARC_mfu;
1891544Seschrock static arc_state_t ARC_mfu_ghost;
190789Sahrens 
191*3403Sbmc typedef struct arc_stats {
192*3403Sbmc 	kstat_named_t arcstat_hits;
193*3403Sbmc 	kstat_named_t arcstat_misses;
194*3403Sbmc 	kstat_named_t arcstat_demand_data_hits;
195*3403Sbmc 	kstat_named_t arcstat_demand_data_misses;
196*3403Sbmc 	kstat_named_t arcstat_demand_metadata_hits;
197*3403Sbmc 	kstat_named_t arcstat_demand_metadata_misses;
198*3403Sbmc 	kstat_named_t arcstat_prefetch_data_hits;
199*3403Sbmc 	kstat_named_t arcstat_prefetch_data_misses;
200*3403Sbmc 	kstat_named_t arcstat_prefetch_metadata_hits;
201*3403Sbmc 	kstat_named_t arcstat_prefetch_metadata_misses;
202*3403Sbmc 	kstat_named_t arcstat_mru_hits;
203*3403Sbmc 	kstat_named_t arcstat_mru_ghost_hits;
204*3403Sbmc 	kstat_named_t arcstat_mfu_hits;
205*3403Sbmc 	kstat_named_t arcstat_mfu_ghost_hits;
206*3403Sbmc 	kstat_named_t arcstat_deleted;
207*3403Sbmc 	kstat_named_t arcstat_recycle_miss;
208*3403Sbmc 	kstat_named_t arcstat_mutex_miss;
209*3403Sbmc 	kstat_named_t arcstat_evict_skip;
210*3403Sbmc 	kstat_named_t arcstat_hash_elements;
211*3403Sbmc 	kstat_named_t arcstat_hash_elements_max;
212*3403Sbmc 	kstat_named_t arcstat_hash_collisions;
213*3403Sbmc 	kstat_named_t arcstat_hash_chains;
214*3403Sbmc 	kstat_named_t arcstat_hash_chain_max;
215*3403Sbmc 	kstat_named_t arcstat_p;
216*3403Sbmc 	kstat_named_t arcstat_c;
217*3403Sbmc 	kstat_named_t arcstat_c_min;
218*3403Sbmc 	kstat_named_t arcstat_c_max;
219*3403Sbmc 	kstat_named_t arcstat_size;
220*3403Sbmc } arc_stats_t;
221*3403Sbmc 
222*3403Sbmc static arc_stats_t arc_stats = {
223*3403Sbmc 	{ "hits",			KSTAT_DATA_UINT64 },
224*3403Sbmc 	{ "misses",			KSTAT_DATA_UINT64 },
225*3403Sbmc 	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
226*3403Sbmc 	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
227*3403Sbmc 	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
228*3403Sbmc 	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
229*3403Sbmc 	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
230*3403Sbmc 	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
231*3403Sbmc 	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
232*3403Sbmc 	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
233*3403Sbmc 	{ "mru_hits",			KSTAT_DATA_UINT64 },
234*3403Sbmc 	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
235*3403Sbmc 	{ "mfu_hits",			KSTAT_DATA_UINT64 },
236*3403Sbmc 	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
237*3403Sbmc 	{ "deleted",			KSTAT_DATA_UINT64 },
238*3403Sbmc 	{ "recycle_miss",		KSTAT_DATA_UINT64 },
239*3403Sbmc 	{ "mutex_miss",			KSTAT_DATA_UINT64 },
240*3403Sbmc 	{ "evict_skip",			KSTAT_DATA_UINT64 },
241*3403Sbmc 	{ "hash_elements",		KSTAT_DATA_UINT64 },
242*3403Sbmc 	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
243*3403Sbmc 	{ "hash_collisions",		KSTAT_DATA_UINT64 },
244*3403Sbmc 	{ "hash_chains",		KSTAT_DATA_UINT64 },
245*3403Sbmc 	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
246*3403Sbmc 	{ "p",				KSTAT_DATA_UINT64 },
247*3403Sbmc 	{ "c",				KSTAT_DATA_UINT64 },
248*3403Sbmc 	{ "c_min",			KSTAT_DATA_UINT64 },
249*3403Sbmc 	{ "c_max",			KSTAT_DATA_UINT64 },
250*3403Sbmc 	{ "size",			KSTAT_DATA_UINT64 }
251*3403Sbmc };
252789Sahrens 
253*3403Sbmc #define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)
254*3403Sbmc 
255*3403Sbmc #define	ARCSTAT_INCR(stat, val) \
256*3403Sbmc 	atomic_add_64(&arc_stats.stat.value.ui64, (val));
257*3403Sbmc 
258*3403Sbmc #define	ARCSTAT_BUMP(stat) 	ARCSTAT_INCR(stat, 1)
259*3403Sbmc #define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)
260*3403Sbmc 
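/*
 * ARCSTAT_MAX() implements a lock-free "store maximum": it re-reads the
 * current value and retries the compare-and-swap until either the recorded
 * maximum is already >= val or the swap succeeds.
 */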
261*3403Sbmc #define	ARCSTAT_MAX(stat, val) {					\
262*3403Sbmc 	uint64_t m;							\
263*3403Sbmc 	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
264*3403Sbmc 	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
265*3403Sbmc 		continue;						\
266*3403Sbmc }
267*3403Sbmc 
268*3403Sbmc #define	ARCSTAT_MAXSTAT(stat) \
269*3403Sbmc 	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
270789Sahrens 
271*3403Sbmc /*
272*3403Sbmc  * We define a macro to allow ARC hits/misses to be easily broken down by
273*3403Sbmc  * two separate conditions, giving a total of four different subtypes for
274*3403Sbmc  * each of hits and misses (so eight statistics total).
275*3403Sbmc  */
276*3403Sbmc #define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
277*3403Sbmc 	if (cond1) {							\
278*3403Sbmc 		if (cond2) {						\
279*3403Sbmc 			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
280*3403Sbmc 		} else {						\
281*3403Sbmc 			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
282*3403Sbmc 		}							\
283*3403Sbmc 	} else {							\
284*3403Sbmc 		if (cond2) {						\
285*3403Sbmc 			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
286*3403Sbmc 		} else {						\
287*3403Sbmc 			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
288*3403Sbmc 		}							\
289*3403Sbmc 	}
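/*
 * For example, the call in arc_buf_add_ref() below,
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits, arcstat_demand_metadata_hits,
 * arcstat_prefetch_data_hits or arcstat_prefetch_metadata_hits.
 */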
290789Sahrens 
291*3403Sbmc kstat_t			*arc_ksp;
292*3403Sbmc static arc_state_t 	*arc_anon;
293*3403Sbmc static arc_state_t	*arc_mru;
294*3403Sbmc static arc_state_t	*arc_mru_ghost;
295*3403Sbmc static arc_state_t	*arc_mfu;
296*3403Sbmc static arc_state_t	*arc_mfu_ghost;
297*3403Sbmc 
298*3403Sbmc /*
299*3403Sbmc  * There are several ARC variables that are critical to export as kstats --
300*3403Sbmc  * but we don't want to have to grovel around in the kstat whenever we wish to
301*3403Sbmc  * manipulate them.  For these variables, we therefore define them to be in
302*3403Sbmc  * terms of the statistic variable.  This assures that we are not introducing
303*3403Sbmc  * the possibility of inconsistency by having shadow copies of the variables,
304*3403Sbmc  * while still allowing the code to be readable.
305*3403Sbmc  */
306*3403Sbmc #define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
307*3403Sbmc #define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
308*3403Sbmc #define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
309*3403Sbmc #define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
310*3403Sbmc #define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
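/*
 * For example, arc_size expands to arc_stats.arcstat_size.value.ui64, so
 * code elsewhere in this file can update it in place with the usual atomics
 * (e.g. atomic_add_64(&arc_size, -size)) and the exported kstat is always
 * current, with no separate shadow variable to keep in sync.
 */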
311*3403Sbmc 
312*3403Sbmc static int		arc_no_grow;	/* Don't try to grow cache size */
313*3403Sbmc static uint64_t		arc_tempreserve;
314789Sahrens 
315789Sahrens typedef struct arc_callback arc_callback_t;
316789Sahrens 
317789Sahrens struct arc_callback {
318789Sahrens 	arc_done_func_t		*acb_done;
319789Sahrens 	void			*acb_private;
320789Sahrens 	arc_byteswap_func_t	*acb_byteswap;
321789Sahrens 	arc_buf_t		*acb_buf;
322789Sahrens 	zio_t			*acb_zio_dummy;
323789Sahrens 	arc_callback_t		*acb_next;
324789Sahrens };
325789Sahrens 
326789Sahrens struct arc_buf_hdr {
327789Sahrens 	/* protected by hash lock */
328789Sahrens 	dva_t			b_dva;
329789Sahrens 	uint64_t		b_birth;
330789Sahrens 	uint64_t		b_cksum0;
331789Sahrens 
3323093Sahrens 	kmutex_t		b_freeze_lock;
3333093Sahrens 	zio_cksum_t		*b_freeze_cksum;
3343093Sahrens 
335789Sahrens 	arc_buf_hdr_t		*b_hash_next;
336789Sahrens 	arc_buf_t		*b_buf;
337789Sahrens 	uint32_t		b_flags;
3381544Seschrock 	uint32_t		b_datacnt;
339789Sahrens 
3403290Sjohansen 	arc_callback_t		*b_acb;
341789Sahrens 	kcondvar_t		b_cv;
3423290Sjohansen 
3433290Sjohansen 	/* immutable */
3443290Sjohansen 	arc_buf_contents_t	b_type;
3453290Sjohansen 	uint64_t		b_size;
3463290Sjohansen 	spa_t			*b_spa;
347789Sahrens 
348789Sahrens 	/* protected by arc state mutex */
349789Sahrens 	arc_state_t		*b_state;
350789Sahrens 	list_node_t		b_arc_node;
351789Sahrens 
352789Sahrens 	/* updated atomically */
353789Sahrens 	clock_t			b_arc_access;
354789Sahrens 
355789Sahrens 	/* self protecting */
356789Sahrens 	refcount_t		b_refcnt;
357789Sahrens };
358789Sahrens 
3591544Seschrock static arc_buf_t *arc_eviction_list;
3601544Seschrock static kmutex_t arc_eviction_mtx;
3612887Smaybee static arc_buf_hdr_t arc_eviction_hdr;
3622688Smaybee static void arc_get_data_buf(arc_buf_t *buf);
3632688Smaybee static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
3641544Seschrock 
3651544Seschrock #define	GHOST_STATE(state)	\
366*3403Sbmc 	((state) == arc_mru_ghost || (state) == arc_mfu_ghost)
3671544Seschrock 
368789Sahrens /*
369789Sahrens  * Private ARC flags.  These flags are private ARC only flags that will show up
370789Sahrens  * in b_flags in the arc_hdr_buf_t.  Some flags are publicly declared, and can
371789Sahrens  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
372789Sahrens  * should never be passed and should only be set by ARC code.  When adding new
373789Sahrens  * public flags, make sure not to smash the private ones.
374789Sahrens  */
375789Sahrens 
3761544Seschrock #define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
377789Sahrens #define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
378789Sahrens #define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
379789Sahrens #define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
3801544Seschrock #define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
3812391Smaybee #define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
382789Sahrens 
3831544Seschrock #define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
384789Sahrens #define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
385789Sahrens #define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
386789Sahrens #define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
3871544Seschrock #define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
388789Sahrens 
389789Sahrens /*
390789Sahrens  * Hash table routines
391789Sahrens  */
392789Sahrens 
393789Sahrens #define	HT_LOCK_PAD	64
394789Sahrens 
395789Sahrens struct ht_lock {
396789Sahrens 	kmutex_t	ht_lock;
397789Sahrens #ifdef _KERNEL
398789Sahrens 	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
399789Sahrens #endif
400789Sahrens };
401789Sahrens 
402789Sahrens #define	BUF_LOCKS 256
403789Sahrens typedef struct buf_hash_table {
404789Sahrens 	uint64_t ht_mask;
405789Sahrens 	arc_buf_hdr_t **ht_table;
406789Sahrens 	struct ht_lock ht_locks[BUF_LOCKS];
407789Sahrens } buf_hash_table_t;
408789Sahrens 
409789Sahrens static buf_hash_table_t buf_hash_table;
410789Sahrens 
411789Sahrens #define	BUF_HASH_INDEX(spa, dva, birth) \
412789Sahrens 	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
413789Sahrens #define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
414789Sahrens #define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
415789Sahrens #define	HDR_LOCK(buf) \
416789Sahrens 	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
417789Sahrens 
418789Sahrens uint64_t zfs_crc64_table[256];
419789Sahrens 
420789Sahrens static uint64_t
421789Sahrens buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
422789Sahrens {
423789Sahrens 	uintptr_t spav = (uintptr_t)spa;
424789Sahrens 	uint8_t *vdva = (uint8_t *)dva;
425789Sahrens 	uint64_t crc = -1ULL;
426789Sahrens 	int i;
427789Sahrens 
428789Sahrens 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
429789Sahrens 
430789Sahrens 	for (i = 0; i < sizeof (dva_t); i++)
431789Sahrens 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
432789Sahrens 
433789Sahrens 	crc ^= (spav>>8) ^ birth;
434789Sahrens 
435789Sahrens 	return (crc);
436789Sahrens }
437789Sahrens 
438789Sahrens #define	BUF_EMPTY(buf)						\
439789Sahrens 	((buf)->b_dva.dva_word[0] == 0 &&			\
440789Sahrens 	(buf)->b_dva.dva_word[1] == 0 &&			\
441789Sahrens 	(buf)->b_birth == 0)
442789Sahrens 
443789Sahrens #define	BUF_EQUAL(spa, dva, birth, buf)				\
444789Sahrens 	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
445789Sahrens 	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
446789Sahrens 	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
447789Sahrens 
448789Sahrens static arc_buf_hdr_t *
449789Sahrens buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
450789Sahrens {
451789Sahrens 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
452789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
453789Sahrens 	arc_buf_hdr_t *buf;
454789Sahrens 
455789Sahrens 	mutex_enter(hash_lock);
456789Sahrens 	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
457789Sahrens 	    buf = buf->b_hash_next) {
458789Sahrens 		if (BUF_EQUAL(spa, dva, birth, buf)) {
459789Sahrens 			*lockp = hash_lock;
460789Sahrens 			return (buf);
461789Sahrens 		}
462789Sahrens 	}
463789Sahrens 	mutex_exit(hash_lock);
464789Sahrens 	*lockp = NULL;
465789Sahrens 	return (NULL);
466789Sahrens }
467789Sahrens 
468789Sahrens /*
469789Sahrens  * Insert an entry into the hash table.  If there is already an element
470789Sahrens  * equal to elem in the hash table, then the already existing element
471789Sahrens  * will be returned and the new element will not be inserted.
472789Sahrens  * Otherwise returns NULL.
473789Sahrens  */
474789Sahrens static arc_buf_hdr_t *
475789Sahrens buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
476789Sahrens {
477789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
478789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
479789Sahrens 	arc_buf_hdr_t *fbuf;
480*3403Sbmc 	uint32_t i;
481789Sahrens 
4821544Seschrock 	ASSERT(!HDR_IN_HASH_TABLE(buf));
483789Sahrens 	*lockp = hash_lock;
484789Sahrens 	mutex_enter(hash_lock);
485789Sahrens 	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
486789Sahrens 	    fbuf = fbuf->b_hash_next, i++) {
487789Sahrens 		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
488789Sahrens 			return (fbuf);
489789Sahrens 	}
490789Sahrens 
491789Sahrens 	buf->b_hash_next = buf_hash_table.ht_table[idx];
492789Sahrens 	buf_hash_table.ht_table[idx] = buf;
4931544Seschrock 	buf->b_flags |= ARC_IN_HASH_TABLE;
494789Sahrens 
495789Sahrens 	/* collect some hash table performance data */
496789Sahrens 	if (i > 0) {
497*3403Sbmc 		ARCSTAT_BUMP(arcstat_hash_collisions);
498789Sahrens 		if (i == 1)
499*3403Sbmc 			ARCSTAT_BUMP(arcstat_hash_chains);
500*3403Sbmc 
501*3403Sbmc 		ARCSTAT_MAX(arcstat_hash_chain_max, i);
502789Sahrens 	}
503*3403Sbmc 
504*3403Sbmc 	ARCSTAT_BUMP(arcstat_hash_elements);
505*3403Sbmc 	ARCSTAT_MAXSTAT(arcstat_hash_elements);
506789Sahrens 
507789Sahrens 	return (NULL);
508789Sahrens }
509789Sahrens 
510789Sahrens static void
511789Sahrens buf_hash_remove(arc_buf_hdr_t *buf)
512789Sahrens {
513789Sahrens 	arc_buf_hdr_t *fbuf, **bufp;
514789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
515789Sahrens 
516789Sahrens 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
5171544Seschrock 	ASSERT(HDR_IN_HASH_TABLE(buf));
518789Sahrens 
519789Sahrens 	bufp = &buf_hash_table.ht_table[idx];
520789Sahrens 	while ((fbuf = *bufp) != buf) {
521789Sahrens 		ASSERT(fbuf != NULL);
522789Sahrens 		bufp = &fbuf->b_hash_next;
523789Sahrens 	}
524789Sahrens 	*bufp = buf->b_hash_next;
525789Sahrens 	buf->b_hash_next = NULL;
5261544Seschrock 	buf->b_flags &= ~ARC_IN_HASH_TABLE;
527789Sahrens 
528789Sahrens 	/* collect some hash table performance data */
529*3403Sbmc 	ARCSTAT_BUMPDOWN(arcstat_hash_elements);
530*3403Sbmc 
531789Sahrens 	if (buf_hash_table.ht_table[idx] &&
532789Sahrens 	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
533*3403Sbmc 		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
534789Sahrens }
535789Sahrens 
536789Sahrens /*
537789Sahrens  * Global data structures and functions for the buf kmem cache.
538789Sahrens  */
539789Sahrens static kmem_cache_t *hdr_cache;
540789Sahrens static kmem_cache_t *buf_cache;
541789Sahrens 
542789Sahrens static void
543789Sahrens buf_fini(void)
544789Sahrens {
545789Sahrens 	int i;
546789Sahrens 
547789Sahrens 	kmem_free(buf_hash_table.ht_table,
548789Sahrens 	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
549789Sahrens 	for (i = 0; i < BUF_LOCKS; i++)
550789Sahrens 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
551789Sahrens 	kmem_cache_destroy(hdr_cache);
552789Sahrens 	kmem_cache_destroy(buf_cache);
553789Sahrens }
554789Sahrens 
555789Sahrens /*
556789Sahrens  * Constructor callback - called when the cache is empty
557789Sahrens  * and a new buf is requested.
558789Sahrens  */
559789Sahrens /* ARGSUSED */
560789Sahrens static int
561789Sahrens hdr_cons(void *vbuf, void *unused, int kmflag)
562789Sahrens {
563789Sahrens 	arc_buf_hdr_t *buf = vbuf;
564789Sahrens 
565789Sahrens 	bzero(buf, sizeof (arc_buf_hdr_t));
566789Sahrens 	refcount_create(&buf->b_refcnt);
567789Sahrens 	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
568789Sahrens 	return (0);
569789Sahrens }
570789Sahrens 
571789Sahrens /*
572789Sahrens  * Destructor callback - called when a cached buf is
573789Sahrens  * no longer required.
574789Sahrens  */
575789Sahrens /* ARGSUSED */
576789Sahrens static void
577789Sahrens hdr_dest(void *vbuf, void *unused)
578789Sahrens {
579789Sahrens 	arc_buf_hdr_t *buf = vbuf;
580789Sahrens 
581789Sahrens 	refcount_destroy(&buf->b_refcnt);
582789Sahrens 	cv_destroy(&buf->b_cv);
583789Sahrens }
584789Sahrens 
585789Sahrens /*
586789Sahrens  * Reclaim callback -- invoked when memory is low.
587789Sahrens  */
588789Sahrens /* ARGSUSED */
589789Sahrens static void
590789Sahrens hdr_recl(void *unused)
591789Sahrens {
592789Sahrens 	dprintf("hdr_recl called\n");
5933158Smaybee 	/*
5943158Smaybee 	 * umem calls the reclaim func when we destroy the buf cache,
5953158Smaybee 	 * which is after we do arc_fini().
5963158Smaybee 	 */
5973158Smaybee 	if (!arc_dead)
5983158Smaybee 		cv_signal(&arc_reclaim_thr_cv);
599789Sahrens }
600789Sahrens 
601789Sahrens static void
602789Sahrens buf_init(void)
603789Sahrens {
604789Sahrens 	uint64_t *ct;
6051544Seschrock 	uint64_t hsize = 1ULL << 12;
606789Sahrens 	int i, j;
607789Sahrens 
608789Sahrens 	/*
609789Sahrens 	 * The hash table is big enough to fill all of physical memory
6101544Seschrock 	 * with an average 64K block size.  The table will take up
6111544Seschrock 	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
612789Sahrens 	 */
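	/*
	 * Worked example (illustrative): with 8-byte pointers and 4GB of
	 * physical memory the loop below stops at hsize = 4GB / 64K = 64K
	 * entries, so the table occupies 64K * 8 bytes = 512KB -- the
	 * 128KB-per-GB figure quoted above.
	 */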
6131544Seschrock 	while (hsize * 65536 < physmem * PAGESIZE)
614789Sahrens 		hsize <<= 1;
6151544Seschrock retry:
616789Sahrens 	buf_hash_table.ht_mask = hsize - 1;
6171544Seschrock 	buf_hash_table.ht_table =
6181544Seschrock 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
6191544Seschrock 	if (buf_hash_table.ht_table == NULL) {
6201544Seschrock 		ASSERT(hsize > (1ULL << 8));
6211544Seschrock 		hsize >>= 1;
6221544Seschrock 		goto retry;
6231544Seschrock 	}
624789Sahrens 
625789Sahrens 	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
626789Sahrens 	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
627789Sahrens 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
628789Sahrens 	    0, NULL, NULL, NULL, NULL, NULL, 0);
629789Sahrens 
630789Sahrens 	for (i = 0; i < 256; i++)
631789Sahrens 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
632789Sahrens 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
633789Sahrens 
634789Sahrens 	for (i = 0; i < BUF_LOCKS; i++) {
635789Sahrens 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
636789Sahrens 		    NULL, MUTEX_DEFAULT, NULL);
637789Sahrens 	}
638789Sahrens }
639789Sahrens 
640789Sahrens #define	ARC_MINTIME	(hz>>4) /* 62 ms */
641789Sahrens 
642789Sahrens static void
6433093Sahrens arc_cksum_verify(arc_buf_t *buf)
6443093Sahrens {
6453093Sahrens 	zio_cksum_t zc;
6463093Sahrens 
6473312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
6483093Sahrens 		return;
6493093Sahrens 
6503093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
6513265Sahrens 	if (buf->b_hdr->b_freeze_cksum == NULL ||
6523265Sahrens 	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
6533093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
6543093Sahrens 		return;
6553093Sahrens 	}
6563093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
6573093Sahrens 	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
6583093Sahrens 		panic("buffer modified while frozen!");
6593093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
6603093Sahrens }
6613093Sahrens 
6623093Sahrens static void
6633093Sahrens arc_cksum_compute(arc_buf_t *buf)
6643093Sahrens {
6653312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
6663093Sahrens 		return;
6673093Sahrens 
6683093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
6693093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
6703093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
6713093Sahrens 		return;
6723093Sahrens 	}
6733093Sahrens 	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
6743093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
6753093Sahrens 	    buf->b_hdr->b_freeze_cksum);
6763093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
6773093Sahrens }
6783093Sahrens 
6793093Sahrens void
6803093Sahrens arc_buf_thaw(arc_buf_t *buf)
6813093Sahrens {
6823312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
6833093Sahrens 		return;
6843093Sahrens 
685*3403Sbmc 	if (buf->b_hdr->b_state != arc_anon)
6863093Sahrens 		panic("modifying non-anon buffer!");
6873093Sahrens 	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
6883093Sahrens 		panic("modifying buffer while i/o in progress!");
6893093Sahrens 	arc_cksum_verify(buf);
6903093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
6913093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
6923093Sahrens 		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
6933093Sahrens 		buf->b_hdr->b_freeze_cksum = NULL;
6943093Sahrens 	}
6953093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
6963093Sahrens }
6973093Sahrens 
6983093Sahrens void
6993093Sahrens arc_buf_freeze(arc_buf_t *buf)
7003093Sahrens {
7013312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
7023312Sahrens 		return;
7033312Sahrens 
7043093Sahrens 	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
705*3403Sbmc 	    buf->b_hdr->b_state == arc_anon);
7063093Sahrens 	arc_cksum_compute(buf);
7073093Sahrens }
7083093Sahrens 
7093093Sahrens static void
710789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
711789Sahrens {
712789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
713789Sahrens 
714789Sahrens 	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
715*3403Sbmc 	    (ab->b_state != arc_anon)) {
7161544Seschrock 		int delta = ab->b_size * ab->b_datacnt;
717789Sahrens 
718*3403Sbmc 		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
719*3403Sbmc 		mutex_enter(&ab->b_state->arcs_mtx);
720789Sahrens 		ASSERT(list_link_active(&ab->b_arc_node));
721*3403Sbmc 		list_remove(&ab->b_state->arcs_list, ab);
7221544Seschrock 		if (GHOST_STATE(ab->b_state)) {
7231544Seschrock 			ASSERT3U(ab->b_datacnt, ==, 0);
7241544Seschrock 			ASSERT3P(ab->b_buf, ==, NULL);
7251544Seschrock 			delta = ab->b_size;
7261544Seschrock 		}
7271544Seschrock 		ASSERT(delta > 0);
728*3403Sbmc 		ASSERT3U(ab->b_state->arcs_lsize, >=, delta);
729*3403Sbmc 		atomic_add_64(&ab->b_state->arcs_lsize, -delta);
730*3403Sbmc 		mutex_exit(&ab->b_state->arcs_mtx);
7312391Smaybee 		/* remove the prefetch flag if we get a reference */
7322391Smaybee 		if (ab->b_flags & ARC_PREFETCH)
7332391Smaybee 			ab->b_flags &= ~ARC_PREFETCH;
734789Sahrens 	}
735789Sahrens }
736789Sahrens 
737789Sahrens static int
738789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
739789Sahrens {
740789Sahrens 	int cnt;
741*3403Sbmc 	arc_state_t *state = ab->b_state;
742789Sahrens 
743*3403Sbmc 	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
744*3403Sbmc 	ASSERT(!GHOST_STATE(state));
745789Sahrens 
746789Sahrens 	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
747*3403Sbmc 	    (state != arc_anon)) {
748*3403Sbmc 		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
749*3403Sbmc 		mutex_enter(&state->arcs_mtx);
750789Sahrens 		ASSERT(!list_link_active(&ab->b_arc_node));
751*3403Sbmc 		list_insert_head(&state->arcs_list, ab);
7521544Seschrock 		ASSERT(ab->b_datacnt > 0);
753*3403Sbmc 		atomic_add_64(&state->arcs_lsize, ab->b_size * ab->b_datacnt);
754*3403Sbmc 		ASSERT3U(state->arcs_size, >=, state->arcs_lsize);
755*3403Sbmc 		mutex_exit(&state->arcs_mtx);
756789Sahrens 	}
757789Sahrens 	return (cnt);
758789Sahrens }
759789Sahrens 
760789Sahrens /*
761789Sahrens  * Move the supplied buffer to the indicated state.  The mutex
762789Sahrens  * for the buffer must be held by the caller.
763789Sahrens  */
764789Sahrens static void
7651544Seschrock arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
766789Sahrens {
7671544Seschrock 	arc_state_t *old_state = ab->b_state;
7681544Seschrock 	int refcnt = refcount_count(&ab->b_refcnt);
7691544Seschrock 	int from_delta, to_delta;
770789Sahrens 
771789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
7721544Seschrock 	ASSERT(new_state != old_state);
7731544Seschrock 	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
7741544Seschrock 	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
7751544Seschrock 
7761544Seschrock 	from_delta = to_delta = ab->b_datacnt * ab->b_size;
777789Sahrens 
778789Sahrens 	/*
779789Sahrens 	 * If this buffer is evictable, transfer it from the
780789Sahrens 	 * old state list to the new state list.
781789Sahrens 	 */
7821544Seschrock 	if (refcnt == 0) {
783*3403Sbmc 		if (old_state != arc_anon) {
784*3403Sbmc 			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
7851544Seschrock 
7861544Seschrock 			if (use_mutex)
787*3403Sbmc 				mutex_enter(&old_state->arcs_mtx);
7881544Seschrock 
7891544Seschrock 			ASSERT(list_link_active(&ab->b_arc_node));
790*3403Sbmc 			list_remove(&old_state->arcs_list, ab);
791789Sahrens 
7922391Smaybee 			/*
7932391Smaybee 			 * If prefetching out of the ghost cache,
7942391Smaybee 			 * we will have a non-null datacnt.
7952391Smaybee 			 */
7962391Smaybee 			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
7972391Smaybee 				/* ghost elements have a ghost size */
7981544Seschrock 				ASSERT(ab->b_buf == NULL);
7991544Seschrock 				from_delta = ab->b_size;
800789Sahrens 			}
801*3403Sbmc 			ASSERT3U(old_state->arcs_lsize, >=, from_delta);
802*3403Sbmc 			atomic_add_64(&old_state->arcs_lsize, -from_delta);
8031544Seschrock 
8041544Seschrock 			if (use_mutex)
805*3403Sbmc 				mutex_exit(&old_state->arcs_mtx);
806789Sahrens 		}
807*3403Sbmc 		if (new_state != arc_anon) {
808*3403Sbmc 			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
809789Sahrens 
8101544Seschrock 			if (use_mutex)
811*3403Sbmc 				mutex_enter(&new_state->arcs_mtx);
8121544Seschrock 
813*3403Sbmc 			list_insert_head(&new_state->arcs_list, ab);
8141544Seschrock 
8151544Seschrock 			/* ghost elements have a ghost size */
8161544Seschrock 			if (GHOST_STATE(new_state)) {
8171544Seschrock 				ASSERT(ab->b_datacnt == 0);
8181544Seschrock 				ASSERT(ab->b_buf == NULL);
8191544Seschrock 				to_delta = ab->b_size;
8201544Seschrock 			}
821*3403Sbmc 			atomic_add_64(&new_state->arcs_lsize, to_delta);
822*3403Sbmc 			ASSERT3U(new_state->arcs_size + to_delta, >=,
823*3403Sbmc 			    new_state->arcs_lsize);
8241544Seschrock 
8251544Seschrock 			if (use_mutex)
826*3403Sbmc 				mutex_exit(&new_state->arcs_mtx);
827789Sahrens 		}
828789Sahrens 	}
829789Sahrens 
830789Sahrens 	ASSERT(!BUF_EMPTY(ab));
831*3403Sbmc 	if (new_state == arc_anon && old_state != arc_anon) {
832789Sahrens 		buf_hash_remove(ab);
833789Sahrens 	}
834789Sahrens 
8351544Seschrock 	/* adjust state sizes */
8361544Seschrock 	if (to_delta)
837*3403Sbmc 		atomic_add_64(&new_state->arcs_size, to_delta);
8381544Seschrock 	if (from_delta) {
839*3403Sbmc 		ASSERT3U(old_state->arcs_size, >=, from_delta);
840*3403Sbmc 		atomic_add_64(&old_state->arcs_size, -from_delta);
841789Sahrens 	}
842789Sahrens 	ab->b_state = new_state;
843789Sahrens }
844789Sahrens 
845789Sahrens arc_buf_t *
8463290Sjohansen arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
847789Sahrens {
848789Sahrens 	arc_buf_hdr_t *hdr;
849789Sahrens 	arc_buf_t *buf;
850789Sahrens 
851789Sahrens 	ASSERT3U(size, >, 0);
852789Sahrens 	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
853789Sahrens 	ASSERT(BUF_EMPTY(hdr));
854789Sahrens 	hdr->b_size = size;
8553290Sjohansen 	hdr->b_type = type;
856789Sahrens 	hdr->b_spa = spa;
857*3403Sbmc 	hdr->b_state = arc_anon;
858789Sahrens 	hdr->b_arc_access = 0;
859789Sahrens 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
860789Sahrens 	buf->b_hdr = hdr;
8612688Smaybee 	buf->b_data = NULL;
8621544Seschrock 	buf->b_efunc = NULL;
8631544Seschrock 	buf->b_private = NULL;
864789Sahrens 	buf->b_next = NULL;
865789Sahrens 	hdr->b_buf = buf;
8662688Smaybee 	arc_get_data_buf(buf);
8671544Seschrock 	hdr->b_datacnt = 1;
868789Sahrens 	hdr->b_flags = 0;
869789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
870789Sahrens 	(void) refcount_add(&hdr->b_refcnt, tag);
871789Sahrens 
872789Sahrens 	return (buf);
873789Sahrens }
874789Sahrens 
8752688Smaybee static arc_buf_t *
8762688Smaybee arc_buf_clone(arc_buf_t *from)
8771544Seschrock {
8782688Smaybee 	arc_buf_t *buf;
8792688Smaybee 	arc_buf_hdr_t *hdr = from->b_hdr;
8802688Smaybee 	uint64_t size = hdr->b_size;
8811544Seschrock 
8822688Smaybee 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
8832688Smaybee 	buf->b_hdr = hdr;
8842688Smaybee 	buf->b_data = NULL;
8852688Smaybee 	buf->b_efunc = NULL;
8862688Smaybee 	buf->b_private = NULL;
8872688Smaybee 	buf->b_next = hdr->b_buf;
8882688Smaybee 	hdr->b_buf = buf;
8892688Smaybee 	arc_get_data_buf(buf);
8902688Smaybee 	bcopy(from->b_data, buf->b_data, size);
8912688Smaybee 	hdr->b_datacnt += 1;
8922688Smaybee 	return (buf);
8931544Seschrock }
8941544Seschrock 
8951544Seschrock void
8961544Seschrock arc_buf_add_ref(arc_buf_t *buf, void* tag)
8971544Seschrock {
8982887Smaybee 	arc_buf_hdr_t *hdr;
8991544Seschrock 	kmutex_t *hash_lock;
9001544Seschrock 
9012724Smaybee 	/*
9022724Smaybee 	 * Check to see if this buffer is currently being evicted via
9032887Smaybee 	 * arc_do_user_evicts().
9042724Smaybee 	 */
9052887Smaybee 	mutex_enter(&arc_eviction_mtx);
9062887Smaybee 	hdr = buf->b_hdr;
9072887Smaybee 	if (hdr == NULL) {
9082887Smaybee 		mutex_exit(&arc_eviction_mtx);
9092724Smaybee 		return;
9102887Smaybee 	}
9112887Smaybee 	hash_lock = HDR_LOCK(hdr);
9122887Smaybee 	mutex_exit(&arc_eviction_mtx);
9132724Smaybee 
9142724Smaybee 	mutex_enter(hash_lock);
9151544Seschrock 	if (buf->b_data == NULL) {
9161544Seschrock 		/*
9171544Seschrock 		 * This buffer is evicted.
9181544Seschrock 		 */
9192724Smaybee 		mutex_exit(hash_lock);
9201544Seschrock 		return;
9211544Seschrock 	}
9221544Seschrock 
9232724Smaybee 	ASSERT(buf->b_hdr == hdr);
924*3403Sbmc 	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
9251544Seschrock 	add_reference(hdr, hash_lock, tag);
9262688Smaybee 	arc_access(hdr, hash_lock);
9272688Smaybee 	mutex_exit(hash_lock);
928*3403Sbmc 	ARCSTAT_BUMP(arcstat_hits);
929*3403Sbmc 	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
930*3403Sbmc 	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
931*3403Sbmc 	    data, metadata, hits);
9321544Seschrock }
9331544Seschrock 
934789Sahrens static void
9352688Smaybee arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
9361544Seschrock {
9371544Seschrock 	arc_buf_t **bufp;
9381544Seschrock 
9391544Seschrock 	/* free up data associated with the buf */
9401544Seschrock 	if (buf->b_data) {
9411544Seschrock 		arc_state_t *state = buf->b_hdr->b_state;
9421544Seschrock 		uint64_t size = buf->b_hdr->b_size;
9433290Sjohansen 		arc_buf_contents_t type = buf->b_hdr->b_type;
9441544Seschrock 
9453093Sahrens 		arc_cksum_verify(buf);
9462688Smaybee 		if (!recycle) {
9473290Sjohansen 			if (type == ARC_BUFC_METADATA) {
9483290Sjohansen 				zio_buf_free(buf->b_data, size);
9493290Sjohansen 			} else {
9503290Sjohansen 				ASSERT(type == ARC_BUFC_DATA);
9513290Sjohansen 				zio_data_buf_free(buf->b_data, size);
9523290Sjohansen 			}
953*3403Sbmc 			atomic_add_64(&arc_size, -size);
9542688Smaybee 		}
9551544Seschrock 		if (list_link_active(&buf->b_hdr->b_arc_node)) {
9561544Seschrock 			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
957*3403Sbmc 			ASSERT(state != arc_anon);
958*3403Sbmc 			ASSERT3U(state->arcs_lsize, >=, size);
959*3403Sbmc 			atomic_add_64(&state->arcs_lsize, -size);
9601544Seschrock 		}
961*3403Sbmc 		ASSERT3U(state->arcs_size, >=, size);
962*3403Sbmc 		atomic_add_64(&state->arcs_size, -size);
9631544Seschrock 		buf->b_data = NULL;
9641544Seschrock 		ASSERT(buf->b_hdr->b_datacnt > 0);
9651544Seschrock 		buf->b_hdr->b_datacnt -= 1;
9661544Seschrock 	}
9671544Seschrock 
9681544Seschrock 	/* only remove the buf if requested */
9691544Seschrock 	if (!all)
9701544Seschrock 		return;
9711544Seschrock 
9721544Seschrock 	/* remove the buf from the hdr list */
9731544Seschrock 	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
9741544Seschrock 		continue;
9751544Seschrock 	*bufp = buf->b_next;
9761544Seschrock 
9771544Seschrock 	ASSERT(buf->b_efunc == NULL);
9781544Seschrock 
9791544Seschrock 	/* clean up the buf */
9801544Seschrock 	buf->b_hdr = NULL;
9811544Seschrock 	kmem_cache_free(buf_cache, buf);
9821544Seschrock }
9831544Seschrock 
9841544Seschrock static void
9851544Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr)
986789Sahrens {
987789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
988*3403Sbmc 	ASSERT3P(hdr->b_state, ==, arc_anon);
9891544Seschrock 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
990789Sahrens 
991789Sahrens 	if (!BUF_EMPTY(hdr)) {
9921544Seschrock 		ASSERT(!HDR_IN_HASH_TABLE(hdr));
993789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
994789Sahrens 		hdr->b_birth = 0;
995789Sahrens 		hdr->b_cksum0 = 0;
996789Sahrens 	}
9971544Seschrock 	while (hdr->b_buf) {
998789Sahrens 		arc_buf_t *buf = hdr->b_buf;
999789Sahrens 
10001544Seschrock 		if (buf->b_efunc) {
10011544Seschrock 			mutex_enter(&arc_eviction_mtx);
10021544Seschrock 			ASSERT(buf->b_hdr != NULL);
10032688Smaybee 			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
10041544Seschrock 			hdr->b_buf = buf->b_next;
10052887Smaybee 			buf->b_hdr = &arc_eviction_hdr;
10061544Seschrock 			buf->b_next = arc_eviction_list;
10071544Seschrock 			arc_eviction_list = buf;
10081544Seschrock 			mutex_exit(&arc_eviction_mtx);
10091544Seschrock 		} else {
10102688Smaybee 			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
10111544Seschrock 		}
1012789Sahrens 	}
10133093Sahrens 	if (hdr->b_freeze_cksum != NULL) {
10143093Sahrens 		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
10153093Sahrens 		hdr->b_freeze_cksum = NULL;
10163093Sahrens 	}
10171544Seschrock 
1018789Sahrens 	ASSERT(!list_link_active(&hdr->b_arc_node));
1019789Sahrens 	ASSERT3P(hdr->b_hash_next, ==, NULL);
1020789Sahrens 	ASSERT3P(hdr->b_acb, ==, NULL);
1021789Sahrens 	kmem_cache_free(hdr_cache, hdr);
1022789Sahrens }
1023789Sahrens 
1024789Sahrens void
1025789Sahrens arc_buf_free(arc_buf_t *buf, void *tag)
1026789Sahrens {
1027789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
1028*3403Sbmc 	int hashed = hdr->b_state != arc_anon;
10291544Seschrock 
10301544Seschrock 	ASSERT(buf->b_efunc == NULL);
10311544Seschrock 	ASSERT(buf->b_data != NULL);
10321544Seschrock 
10331544Seschrock 	if (hashed) {
10341544Seschrock 		kmutex_t *hash_lock = HDR_LOCK(hdr);
10351544Seschrock 
10361544Seschrock 		mutex_enter(hash_lock);
10371544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
10381544Seschrock 		if (hdr->b_datacnt > 1)
10392688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
10401544Seschrock 		else
10411544Seschrock 			hdr->b_flags |= ARC_BUF_AVAILABLE;
10421544Seschrock 		mutex_exit(hash_lock);
10431544Seschrock 	} else if (HDR_IO_IN_PROGRESS(hdr)) {
10441544Seschrock 		int destroy_hdr;
10451544Seschrock 		/*
10461544Seschrock 		 * We are in the middle of an async write.  Don't destroy
10471544Seschrock 		 * this buffer unless the write completes before we finish
10481544Seschrock 		 * decrementing the reference count.
10491544Seschrock 		 */
10501544Seschrock 		mutex_enter(&arc_eviction_mtx);
10511544Seschrock 		(void) remove_reference(hdr, NULL, tag);
10521544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
10531544Seschrock 		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
10541544Seschrock 		mutex_exit(&arc_eviction_mtx);
10551544Seschrock 		if (destroy_hdr)
10561544Seschrock 			arc_hdr_destroy(hdr);
10571544Seschrock 	} else {
10581544Seschrock 		if (remove_reference(hdr, NULL, tag) > 0) {
10591544Seschrock 			ASSERT(HDR_IO_ERROR(hdr));
10602688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
10611544Seschrock 		} else {
10621544Seschrock 			arc_hdr_destroy(hdr);
10631544Seschrock 		}
10641544Seschrock 	}
10651544Seschrock }
10661544Seschrock 
10671544Seschrock int
10681544Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag)
10691544Seschrock {
10701544Seschrock 	arc_buf_hdr_t *hdr = buf->b_hdr;
1071789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
10721544Seschrock 	int no_callback = (buf->b_efunc == NULL);
10731544Seschrock 
1074*3403Sbmc 	if (hdr->b_state == arc_anon) {
10751544Seschrock 		arc_buf_free(buf, tag);
10761544Seschrock 		return (no_callback);
10771544Seschrock 	}
1078789Sahrens 
1079789Sahrens 	mutex_enter(hash_lock);
1080*3403Sbmc 	ASSERT(hdr->b_state != arc_anon);
10811544Seschrock 	ASSERT(buf->b_data != NULL);
1082789Sahrens 
10831544Seschrock 	(void) remove_reference(hdr, hash_lock, tag);
10841544Seschrock 	if (hdr->b_datacnt > 1) {
10851544Seschrock 		if (no_callback)
10862688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
10871544Seschrock 	} else if (no_callback) {
10881544Seschrock 		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
10891544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1090789Sahrens 	}
10911544Seschrock 	ASSERT(no_callback || hdr->b_datacnt > 1 ||
10921544Seschrock 	    refcount_is_zero(&hdr->b_refcnt));
1093789Sahrens 	mutex_exit(hash_lock);
10941544Seschrock 	return (no_callback);
1095789Sahrens }
1096789Sahrens 
1097789Sahrens int
1098789Sahrens arc_buf_size(arc_buf_t *buf)
1099789Sahrens {
1100789Sahrens 	return (buf->b_hdr->b_size);
1101789Sahrens }
1102789Sahrens 
1103789Sahrens /*
1104789Sahrens  * Evict buffers from the list until we've removed the specified number of
1105789Sahrens  * bytes.  Move the removed buffers to the appropriate evict state.
11062688Smaybee  * If the recycle flag is set, then attempt to "recycle" a buffer:
11072688Smaybee  * - look for a buffer to evict that is `bytes' long.
11082688Smaybee  * - return the data block from this buffer rather than freeing it.
11092688Smaybee  * This flag is used by callers that are trying to make space for a
11102688Smaybee  * new buffer in a full arc cache.
1111789Sahrens  */
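/*
 * For example (illustrative): a caller that needs a 16K data buffer can call
 * arc_evict(state, 16384, TRUE, ARC_BUFC_DATA) and, if an evictable 16K
 * buffer of that type is found, its data block is handed back ("stolen")
 * and reused directly instead of being freed and reallocated.
 */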
11122688Smaybee static void *
11133290Sjohansen arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
11143290Sjohansen     arc_buf_contents_t type)
1115789Sahrens {
1116789Sahrens 	arc_state_t *evicted_state;
11172688Smaybee 	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
11182918Smaybee 	arc_buf_hdr_t *ab, *ab_prev = NULL;
1119789Sahrens 	kmutex_t *hash_lock;
11202688Smaybee 	boolean_t have_lock;
11212918Smaybee 	void *stolen = NULL;
1122789Sahrens 
1123*3403Sbmc 	ASSERT(state == arc_mru || state == arc_mfu);
1124789Sahrens 
1125*3403Sbmc 	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1126789Sahrens 
1127*3403Sbmc 	mutex_enter(&state->arcs_mtx);
1128*3403Sbmc 	mutex_enter(&evicted_state->arcs_mtx);
1129789Sahrens 
1130*3403Sbmc 	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
1131*3403Sbmc 		ab_prev = list_prev(&state->arcs_list, ab);
11322391Smaybee 		/* prefetch buffers have a minimum lifespan */
11332688Smaybee 		if (HDR_IO_IN_PROGRESS(ab) ||
11342688Smaybee 		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
11352688Smaybee 		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
11362391Smaybee 			skipped++;
11372391Smaybee 			continue;
11382391Smaybee 		}
11392918Smaybee 		/* "lookahead" for better eviction candidate */
11402918Smaybee 		if (recycle && ab->b_size != bytes &&
11412918Smaybee 		    ab_prev && ab_prev->b_size == bytes)
11422688Smaybee 			continue;
1143789Sahrens 		hash_lock = HDR_LOCK(ab);
11442688Smaybee 		have_lock = MUTEX_HELD(hash_lock);
11452688Smaybee 		if (have_lock || mutex_tryenter(hash_lock)) {
1146789Sahrens 			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
11471544Seschrock 			ASSERT(ab->b_datacnt > 0);
11481544Seschrock 			while (ab->b_buf) {
11491544Seschrock 				arc_buf_t *buf = ab->b_buf;
11502688Smaybee 				if (buf->b_data) {
11511544Seschrock 					bytes_evicted += ab->b_size;
11523290Sjohansen 					if (recycle && ab->b_type == type &&
11533290Sjohansen 					    ab->b_size == bytes) {
11542918Smaybee 						stolen = buf->b_data;
11552918Smaybee 						recycle = FALSE;
11562918Smaybee 					}
11572688Smaybee 				}
11581544Seschrock 				if (buf->b_efunc) {
11591544Seschrock 					mutex_enter(&arc_eviction_mtx);
11602918Smaybee 					arc_buf_destroy(buf,
11612918Smaybee 					    buf->b_data == stolen, FALSE);
11621544Seschrock 					ab->b_buf = buf->b_next;
11632887Smaybee 					buf->b_hdr = &arc_eviction_hdr;
11641544Seschrock 					buf->b_next = arc_eviction_list;
11651544Seschrock 					arc_eviction_list = buf;
11661544Seschrock 					mutex_exit(&arc_eviction_mtx);
11671544Seschrock 				} else {
11682918Smaybee 					arc_buf_destroy(buf,
11692918Smaybee 					    buf->b_data == stolen, TRUE);
11701544Seschrock 				}
11711544Seschrock 			}
11721544Seschrock 			ASSERT(ab->b_datacnt == 0);
1173789Sahrens 			arc_change_state(evicted_state, ab, hash_lock);
11741544Seschrock 			ASSERT(HDR_IN_HASH_TABLE(ab));
11751544Seschrock 			ab->b_flags = ARC_IN_HASH_TABLE;
1176789Sahrens 			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
11772688Smaybee 			if (!have_lock)
11782688Smaybee 				mutex_exit(hash_lock);
11791544Seschrock 			if (bytes >= 0 && bytes_evicted >= bytes)
1180789Sahrens 				break;
1181789Sahrens 		} else {
11822688Smaybee 			missed += 1;
1183789Sahrens 		}
1184789Sahrens 	}
1185*3403Sbmc 
1186*3403Sbmc 	mutex_exit(&evicted_state->arcs_mtx);
1187*3403Sbmc 	mutex_exit(&state->arcs_mtx);
1188789Sahrens 
1189789Sahrens 	if (bytes_evicted < bytes)
1190789Sahrens 		dprintf("only evicted %lld bytes from %x",
1191789Sahrens 		    (longlong_t)bytes_evicted, state);
1192789Sahrens 
11932688Smaybee 	if (skipped)
1194*3403Sbmc 		ARCSTAT_INCR(arcstat_evict_skip, skipped);
1195*3403Sbmc 
11962688Smaybee 	if (missed)
1197*3403Sbmc 		ARCSTAT_INCR(arcstat_mutex_miss, missed);
1198*3403Sbmc 
11992918Smaybee 	return (stolen);
1200789Sahrens }
1201789Sahrens 
1202789Sahrens /*
1203789Sahrens  * Remove buffers from the list until we've removed the specified number of
1204789Sahrens  * bytes.  Destroy the buffers that are removed.
1205789Sahrens  */
1206789Sahrens static void
12071544Seschrock arc_evict_ghost(arc_state_t *state, int64_t bytes)
1208789Sahrens {
1209789Sahrens 	arc_buf_hdr_t *ab, *ab_prev;
1210789Sahrens 	kmutex_t *hash_lock;
12111544Seschrock 	uint64_t bytes_deleted = 0;
12121544Seschrock 	uint_t bufs_skipped = 0;
1213789Sahrens 
12141544Seschrock 	ASSERT(GHOST_STATE(state));
1215789Sahrens top:
1216*3403Sbmc 	mutex_enter(&state->arcs_mtx);
1217*3403Sbmc 	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
1218*3403Sbmc 		ab_prev = list_prev(&state->arcs_list, ab);
1219789Sahrens 		hash_lock = HDR_LOCK(ab);
1220789Sahrens 		if (mutex_tryenter(hash_lock)) {
12212391Smaybee 			ASSERT(!HDR_IO_IN_PROGRESS(ab));
12221544Seschrock 			ASSERT(ab->b_buf == NULL);
1223*3403Sbmc 			arc_change_state(arc_anon, ab, hash_lock);
1224789Sahrens 			mutex_exit(hash_lock);
1225*3403Sbmc 			ARCSTAT_BUMP(arcstat_deleted);
12261544Seschrock 			bytes_deleted += ab->b_size;
12271544Seschrock 			arc_hdr_destroy(ab);
1228789Sahrens 			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1229789Sahrens 			if (bytes >= 0 && bytes_deleted >= bytes)
1230789Sahrens 				break;
1231789Sahrens 		} else {
1232789Sahrens 			if (bytes < 0) {
1233*3403Sbmc 				mutex_exit(&state->arcs_mtx);
1234789Sahrens 				mutex_enter(hash_lock);
1235789Sahrens 				mutex_exit(hash_lock);
1236789Sahrens 				goto top;
1237789Sahrens 			}
1238789Sahrens 			bufs_skipped += 1;
1239789Sahrens 		}
1240789Sahrens 	}
1241*3403Sbmc 	mutex_exit(&state->arcs_mtx);
1242789Sahrens 
1243789Sahrens 	if (bufs_skipped) {
1244*3403Sbmc 		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1245789Sahrens 		ASSERT(bytes >= 0);
1246789Sahrens 	}
1247789Sahrens 
1248789Sahrens 	if (bytes_deleted < bytes)
1249789Sahrens 		dprintf("only deleted %lld bytes from %p",
1250789Sahrens 		    (longlong_t)bytes_deleted, state);
1251789Sahrens }
1252789Sahrens 
1253789Sahrens static void
1254789Sahrens arc_adjust(void)
1255789Sahrens {
1256*3403Sbmc 	int64_t top_sz, mru_over, arc_over, todelete;
1257789Sahrens 
1258*3403Sbmc 	top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
1259789Sahrens 
1260*3403Sbmc 	if (top_sz > arc_p && arc_mru->arcs_lsize > 0) {
1261*3403Sbmc 		int64_t toevict = MIN(arc_mru->arcs_lsize, top_sz - arc_p);
1262*3403Sbmc 		(void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_UNDEF);
1263*3403Sbmc 		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
1264789Sahrens 	}
1265789Sahrens 
1266*3403Sbmc 	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;
1267789Sahrens 
1268789Sahrens 	if (mru_over > 0) {
1269*3403Sbmc 		if (arc_mru_ghost->arcs_lsize > 0) {
1270*3403Sbmc 			todelete = MIN(arc_mru_ghost->arcs_lsize, mru_over);
1271*3403Sbmc 			arc_evict_ghost(arc_mru_ghost, todelete);
1272789Sahrens 		}
1273789Sahrens 	}
1274789Sahrens 
1275*3403Sbmc 	if ((arc_over = arc_size - arc_c) > 0) {
12761544Seschrock 		int64_t tbl_over;
1277789Sahrens 
1278*3403Sbmc 		if (arc_mfu->arcs_lsize > 0) {
1279*3403Sbmc 			int64_t toevict = MIN(arc_mfu->arcs_lsize, arc_over);
1280*3403Sbmc 			(void) arc_evict(arc_mfu, toevict, FALSE,
12813290Sjohansen 			    ARC_BUFC_UNDEF);
1282789Sahrens 		}
1283789Sahrens 
1284*3403Sbmc 		tbl_over = arc_size + arc_mru_ghost->arcs_lsize +
1285*3403Sbmc 		    arc_mfu_ghost->arcs_lsize - arc_c*2;
1286789Sahrens 
1287*3403Sbmc 		if (tbl_over > 0 && arc_mfu_ghost->arcs_lsize > 0) {
1288*3403Sbmc 			todelete = MIN(arc_mfu_ghost->arcs_lsize, tbl_over);
1289*3403Sbmc 			arc_evict_ghost(arc_mfu_ghost, todelete);
1290789Sahrens 		}
1291789Sahrens 	}
1292789Sahrens }
1293789Sahrens 
12941544Seschrock static void
12951544Seschrock arc_do_user_evicts(void)
12961544Seschrock {
12971544Seschrock 	mutex_enter(&arc_eviction_mtx);
12981544Seschrock 	while (arc_eviction_list != NULL) {
12991544Seschrock 		arc_buf_t *buf = arc_eviction_list;
13001544Seschrock 		arc_eviction_list = buf->b_next;
13011544Seschrock 		buf->b_hdr = NULL;
13021544Seschrock 		mutex_exit(&arc_eviction_mtx);
13031544Seschrock 
13041819Smaybee 		if (buf->b_efunc != NULL)
13051819Smaybee 			VERIFY(buf->b_efunc(buf) == 0);
13061544Seschrock 
13071544Seschrock 		buf->b_efunc = NULL;
13081544Seschrock 		buf->b_private = NULL;
13091544Seschrock 		kmem_cache_free(buf_cache, buf);
13101544Seschrock 		mutex_enter(&arc_eviction_mtx);
13111544Seschrock 	}
13121544Seschrock 	mutex_exit(&arc_eviction_mtx);
13131544Seschrock }
13141544Seschrock 
1315789Sahrens /*
1316789Sahrens  * Flush all *evictable* data from the cache.
1317789Sahrens  * NOTE: this will not touch "active" (i.e. referenced) data.
1318789Sahrens  */
1319789Sahrens void
1320789Sahrens arc_flush(void)
1321789Sahrens {
1322*3403Sbmc 	while (list_head(&arc_mru->arcs_list))
1323*3403Sbmc 		(void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_UNDEF);
1324*3403Sbmc 	while (list_head(&arc_mfu->arcs_list))
1325*3403Sbmc 		(void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_UNDEF);
1326789Sahrens 
1327*3403Sbmc 	arc_evict_ghost(arc_mru_ghost, -1);
1328*3403Sbmc 	arc_evict_ghost(arc_mfu_ghost, -1);
13291544Seschrock 
13301544Seschrock 	mutex_enter(&arc_reclaim_thr_lock);
13311544Seschrock 	arc_do_user_evicts();
13321544Seschrock 	mutex_exit(&arc_reclaim_thr_lock);
13331544Seschrock 	ASSERT(arc_eviction_list == NULL);
1334789Sahrens }
1335789Sahrens 
13363158Smaybee int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */
13372391Smaybee 
1338789Sahrens void
13393158Smaybee arc_shrink(void)
1340789Sahrens {
1341*3403Sbmc 	if (arc_c > arc_c_min) {
13423158Smaybee 		uint64_t to_free;
1343789Sahrens 
13442048Sstans #ifdef _KERNEL
1345*3403Sbmc 		to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
13462048Sstans #else
1347*3403Sbmc 		to_free = arc_c >> arc_shrink_shift;
13482048Sstans #endif
1349*3403Sbmc 		if (arc_c > arc_c_min + to_free)
1350*3403Sbmc 			atomic_add_64(&arc_c, -to_free);
13513158Smaybee 		else
1352*3403Sbmc 			arc_c = arc_c_min;
13532048Sstans 
1354*3403Sbmc 		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
1355*3403Sbmc 		if (arc_c > arc_size)
1356*3403Sbmc 			arc_c = MAX(arc_size, arc_c_min);
1357*3403Sbmc 		if (arc_p > arc_c)
1358*3403Sbmc 			arc_p = (arc_c >> 1);
1359*3403Sbmc 		ASSERT(arc_c >= arc_c_min);
1360*3403Sbmc 		ASSERT((int64_t)arc_p >= 0);
13613158Smaybee 	}
1362789Sahrens 
1363*3403Sbmc 	if (arc_size > arc_c)
13643158Smaybee 		arc_adjust();
1365789Sahrens }
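/*
 * Rough arithmetic for arc_shrink() (hypothetical sizes, assuming the
 * default arc_shrink_shift of 5): with arc_c = 4GB and no outstanding
 * needfree, to_free = 4GB >> 5 = 128MB, so each aggressive reclaim pass
 * lowers the target size by 1/32nd (never below arc_c_min) and reduces
 * arc_p by the same fraction of its current value.
 */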
1366789Sahrens 
1367789Sahrens static int
1368789Sahrens arc_reclaim_needed(void)
1369789Sahrens {
1370789Sahrens 	uint64_t extra;
1371789Sahrens 
1372789Sahrens #ifdef _KERNEL
13732048Sstans 
13742048Sstans 	if (needfree)
13752048Sstans 		return (1);
13762048Sstans 
1377789Sahrens 	/*
1378789Sahrens 	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1379789Sahrens 	 */
1380789Sahrens 	extra = desfree;
1381789Sahrens 
1382789Sahrens 	/*
1383789Sahrens 	 * check that we're out of range of the pageout scanner.  It starts to
1384789Sahrens 	 * schedule paging if freemem is less than lotsfree and needfree.
1385789Sahrens 	 * lotsfree is the high-water mark for pageout, and needfree is the
1386789Sahrens 	 * number of needed free pages.  We add extra pages here to make sure
1387789Sahrens 	 * the scanner doesn't start up while we're freeing memory.
1388789Sahrens 	 */
1389789Sahrens 	if (freemem < lotsfree + needfree + extra)
1390789Sahrens 		return (1);
1391789Sahrens 
1392789Sahrens 	/*
1393789Sahrens 	 * check to make sure that swapfs has enough space so that anon
1394789Sahrens 	 * reservations can still succeed. anon_resvmem() checks that the
1395789Sahrens 	 * availrmem is greater than swapfs_minfree, and the number of reserved
1396789Sahrens 	 * swap pages.  We also add a bit of extra here just to prevent
1397789Sahrens 	 * circumstances from getting really dire.
1398789Sahrens 	 */
1399789Sahrens 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1400789Sahrens 		return (1);
1401789Sahrens 
14023307Sjohansen 	/*
14033307Sjohansen 	 * If zio data pages are being allocated out of a separate heap segment,
14043307Sjohansen 	 * then check that the size of available vmem for this area remains
14053307Sjohansen 	 * above 1/4th free.  This needs to be done since the size of the
14063307Sjohansen 	 * non-default segment is smaller than physical memory, so we could
14073307Sjohansen 	 * conceivably run out of VA in that segment before running out of
14083307Sjohansen 	 * physical memory.
14093307Sjohansen 	 */
14103307Sjohansen 	if ((zio_arena != NULL) && (btop(vmem_size(zio_arena, VMEM_FREE)) <
14113307Sjohansen 	    (btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)))
14123307Sjohansen 		return (1);
14133307Sjohansen 
14141936Smaybee #if defined(__i386)
1415789Sahrens 	/*
1416789Sahrens 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1417789Sahrens 	 * kernel heap space before we ever run out of available physical
1418789Sahrens 	 * memory.  Most checks of the size of the heap_area compare against
1419789Sahrens 	 * tune.t_minarmem, which is the minimum available real memory that we
1420789Sahrens 	 * can have in the system.  However, this is generally fixed at 25 pages
1421789Sahrens 	 * which is so low that it's useless.  In this comparison, we seek to
1422789Sahrens 	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1423789Sahrens 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
1424789Sahrens 	 * free)
1425789Sahrens 	 */
1426789Sahrens 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1427789Sahrens 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1428789Sahrens 		return (1);
1429789Sahrens #endif
1430789Sahrens 
1431789Sahrens #else
1432789Sahrens 	if (spa_get_random(100) == 0)
1433789Sahrens 		return (1);
1434789Sahrens #endif
1435789Sahrens 	return (0);
1436789Sahrens }
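/*
 * Example of the zio_arena check above (hypothetical sizes): if the
 * separate zio data segment spans 512MB of VA and less than 128MB of
 * it (1/4th) remains free, arc_reclaim_needed() reports pressure even
 * though physical memory may still be plentiful.
 */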
1437789Sahrens 
1438789Sahrens static void
1439789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1440789Sahrens {
1441789Sahrens 	size_t			i;
1442789Sahrens 	kmem_cache_t		*prev_cache = NULL;
14433290Sjohansen 	kmem_cache_t		*prev_data_cache = NULL;
1444789Sahrens 	extern kmem_cache_t	*zio_buf_cache[];
14453290Sjohansen 	extern kmem_cache_t	*zio_data_buf_cache[];
1446789Sahrens 
14471484Sek110237 #ifdef _KERNEL
14481484Sek110237 	/*
14491484Sek110237 	 * First purge some DNLC entries, in case the DNLC is using
14501484Sek110237 	 * up too much memory.
14511484Sek110237 	 */
14521505Sek110237 	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
14531936Smaybee 
14541936Smaybee #if defined(__i386)
14551936Smaybee 	/*
14561936Smaybee 	 * Reclaim unused memory from all kmem caches.
14571936Smaybee 	 */
14581936Smaybee 	kmem_reap();
14591936Smaybee #endif
14601484Sek110237 #endif
14611484Sek110237 
1462789Sahrens 	/*
14631544Seschrock 	 * An aggressive reclamation will shrink the cache size as well as
14641544Seschrock 	 * reap free buffers from the arc kmem caches.
1465789Sahrens 	 */
1466789Sahrens 	if (strat == ARC_RECLAIM_AGGR)
14673158Smaybee 		arc_shrink();
1468789Sahrens 
1469789Sahrens 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1470789Sahrens 		if (zio_buf_cache[i] != prev_cache) {
1471789Sahrens 			prev_cache = zio_buf_cache[i];
1472789Sahrens 			kmem_cache_reap_now(zio_buf_cache[i]);
1473789Sahrens 		}
14743290Sjohansen 		if (zio_data_buf_cache[i] != prev_data_cache) {
14753290Sjohansen 			prev_data_cache = zio_data_buf_cache[i];
14763290Sjohansen 			kmem_cache_reap_now(zio_data_buf_cache[i]);
14773290Sjohansen 		}
1478789Sahrens 	}
14791544Seschrock 	kmem_cache_reap_now(buf_cache);
14801544Seschrock 	kmem_cache_reap_now(hdr_cache);
1481789Sahrens }
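/*
 * Note on the reap loop above: zio_buf_cache[]/zio_data_buf_cache[] are
 * indexed by block size in SPA_MINBLOCKSIZE units, and several adjacent
 * sizes can map to the same kmem cache, so prev_cache/prev_data_cache
 * are used to avoid reaping the same cache more than once per pass.
 */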
1482789Sahrens 
1483789Sahrens static void
1484789Sahrens arc_reclaim_thread(void)
1485789Sahrens {
1486789Sahrens 	clock_t			growtime = 0;
1487789Sahrens 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1488789Sahrens 	callb_cpr_t		cpr;
1489789Sahrens 
1490789Sahrens 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1491789Sahrens 
1492789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
1493789Sahrens 	while (arc_thread_exit == 0) {
1494789Sahrens 		if (arc_reclaim_needed()) {
1495789Sahrens 
1496*3403Sbmc 			if (arc_no_grow) {
1497789Sahrens 				if (last_reclaim == ARC_RECLAIM_CONS) {
1498789Sahrens 					last_reclaim = ARC_RECLAIM_AGGR;
1499789Sahrens 				} else {
1500789Sahrens 					last_reclaim = ARC_RECLAIM_CONS;
1501789Sahrens 				}
1502789Sahrens 			} else {
1503*3403Sbmc 				arc_no_grow = TRUE;
1504789Sahrens 				last_reclaim = ARC_RECLAIM_AGGR;
1505789Sahrens 				membar_producer();
1506789Sahrens 			}
1507789Sahrens 
1508789Sahrens 			/* reset the growth delay for every reclaim */
1509789Sahrens 			growtime = lbolt + (arc_grow_retry * hz);
15102856Snd150628 			ASSERT(growtime > 0);
1511789Sahrens 
1512789Sahrens 			arc_kmem_reap_now(last_reclaim);
1513789Sahrens 
1514789Sahrens 		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
1515*3403Sbmc 			arc_no_grow = FALSE;
1516789Sahrens 		}
1517789Sahrens 
1518*3403Sbmc 		if (2 * arc_c < arc_size +
1519*3403Sbmc 		    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
15203298Smaybee 			arc_adjust();
15213298Smaybee 
15221544Seschrock 		if (arc_eviction_list != NULL)
15231544Seschrock 			arc_do_user_evicts();
15241544Seschrock 
1525789Sahrens 		/* block until needed, or one second, whichever is shorter */
1526789Sahrens 		CALLB_CPR_SAFE_BEGIN(&cpr);
1527789Sahrens 		(void) cv_timedwait(&arc_reclaim_thr_cv,
1528789Sahrens 		    &arc_reclaim_thr_lock, (lbolt + hz));
1529789Sahrens 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1530789Sahrens 	}
1531789Sahrens 
1532789Sahrens 	arc_thread_exit = 0;
1533789Sahrens 	cv_broadcast(&arc_reclaim_thr_cv);
1534789Sahrens 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1535789Sahrens 	thread_exit();
1536789Sahrens }
1537789Sahrens 
15381544Seschrock /*
15391544Seschrock  * Adapt arc info given the number of bytes we are trying to add and
15401544Seschrock  * the state that we are coming from.  This function is only called
15411544Seschrock  * when we are adding new content to the cache.
15421544Seschrock  */
1543789Sahrens static void
15441544Seschrock arc_adapt(int bytes, arc_state_t *state)
1545789Sahrens {
15461544Seschrock 	int mult;
15471544Seschrock 
15481544Seschrock 	ASSERT(bytes > 0);
1549789Sahrens 	/*
15501544Seschrock 	 * Adapt the target size of the MRU list:
15511544Seschrock 	 *	- if we just hit in the MRU ghost list, then increase
15521544Seschrock 	 *	  the target size of the MRU list.
15531544Seschrock 	 *	- if we just hit in the MFU ghost list, then increase
15541544Seschrock 	 *	  the target size of the MFU list by decreasing the
15551544Seschrock 	 *	  target size of the MRU list.
1556789Sahrens 	 */
1557*3403Sbmc 	if (state == arc_mru_ghost) {
1558*3403Sbmc 		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
1559*3403Sbmc 		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
15601544Seschrock 
1561*3403Sbmc 		arc_p = MIN(arc_c, arc_p + bytes * mult);
1562*3403Sbmc 	} else if (state == arc_mfu_ghost) {
1563*3403Sbmc 		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
1564*3403Sbmc 		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
15651544Seschrock 
1566*3403Sbmc 		arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
15671544Seschrock 	}
1568*3403Sbmc 	ASSERT((int64_t)arc_p >= 0);
1569789Sahrens 
1570789Sahrens 	if (arc_reclaim_needed()) {
1571789Sahrens 		cv_signal(&arc_reclaim_thr_cv);
1572789Sahrens 		return;
1573789Sahrens 	}
1574789Sahrens 
1575*3403Sbmc 	if (arc_no_grow)
1576789Sahrens 		return;
1577789Sahrens 
1578*3403Sbmc 	if (arc_c >= arc_c_max)
15791544Seschrock 		return;
15801544Seschrock 
1581789Sahrens 	/*
15821544Seschrock 	 * If we're within (2 * maxblocksize) bytes of the target
15831544Seschrock 	 * cache size, increment the target cache size
1584789Sahrens 	 */
1585*3403Sbmc 	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
1586*3403Sbmc 		atomic_add_64(&arc_c, (int64_t)bytes);
1587*3403Sbmc 		if (arc_c > arc_c_max)
1588*3403Sbmc 			arc_c = arc_c_max;
1589*3403Sbmc 		else if (state == arc_anon)
1590*3403Sbmc 			atomic_add_64(&arc_p, (int64_t)bytes);
1591*3403Sbmc 		if (arc_p > arc_c)
1592*3403Sbmc 			arc_p = arc_c;
1593789Sahrens 	}
1594*3403Sbmc 	ASSERT((int64_t)arc_p >= 0);
1595789Sahrens }
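/*
 * Worked example of the arc_p adaptation above (hypothetical sizes):
 * if arc_mru_ghost holds 100MB and arc_mfu_ghost holds 400MB, a hit in
 * the MRU ghost list uses mult = 400/100 = 4, so an 8K block grows
 * arc_p by 32K (capped at arc_c); with the same sizes, a hit in the MFU
 * ghost list uses mult = 1 and shrinks arc_p by 8K (floored at 0).
 */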
1596789Sahrens 
1597789Sahrens /*
15981544Seschrock  * Check if the cache has reached its limits and eviction is required
15991544Seschrock  * prior to insert.
1600789Sahrens  */
1601789Sahrens static int
1602789Sahrens arc_evict_needed()
1603789Sahrens {
1604789Sahrens 	if (arc_reclaim_needed())
1605789Sahrens 		return (1);
1606789Sahrens 
1607*3403Sbmc 	return (arc_size > arc_c);
1608789Sahrens }
1609789Sahrens 
1610789Sahrens /*
16112688Smaybee  * The buffer, supplied as the first argument, needs a data block.
16122688Smaybee  * So, if we are at cache max, determine which cache should be victimized.
16132688Smaybee  * We have the following cases:
1614789Sahrens  *
1615*3403Sbmc  * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
1616789Sahrens  * In this situation if we're out of space, but the resident size of the MFU is
1617789Sahrens  * under the limit, victimize the MFU cache to satisfy this insertion request.
1618789Sahrens  *
1619*3403Sbmc  * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
1620789Sahrens  * Here, we've used up all of the available space for the MRU, so we need to
1621789Sahrens  * evict from our own cache instead.  Evict from the set of resident MRU
1622789Sahrens  * entries.
1623789Sahrens  *
1624*3403Sbmc  * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
1625789Sahrens  * c minus p represents the MFU space in the cache, since p is the size of the
1626789Sahrens  * cache that is dedicated to the MRU.  In this situation there's still space on
1627789Sahrens  * the MFU side, so the MRU side needs to be victimized.
1628789Sahrens  *
1629*3403Sbmc  * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
1630789Sahrens  * MFU's resident set is consuming more space than it has been allotted.  In
1631789Sahrens  * this situation, we must victimize our own cache, the MFU, for this insertion.
1632789Sahrens  */
1633789Sahrens static void
16342688Smaybee arc_get_data_buf(arc_buf_t *buf)
1635789Sahrens {
16363290Sjohansen 	arc_state_t		*state = buf->b_hdr->b_state;
16373290Sjohansen 	uint64_t		size = buf->b_hdr->b_size;
16383290Sjohansen 	arc_buf_contents_t	type = buf->b_hdr->b_type;
16392688Smaybee 
16402688Smaybee 	arc_adapt(size, state);
1641789Sahrens 
16422688Smaybee 	/*
16432688Smaybee 	 * We have not yet reached cache maximum size;
16442688Smaybee 	 * just allocate a new buffer.
16452688Smaybee 	 */
16462688Smaybee 	if (!arc_evict_needed()) {
16473290Sjohansen 		if (type == ARC_BUFC_METADATA) {
16483290Sjohansen 			buf->b_data = zio_buf_alloc(size);
16493290Sjohansen 		} else {
16503290Sjohansen 			ASSERT(type == ARC_BUFC_DATA);
16513290Sjohansen 			buf->b_data = zio_data_buf_alloc(size);
16523290Sjohansen 		}
1653*3403Sbmc 		atomic_add_64(&arc_size, size);
16542688Smaybee 		goto out;
16552688Smaybee 	}
16562688Smaybee 
16572688Smaybee 	/*
16582688Smaybee 	 * If we are prefetching from the mfu ghost list, this buffer
16592688Smaybee 	 * will end up on the mru list; so steal space from there.
16602688Smaybee 	 */
1661*3403Sbmc 	if (state == arc_mfu_ghost)
1662*3403Sbmc 		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
1663*3403Sbmc 	else if (state == arc_mru_ghost)
1664*3403Sbmc 		state = arc_mru;
1665789Sahrens 
1666*3403Sbmc 	if (state == arc_mru || state == arc_anon) {
1667*3403Sbmc 		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
1668*3403Sbmc 		state = (arc_p > mru_used) ? arc_mfu : arc_mru;
1669789Sahrens 	} else {
16702688Smaybee 		/* MFU cases */
1671*3403Sbmc 		uint64_t mfu_space = arc_c - arc_p;
1672*3403Sbmc 		state =  (mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
16732688Smaybee 	}
16743290Sjohansen 	if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) {
16753290Sjohansen 		if (type == ARC_BUFC_METADATA) {
16763290Sjohansen 			buf->b_data = zio_buf_alloc(size);
16773290Sjohansen 		} else {
16783290Sjohansen 			ASSERT(type == ARC_BUFC_DATA);
16793290Sjohansen 			buf->b_data = zio_data_buf_alloc(size);
16803290Sjohansen 		}
1681*3403Sbmc 		atomic_add_64(&arc_size, size);
1682*3403Sbmc 		ARCSTAT_BUMP(arcstat_recycle_miss);
16832688Smaybee 	}
16842688Smaybee 	ASSERT(buf->b_data != NULL);
16852688Smaybee out:
16862688Smaybee 	/*
16872688Smaybee 	 * Update the state size.  Note that ghost states have a
16882688Smaybee 	 * "ghost size" and so don't need to be updated.
16892688Smaybee 	 */
16902688Smaybee 	if (!GHOST_STATE(buf->b_hdr->b_state)) {
16912688Smaybee 		arc_buf_hdr_t *hdr = buf->b_hdr;
16922688Smaybee 
1693*3403Sbmc 		atomic_add_64(&hdr->b_state->arcs_size, size);
16942688Smaybee 		if (list_link_active(&hdr->b_arc_node)) {
16952688Smaybee 			ASSERT(refcount_is_zero(&hdr->b_refcnt));
1696*3403Sbmc 			atomic_add_64(&hdr->b_state->arcs_lsize, size);
1697789Sahrens 		}
16983298Smaybee 		/*
16993298Smaybee 		 * If we are growing the cache, and we are adding anonymous
1700*3403Sbmc 		 * data, and we have outgrown arc_p, update arc_p.
17013298Smaybee 		 */
1702*3403Sbmc 		if (arc_size < arc_c && hdr->b_state == arc_anon &&
1703*3403Sbmc 		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
1704*3403Sbmc 			arc_p = MIN(arc_c, arc_p + size);
1705789Sahrens 	}
1706789Sahrens }
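/*
 * Sketch of the recycling choice above (hypothetical sizes): for an MRU
 * insert with arc_p = 512MB and arc_anon + arc_mru = 600MB, case 2
 * applies and the new data block is recycled from arc_mru itself; if
 * the top level held only 400MB, case 1 would apply and arc_mfu would
 * be victimized instead.  When arc_evict() cannot recycle a buffer of
 * the right size and type, a fresh zio_(data_)buf_alloc() is done and
 * arcstat_recycle_miss is bumped.
 */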
1707789Sahrens 
1708789Sahrens /*
1709789Sahrens  * This routine is called whenever a buffer is accessed.
17101544Seschrock  * NOTE: the hash lock is dropped in this function.
1711789Sahrens  */
1712789Sahrens static void
17132688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
1714789Sahrens {
1715789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
1716789Sahrens 
1717*3403Sbmc 	if (buf->b_state == arc_anon) {
1718789Sahrens 		/*
1719789Sahrens 		 * This buffer is not in the cache, and does not
1720789Sahrens 		 * appear in our "ghost" list.  Add the new buffer
1721789Sahrens 		 * to the MRU state.
1722789Sahrens 		 */
1723789Sahrens 
1724789Sahrens 		ASSERT(buf->b_arc_access == 0);
1725789Sahrens 		buf->b_arc_access = lbolt;
17261544Seschrock 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1727*3403Sbmc 		arc_change_state(arc_mru, buf, hash_lock);
1728789Sahrens 
1729*3403Sbmc 	} else if (buf->b_state == arc_mru) {
1730789Sahrens 		/*
17312391Smaybee 		 * If this buffer is here because of a prefetch, then either:
17322391Smaybee 		 * - clear the flag if this is a "referencing" read
17332391Smaybee 		 *   (any subsequent access will bump this into the MFU state).
17342391Smaybee 		 * or
17352391Smaybee 		 * - move the buffer to the head of the list if this is
17362391Smaybee 		 *   another prefetch (to make it less likely to be evicted).
1737789Sahrens 		 */
1738789Sahrens 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
17392391Smaybee 			if (refcount_count(&buf->b_refcnt) == 0) {
17402391Smaybee 				ASSERT(list_link_active(&buf->b_arc_node));
1741*3403Sbmc 				mutex_enter(&arc_mru->arcs_mtx);
1742*3403Sbmc 				list_remove(&arc_mru->arcs_list, buf);
1743*3403Sbmc 				list_insert_head(&arc_mru->arcs_list, buf);
1744*3403Sbmc 				mutex_exit(&arc_mru->arcs_mtx);
17452391Smaybee 			} else {
17462391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
1747*3403Sbmc 				ARCSTAT_BUMP(arcstat_mru_hits);
17482391Smaybee 			}
17492391Smaybee 			buf->b_arc_access = lbolt;
1750789Sahrens 			return;
1751789Sahrens 		}
1752789Sahrens 
1753789Sahrens 		/*
1754789Sahrens 		 * This buffer has been "accessed" only once so far,
1755789Sahrens 		 * but it is still in the cache. If enough time has passed
1756789Sahrens 		 * since it was cached, move it to the MFU state.
1757789Sahrens 		 */
1758789Sahrens 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
1759789Sahrens 			/*
1760789Sahrens 			 * More than 125ms have passed since we
1761789Sahrens 			 * instantiated this buffer.  Move it to the
1762789Sahrens 			 * most frequently used state.
1763789Sahrens 			 */
1764789Sahrens 			buf->b_arc_access = lbolt;
17651544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1766*3403Sbmc 			arc_change_state(arc_mfu, buf, hash_lock);
1767789Sahrens 		}
1768*3403Sbmc 		ARCSTAT_BUMP(arcstat_mru_hits);
1769*3403Sbmc 	} else if (buf->b_state == arc_mru_ghost) {
1770789Sahrens 		arc_state_t	*new_state;
1771789Sahrens 		/*
1772789Sahrens 		 * This buffer has been "accessed" recently, but
1773789Sahrens 		 * was evicted from the cache.  Move it to the
1774789Sahrens 		 * MFU state.
1775789Sahrens 		 */
1776789Sahrens 
1777789Sahrens 		if (buf->b_flags & ARC_PREFETCH) {
1778*3403Sbmc 			new_state = arc_mru;
17792391Smaybee 			if (refcount_count(&buf->b_refcnt) > 0)
17802391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
17811544Seschrock 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
1782789Sahrens 		} else {
1783*3403Sbmc 			new_state = arc_mfu;
17841544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
1785789Sahrens 		}
1786789Sahrens 
1787789Sahrens 		buf->b_arc_access = lbolt;
1788789Sahrens 		arc_change_state(new_state, buf, hash_lock);
1789789Sahrens 
1790*3403Sbmc 		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
1791*3403Sbmc 	} else if (buf->b_state == arc_mfu) {
1792789Sahrens 		/*
1793789Sahrens 		 * This buffer has been accessed more than once and is
1794789Sahrens 		 * still in the cache.  Keep it in the MFU state.
1795789Sahrens 		 *
17962391Smaybee 		 * NOTE: an add_reference() that occurred when we did
17972391Smaybee 		 * the arc_read() will have kicked this off the list.
17982391Smaybee 		 * If it was a prefetch, we will explicitly move it to
17992391Smaybee 		 * the head of the list now.
1800789Sahrens 		 */
18012391Smaybee 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
18022391Smaybee 			ASSERT(refcount_count(&buf->b_refcnt) == 0);
18032391Smaybee 			ASSERT(list_link_active(&buf->b_arc_node));
1804*3403Sbmc 			mutex_enter(&arc_mfu->arcs_mtx);
1805*3403Sbmc 			list_remove(&arc_mfu->arcs_list, buf);
1806*3403Sbmc 			list_insert_head(&arc_mfu->arcs_list, buf);
1807*3403Sbmc 			mutex_exit(&arc_mfu->arcs_mtx);
18082391Smaybee 		}
1809*3403Sbmc 		ARCSTAT_BUMP(arcstat_mfu_hits);
18102391Smaybee 		buf->b_arc_access = lbolt;
1811*3403Sbmc 	} else if (buf->b_state == arc_mfu_ghost) {
1812*3403Sbmc 		arc_state_t	*new_state = arc_mfu;
1813789Sahrens 		/*
1814789Sahrens 		 * This buffer has been accessed more than once but has
1815789Sahrens 		 * been evicted from the cache.  Move it back to the
1816789Sahrens 		 * MFU state.
1817789Sahrens 		 */
1818789Sahrens 
18192391Smaybee 		if (buf->b_flags & ARC_PREFETCH) {
18202391Smaybee 			/*
18212391Smaybee 			 * This is a prefetch access...
18222391Smaybee 			 * move this block back to the MRU state.
18232391Smaybee 			 */
18242391Smaybee 			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
1825*3403Sbmc 			new_state = arc_mru;
18262391Smaybee 		}
18272391Smaybee 
1828789Sahrens 		buf->b_arc_access = lbolt;
18291544Seschrock 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
18302391Smaybee 		arc_change_state(new_state, buf, hash_lock);
1831789Sahrens 
1832*3403Sbmc 		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
1833789Sahrens 	} else {
1834789Sahrens 		ASSERT(!"invalid arc state");
1835789Sahrens 	}
1836789Sahrens }
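/*
 * Example lifecycle under arc_access() (illustrative only): a block
 * read for the first time lands on arc_mru; a later demand read, once
 * ARC_MINTIME has elapsed, promotes it to arc_mfu; if it is evicted and
 * then re-read while its header still sits on arc_mru_ghost or
 * arc_mfu_ghost, it is reinstated directly into arc_mfu, while prefetch
 * accesses of ghost entries land back on arc_mru.
 */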
1837789Sahrens 
1838789Sahrens /* a generic arc_done_func_t which you can use */
1839789Sahrens /* ARGSUSED */
1840789Sahrens void
1841789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
1842789Sahrens {
1843789Sahrens 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
18441544Seschrock 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1845789Sahrens }
1846789Sahrens 
1847789Sahrens /* a generic arc_done_func_t which you can use */
1848789Sahrens void
1849789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
1850789Sahrens {
1851789Sahrens 	arc_buf_t **bufp = arg;
1852789Sahrens 	if (zio && zio->io_error) {
18531544Seschrock 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
1854789Sahrens 		*bufp = NULL;
1855789Sahrens 	} else {
1856789Sahrens 		*bufp = buf;
1857789Sahrens 	}
1858789Sahrens }
1859789Sahrens 
1860789Sahrens static void
1861789Sahrens arc_read_done(zio_t *zio)
1862789Sahrens {
18631589Smaybee 	arc_buf_hdr_t	*hdr, *found;
1864789Sahrens 	arc_buf_t	*buf;
1865789Sahrens 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
1866789Sahrens 	kmutex_t	*hash_lock;
1867789Sahrens 	arc_callback_t	*callback_list, *acb;
1868789Sahrens 	int		freeable = FALSE;
1869789Sahrens 
1870789Sahrens 	buf = zio->io_private;
1871789Sahrens 	hdr = buf->b_hdr;
1872789Sahrens 
18731589Smaybee 	/*
18741589Smaybee 	 * The hdr was inserted into hash-table and removed from lists
18751589Smaybee 	 * The hdr was inserted into the hash table and removed from lists
18761589Smaybee 	 * it's in the hash table, and it should be legit since it's
18771589Smaybee 	 * not possible to evict it during the I/O.  The only possible
18781589Smaybee 	 * reason for it not to be found is if we were freed during the
18791589Smaybee 	 * read.
18801589Smaybee 	 */
18811589Smaybee 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
18823093Sahrens 	    &hash_lock);
1883789Sahrens 
18841589Smaybee 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
18851589Smaybee 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));
1886789Sahrens 
1887789Sahrens 	/* byteswap if necessary */
1888789Sahrens 	callback_list = hdr->b_acb;
1889789Sahrens 	ASSERT(callback_list != NULL);
1890789Sahrens 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
1891789Sahrens 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
1892789Sahrens 
18933093Sahrens 	arc_cksum_compute(buf);
18943093Sahrens 
1895789Sahrens 	/* create copies of the data buffer for the callers */
1896789Sahrens 	abuf = buf;
1897789Sahrens 	for (acb = callback_list; acb; acb = acb->acb_next) {
1898789Sahrens 		if (acb->acb_done) {
18992688Smaybee 			if (abuf == NULL)
19002688Smaybee 				abuf = arc_buf_clone(buf);
1901789Sahrens 			acb->acb_buf = abuf;
1902789Sahrens 			abuf = NULL;
1903789Sahrens 		}
1904789Sahrens 	}
1905789Sahrens 	hdr->b_acb = NULL;
1906789Sahrens 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
19071544Seschrock 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
19081544Seschrock 	if (abuf == buf)
19091544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1910789Sahrens 
1911789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
1912789Sahrens 
1913789Sahrens 	if (zio->io_error != 0) {
1914789Sahrens 		hdr->b_flags |= ARC_IO_ERROR;
1915*3403Sbmc 		if (hdr->b_state != arc_anon)
1916*3403Sbmc 			arc_change_state(arc_anon, hdr, hash_lock);
19171544Seschrock 		if (HDR_IN_HASH_TABLE(hdr))
19181544Seschrock 			buf_hash_remove(hdr);
1919789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
19202391Smaybee 		/* convert checksum errors into IO errors */
19211544Seschrock 		if (zio->io_error == ECKSUM)
19221544Seschrock 			zio->io_error = EIO;
1923789Sahrens 	}
1924789Sahrens 
19251544Seschrock 	/*
19262391Smaybee 	 * Broadcast before we drop the hash_lock to avoid the possibility
19272391Smaybee 	 * that the hdr (and hence the cv) might be freed before we get to
19282391Smaybee 	 * the cv_broadcast().
19291544Seschrock 	 */
19301544Seschrock 	cv_broadcast(&hdr->b_cv);
19311544Seschrock 
19321589Smaybee 	if (hash_lock) {
1933789Sahrens 		/*
1934789Sahrens 		 * Only call arc_access on anonymous buffers.  This is because
1935789Sahrens 		 * if we've issued an I/O for an evicted buffer, we've already
1936789Sahrens 		 * called arc_access (to prevent any simultaneous readers from
1937789Sahrens 		 * getting confused).
1938789Sahrens 		 */
1939*3403Sbmc 		if (zio->io_error == 0 && hdr->b_state == arc_anon)
19402688Smaybee 			arc_access(hdr, hash_lock);
19412688Smaybee 		mutex_exit(hash_lock);
1942789Sahrens 	} else {
1943789Sahrens 		/*
1944789Sahrens 		 * This block was freed while we waited for the read to
1945789Sahrens 		 * complete.  It has been removed from the hash table and
1946789Sahrens 		 * moved to the anonymous state (so that it won't show up
1947789Sahrens 		 * in the cache).
1948789Sahrens 		 */
1949*3403Sbmc 		ASSERT3P(hdr->b_state, ==, arc_anon);
1950789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
1951789Sahrens 	}
1952789Sahrens 
1953789Sahrens 	/* execute each callback and free its structure */
1954789Sahrens 	while ((acb = callback_list) != NULL) {
1955789Sahrens 		if (acb->acb_done)
1956789Sahrens 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1957789Sahrens 
1958789Sahrens 		if (acb->acb_zio_dummy != NULL) {
1959789Sahrens 			acb->acb_zio_dummy->io_error = zio->io_error;
1960789Sahrens 			zio_nowait(acb->acb_zio_dummy);
1961789Sahrens 		}
1962789Sahrens 
1963789Sahrens 		callback_list = acb->acb_next;
1964789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
1965789Sahrens 	}
1966789Sahrens 
1967789Sahrens 	if (freeable)
19681544Seschrock 		arc_hdr_destroy(hdr);
1969789Sahrens }
1970789Sahrens 
1971789Sahrens /*
1972789Sahrens  * "Read" the block at the specified DVA (in bp) via the
1973789Sahrens  * cache.  If the block is found in the cache, invoke the provided
1974789Sahrens  * callback immediately and return.  Note that the `zio' parameter
1975789Sahrens  * in the callback will be NULL in this case, since no IO was
1976789Sahrens  * required.  If the block is not in the cache pass the read request
1977789Sahrens  * on to the spa with a substitute callback function, so that the
1978789Sahrens  * requested block will be added to the cache.
1979789Sahrens  *
1980789Sahrens  * If a read request arrives for a block that has a read in-progress,
1981789Sahrens  * either wait for the in-progress read to complete (and return the
1982789Sahrens  * results); or, if this is a read with a "done" func, add a record
1983789Sahrens  * to the read to invoke the "done" func when the read completes,
1984789Sahrens  * and return; or just return.
1985789Sahrens  *
1986789Sahrens  * arc_read_done() will invoke all the requested "done" functions
1987789Sahrens  * for readers of this block.
1988789Sahrens  */
1989789Sahrens int
1990789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
1991789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
19922391Smaybee     uint32_t *arc_flags, zbookmark_t *zb)
1993789Sahrens {
1994789Sahrens 	arc_buf_hdr_t *hdr;
1995789Sahrens 	arc_buf_t *buf;
1996789Sahrens 	kmutex_t *hash_lock;
1997789Sahrens 	zio_t	*rzio;
1998789Sahrens 
1999789Sahrens top:
2000789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
20011544Seschrock 	if (hdr && hdr->b_datacnt > 0) {
2002789Sahrens 
20032391Smaybee 		*arc_flags |= ARC_CACHED;
20042391Smaybee 
2005789Sahrens 		if (HDR_IO_IN_PROGRESS(hdr)) {
20062391Smaybee 
20072391Smaybee 			if (*arc_flags & ARC_WAIT) {
20082391Smaybee 				cv_wait(&hdr->b_cv, hash_lock);
20092391Smaybee 				mutex_exit(hash_lock);
20102391Smaybee 				goto top;
20112391Smaybee 			}
20122391Smaybee 			ASSERT(*arc_flags & ARC_NOWAIT);
20132391Smaybee 
20142391Smaybee 			if (done) {
2015789Sahrens 				arc_callback_t	*acb = NULL;
2016789Sahrens 
2017789Sahrens 				acb = kmem_zalloc(sizeof (arc_callback_t),
2018789Sahrens 				    KM_SLEEP);
2019789Sahrens 				acb->acb_done = done;
2020789Sahrens 				acb->acb_private = private;
2021789Sahrens 				acb->acb_byteswap = swap;
2022789Sahrens 				if (pio != NULL)
2023789Sahrens 					acb->acb_zio_dummy = zio_null(pio,
2024789Sahrens 					    spa, NULL, NULL, flags);
2025789Sahrens 
2026789Sahrens 				ASSERT(acb->acb_done != NULL);
2027789Sahrens 				acb->acb_next = hdr->b_acb;
2028789Sahrens 				hdr->b_acb = acb;
2029789Sahrens 				add_reference(hdr, hash_lock, private);
2030789Sahrens 				mutex_exit(hash_lock);
2031789Sahrens 				return (0);
2032789Sahrens 			}
2033789Sahrens 			mutex_exit(hash_lock);
2034789Sahrens 			return (0);
2035789Sahrens 		}
2036789Sahrens 
2037*3403Sbmc 		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2038789Sahrens 
20391544Seschrock 		if (done) {
20402688Smaybee 			add_reference(hdr, hash_lock, private);
20411544Seschrock 			/*
20421544Seschrock 			 * If this block is already in use, create a new
20431544Seschrock 			 * copy of the data so that we will be guaranteed
20441544Seschrock 			 * that arc_release() will always succeed.
20451544Seschrock 			 */
20461544Seschrock 			buf = hdr->b_buf;
20471544Seschrock 			ASSERT(buf);
20481544Seschrock 			ASSERT(buf->b_data);
20492688Smaybee 			if (HDR_BUF_AVAILABLE(hdr)) {
20501544Seschrock 				ASSERT(buf->b_efunc == NULL);
20511544Seschrock 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
20522688Smaybee 			} else {
20532688Smaybee 				buf = arc_buf_clone(buf);
20541544Seschrock 			}
20552391Smaybee 		} else if (*arc_flags & ARC_PREFETCH &&
20562391Smaybee 		    refcount_count(&hdr->b_refcnt) == 0) {
20572391Smaybee 			hdr->b_flags |= ARC_PREFETCH;
2058789Sahrens 		}
2059789Sahrens 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
20602688Smaybee 		arc_access(hdr, hash_lock);
20612688Smaybee 		mutex_exit(hash_lock);
2062*3403Sbmc 		ARCSTAT_BUMP(arcstat_hits);
2063*3403Sbmc 		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2064*3403Sbmc 		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2065*3403Sbmc 		    data, metadata, hits);
2066*3403Sbmc 
2067789Sahrens 		if (done)
2068789Sahrens 			done(NULL, buf, private);
2069789Sahrens 	} else {
2070789Sahrens 		uint64_t size = BP_GET_LSIZE(bp);
2071789Sahrens 		arc_callback_t	*acb;
2072789Sahrens 
2073789Sahrens 		if (hdr == NULL) {
2074789Sahrens 			/* this block is not in the cache */
2075789Sahrens 			arc_buf_hdr_t	*exists;
20763290Sjohansen 			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
20773290Sjohansen 			buf = arc_buf_alloc(spa, size, private, type);
2078789Sahrens 			hdr = buf->b_hdr;
2079789Sahrens 			hdr->b_dva = *BP_IDENTITY(bp);
2080789Sahrens 			hdr->b_birth = bp->blk_birth;
2081789Sahrens 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2082789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2083789Sahrens 			if (exists) {
2084789Sahrens 				/* somebody beat us to the hash insert */
2085789Sahrens 				mutex_exit(hash_lock);
2086789Sahrens 				bzero(&hdr->b_dva, sizeof (dva_t));
2087789Sahrens 				hdr->b_birth = 0;
2088789Sahrens 				hdr->b_cksum0 = 0;
20891544Seschrock 				(void) arc_buf_remove_ref(buf, private);
2090789Sahrens 				goto top; /* restart the IO request */
2091789Sahrens 			}
20922391Smaybee 			/* if this is a prefetch, we don't have a reference */
20932391Smaybee 			if (*arc_flags & ARC_PREFETCH) {
20942391Smaybee 				(void) remove_reference(hdr, hash_lock,
20952391Smaybee 				    private);
20962391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
20972391Smaybee 			}
20982391Smaybee 			if (BP_GET_LEVEL(bp) > 0)
20992391Smaybee 				hdr->b_flags |= ARC_INDIRECT;
2100789Sahrens 		} else {
2101789Sahrens 			/* this block is in the ghost cache */
21021544Seschrock 			ASSERT(GHOST_STATE(hdr->b_state));
21031544Seschrock 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
21042391Smaybee 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
21052391Smaybee 			ASSERT(hdr->b_buf == NULL);
2106789Sahrens 
21072391Smaybee 			/* if this is a prefetch, we don't have a reference */
21082391Smaybee 			if (*arc_flags & ARC_PREFETCH)
21092391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
21102391Smaybee 			else
21112391Smaybee 				add_reference(hdr, hash_lock, private);
2112789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
21131544Seschrock 			buf->b_hdr = hdr;
21142688Smaybee 			buf->b_data = NULL;
21151544Seschrock 			buf->b_efunc = NULL;
21161544Seschrock 			buf->b_private = NULL;
21171544Seschrock 			buf->b_next = NULL;
21181544Seschrock 			hdr->b_buf = buf;
21192688Smaybee 			arc_get_data_buf(buf);
21201544Seschrock 			ASSERT(hdr->b_datacnt == 0);
21211544Seschrock 			hdr->b_datacnt = 1;
21222391Smaybee 
2123789Sahrens 		}
2124789Sahrens 
2125789Sahrens 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2126789Sahrens 		acb->acb_done = done;
2127789Sahrens 		acb->acb_private = private;
2128789Sahrens 		acb->acb_byteswap = swap;
2129789Sahrens 
2130789Sahrens 		ASSERT(hdr->b_acb == NULL);
2131789Sahrens 		hdr->b_acb = acb;
2132789Sahrens 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
2133789Sahrens 
2134789Sahrens 		/*
2135789Sahrens 		 * If the buffer has been evicted, migrate it to a present state
2136789Sahrens 		 * before issuing the I/O.  Once we drop the hash-table lock,
2137789Sahrens 		 * the header will be marked as I/O in progress and have an
2138789Sahrens 		 * attached buffer.  At this point, anybody who finds this
2139789Sahrens 		 * buffer ought to notice that it's legit but has a pending I/O.
2140789Sahrens 		 */
2141789Sahrens 
21421544Seschrock 		if (GHOST_STATE(hdr->b_state))
21432688Smaybee 			arc_access(hdr, hash_lock);
21442688Smaybee 		mutex_exit(hash_lock);
2145789Sahrens 
2146789Sahrens 		ASSERT3U(hdr->b_size, ==, size);
21471596Sahrens 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
21481596Sahrens 		    zbookmark_t *, zb);
2149*3403Sbmc 		ARCSTAT_BUMP(arcstat_misses);
2150*3403Sbmc 		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2151*3403Sbmc 		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2152*3403Sbmc 		    data, metadata, misses);
21531544Seschrock 
2154789Sahrens 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
21551544Seschrock 		    arc_read_done, buf, priority, flags, zb);
2156789Sahrens 
21572391Smaybee 		if (*arc_flags & ARC_WAIT)
2158789Sahrens 			return (zio_wait(rzio));
2159789Sahrens 
21602391Smaybee 		ASSERT(*arc_flags & ARC_NOWAIT);
2161789Sahrens 		zio_nowait(rzio);
2162789Sahrens 	}
2163789Sahrens 	return (0);
2164789Sahrens }
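/*
 * Hypothetical caller sketch (the callback, tag and bookmark names are
 * assumptions for illustration, not taken from this file):
 *
 *	uint32_t aflags = ARC_WAIT;
 *	(void) arc_read(NULL, spa, bp, byteswap_uint64_array,
 *	    my_done_func, my_arg, ZIO_PRIORITY_SYNC_READ,
 *	    ZIO_FLAG_CANFAIL, &aflags, &zb);
 *
 * With ARC_WAIT the call does not return until the data is in the
 * cache (waiting on the read zio if one was needed); with ARC_NOWAIT it
 * returns immediately and my_done_func is invoked either directly (on a
 * cache hit) or from arc_read_done() once the read completes.
 */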
2165789Sahrens 
2166789Sahrens /*
2167789Sahrens  * arc_read() variant to support pool traversal.  If the block is already
2168789Sahrens  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2169789Sahrens  * The idea is that we don't want pool traversal filling up memory, but
2170789Sahrens  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
2171789Sahrens  */
2172789Sahrens int
2173789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2174789Sahrens {
2175789Sahrens 	arc_buf_hdr_t *hdr;
2176789Sahrens 	kmutex_t *hash_mtx;
2177789Sahrens 	int rc = 0;
2178789Sahrens 
2179789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2180789Sahrens 
21811544Seschrock 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
21821544Seschrock 		arc_buf_t *buf = hdr->b_buf;
21831544Seschrock 
21841544Seschrock 		ASSERT(buf);
21851544Seschrock 		while (buf->b_data == NULL) {
21861544Seschrock 			buf = buf->b_next;
21871544Seschrock 			ASSERT(buf);
21881544Seschrock 		}
21891544Seschrock 		bcopy(buf->b_data, data, hdr->b_size);
21901544Seschrock 	} else {
2191789Sahrens 		rc = ENOENT;
21921544Seschrock 	}
2193789Sahrens 
2194789Sahrens 	if (hash_mtx)
2195789Sahrens 		mutex_exit(hash_mtx);
2196789Sahrens 
2197789Sahrens 	return (rc);
2198789Sahrens }
2199789Sahrens 
22001544Seschrock void
22011544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
22021544Seschrock {
22031544Seschrock 	ASSERT(buf->b_hdr != NULL);
2204*3403Sbmc 	ASSERT(buf->b_hdr->b_state != arc_anon);
22051544Seschrock 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
22061544Seschrock 	buf->b_efunc = func;
22071544Seschrock 	buf->b_private = private;
22081544Seschrock }
22091544Seschrock 
22101544Seschrock /*
22111544Seschrock  * This is used by the DMU to let the ARC know that a buffer is
22121544Seschrock  * being evicted, so the ARC should clean up.  If this arc buf
22131544Seschrock  * is not yet in the evicted state, it will be put there.
22141544Seschrock  */
22151544Seschrock int
22161544Seschrock arc_buf_evict(arc_buf_t *buf)
22171544Seschrock {
22182887Smaybee 	arc_buf_hdr_t *hdr;
22191544Seschrock 	kmutex_t *hash_lock;
22201544Seschrock 	arc_buf_t **bufp;
22211544Seschrock 
22222887Smaybee 	mutex_enter(&arc_eviction_mtx);
22232887Smaybee 	hdr = buf->b_hdr;
22241544Seschrock 	if (hdr == NULL) {
22251544Seschrock 		/*
22261544Seschrock 		 * We are in arc_do_user_evicts().
22271544Seschrock 		 */
22281544Seschrock 		ASSERT(buf->b_data == NULL);
22292887Smaybee 		mutex_exit(&arc_eviction_mtx);
22301544Seschrock 		return (0);
22311544Seschrock 	}
22322887Smaybee 	hash_lock = HDR_LOCK(hdr);
22332887Smaybee 	mutex_exit(&arc_eviction_mtx);
22341544Seschrock 
22351544Seschrock 	mutex_enter(hash_lock);
22361544Seschrock 
22372724Smaybee 	if (buf->b_data == NULL) {
22382724Smaybee 		/*
22392724Smaybee 		 * We are on the eviction list.
22402724Smaybee 		 */
22412724Smaybee 		mutex_exit(hash_lock);
22422724Smaybee 		mutex_enter(&arc_eviction_mtx);
22432724Smaybee 		if (buf->b_hdr == NULL) {
22442724Smaybee 			/*
22452724Smaybee 			 * We are already in arc_do_user_evicts().
22462724Smaybee 			 */
22472724Smaybee 			mutex_exit(&arc_eviction_mtx);
22482724Smaybee 			return (0);
22492724Smaybee 		} else {
22502724Smaybee 			arc_buf_t copy = *buf; /* structure assignment */
22512724Smaybee 			/*
22522724Smaybee 			 * Process this buffer now
22532724Smaybee 			 * but let arc_do_user_evicts() do the reaping.
22542724Smaybee 			 */
22552724Smaybee 			buf->b_efunc = NULL;
22562724Smaybee 			mutex_exit(&arc_eviction_mtx);
22572724Smaybee 			VERIFY(copy.b_efunc(&copy) == 0);
22582724Smaybee 			return (1);
22592724Smaybee 		}
22602724Smaybee 	}
22612724Smaybee 
22622724Smaybee 	ASSERT(buf->b_hdr == hdr);
22632724Smaybee 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2264*3403Sbmc 	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
22651544Seschrock 
22661544Seschrock 	/*
22671544Seschrock 	 * Pull this buffer off of the hdr
22681544Seschrock 	 */
22691544Seschrock 	bufp = &hdr->b_buf;
22701544Seschrock 	while (*bufp != buf)
22711544Seschrock 		bufp = &(*bufp)->b_next;
22721544Seschrock 	*bufp = buf->b_next;
22731544Seschrock 
22741544Seschrock 	ASSERT(buf->b_data != NULL);
22752688Smaybee 	arc_buf_destroy(buf, FALSE, FALSE);
22761544Seschrock 
22771544Seschrock 	if (hdr->b_datacnt == 0) {
22781544Seschrock 		arc_state_t *old_state = hdr->b_state;
22791544Seschrock 		arc_state_t *evicted_state;
22801544Seschrock 
22811544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
22821544Seschrock 
22831544Seschrock 		evicted_state =
2284*3403Sbmc 		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
22851544Seschrock 
2286*3403Sbmc 		mutex_enter(&old_state->arcs_mtx);
2287*3403Sbmc 		mutex_enter(&evicted_state->arcs_mtx);
22881544Seschrock 
22891544Seschrock 		arc_change_state(evicted_state, hdr, hash_lock);
22901544Seschrock 		ASSERT(HDR_IN_HASH_TABLE(hdr));
22911544Seschrock 		hdr->b_flags = ARC_IN_HASH_TABLE;
22921544Seschrock 
2293*3403Sbmc 		mutex_exit(&evicted_state->arcs_mtx);
2294*3403Sbmc 		mutex_exit(&old_state->arcs_mtx);
22951544Seschrock 	}
22961544Seschrock 	mutex_exit(hash_lock);
22971819Smaybee 
22981544Seschrock 	VERIFY(buf->b_efunc(buf) == 0);
22991544Seschrock 	buf->b_efunc = NULL;
23001544Seschrock 	buf->b_private = NULL;
23011544Seschrock 	buf->b_hdr = NULL;
23021544Seschrock 	kmem_cache_free(buf_cache, buf);
23031544Seschrock 	return (1);
23041544Seschrock }
23051544Seschrock 
2306789Sahrens /*
2307789Sahrens  * Release this buffer from the cache.  This must be done
2308789Sahrens  * after a read and prior to modifying the buffer contents.
2309789Sahrens  * If the buffer has more than one reference, we must make
2310789Sahrens  * a new hdr for the buffer.
2311789Sahrens  */
2312789Sahrens void
2313789Sahrens arc_release(arc_buf_t *buf, void *tag)
2314789Sahrens {
2315789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2316789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2317789Sahrens 
2318789Sahrens 	/* this buffer is not on any list */
2319789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2320789Sahrens 
2321*3403Sbmc 	if (hdr->b_state == arc_anon) {
2322789Sahrens 		/* this buffer is already released */
2323789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2324789Sahrens 		ASSERT(BUF_EMPTY(hdr));
23251544Seschrock 		ASSERT(buf->b_efunc == NULL);
23263093Sahrens 		arc_buf_thaw(buf);
2327789Sahrens 		return;
2328789Sahrens 	}
2329789Sahrens 
2330789Sahrens 	mutex_enter(hash_lock);
2331789Sahrens 
23321544Seschrock 	/*
23331544Seschrock 	 * Do we have more than one buf?
23341544Seschrock 	 */
23351544Seschrock 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2336789Sahrens 		arc_buf_hdr_t *nhdr;
2337789Sahrens 		arc_buf_t **bufp;
2338789Sahrens 		uint64_t blksz = hdr->b_size;
2339789Sahrens 		spa_t *spa = hdr->b_spa;
23403290Sjohansen 		arc_buf_contents_t type = hdr->b_type;
2341789Sahrens 
23421544Seschrock 		ASSERT(hdr->b_datacnt > 1);
2343789Sahrens 		/*
2344789Sahrens 		 * Pull the data off of this buf and attach it to
2345789Sahrens 		 * a new anonymous buf.
2346789Sahrens 		 */
23471544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
2348789Sahrens 		bufp = &hdr->b_buf;
23491544Seschrock 		while (*bufp != buf)
2350789Sahrens 			bufp = &(*bufp)->b_next;
2351789Sahrens 		*bufp = (*bufp)->b_next;
23521544Seschrock 
2353*3403Sbmc 		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
2354*3403Sbmc 		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
23551544Seschrock 		if (refcount_is_zero(&hdr->b_refcnt)) {
2356*3403Sbmc 			ASSERT3U(hdr->b_state->arcs_lsize, >=, hdr->b_size);
2357*3403Sbmc 			atomic_add_64(&hdr->b_state->arcs_lsize, -hdr->b_size);
23581544Seschrock 		}
23591544Seschrock 		hdr->b_datacnt -= 1;
23601544Seschrock 
2361789Sahrens 		mutex_exit(hash_lock);
2362789Sahrens 
2363789Sahrens 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2364789Sahrens 		nhdr->b_size = blksz;
2365789Sahrens 		nhdr->b_spa = spa;
23663290Sjohansen 		nhdr->b_type = type;
2367789Sahrens 		nhdr->b_buf = buf;
2368*3403Sbmc 		nhdr->b_state = arc_anon;
2369789Sahrens 		nhdr->b_arc_access = 0;
2370789Sahrens 		nhdr->b_flags = 0;
23711544Seschrock 		nhdr->b_datacnt = 1;
23723312Sahrens 		if (hdr->b_freeze_cksum != NULL) {
23733312Sahrens 			nhdr->b_freeze_cksum =
23743312Sahrens 			    kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
23753312Sahrens 			*nhdr->b_freeze_cksum = *hdr->b_freeze_cksum;
23763312Sahrens 		}
2377789Sahrens 		buf->b_hdr = nhdr;
2378789Sahrens 		buf->b_next = NULL;
2379789Sahrens 		(void) refcount_add(&nhdr->b_refcnt, tag);
2380*3403Sbmc 		atomic_add_64(&arc_anon->arcs_size, blksz);
2381789Sahrens 
2382789Sahrens 		hdr = nhdr;
2383789Sahrens 	} else {
23841544Seschrock 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2385789Sahrens 		ASSERT(!list_link_active(&hdr->b_arc_node));
2386789Sahrens 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2387*3403Sbmc 		arc_change_state(arc_anon, hdr, hash_lock);
2388789Sahrens 		hdr->b_arc_access = 0;
2389789Sahrens 		mutex_exit(hash_lock);
2390789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
2391789Sahrens 		hdr->b_birth = 0;
2392789Sahrens 		hdr->b_cksum0 = 0;
2393789Sahrens 	}
23941544Seschrock 	buf->b_efunc = NULL;
23951544Seschrock 	buf->b_private = NULL;
23963093Sahrens 	arc_buf_thaw(buf);
2397789Sahrens }
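/*
 * For example (illustrative): if several readers hold their own
 * arc_buf_t clones of the same cached block and one of them calls
 * arc_release() before modifying its copy, that buf is moved onto a
 * fresh anonymous header while the remaining clones stay attached to
 * the original (still cached) header, so the upcoming write cannot
 * disturb the cached data.
 */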
2398789Sahrens 
2399789Sahrens int
2400789Sahrens arc_released(arc_buf_t *buf)
2401789Sahrens {
2402*3403Sbmc 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
24031544Seschrock }
24041544Seschrock 
24051544Seschrock int
24061544Seschrock arc_has_callback(arc_buf_t *buf)
24071544Seschrock {
24081544Seschrock 	return (buf->b_efunc != NULL);
2409789Sahrens }
2410789Sahrens 
24111544Seschrock #ifdef ZFS_DEBUG
24121544Seschrock int
24131544Seschrock arc_referenced(arc_buf_t *buf)
24141544Seschrock {
24151544Seschrock 	return (refcount_count(&buf->b_hdr->b_refcnt));
24161544Seschrock }
24171544Seschrock #endif
24181544Seschrock 
2419789Sahrens static void
2420789Sahrens arc_write_done(zio_t *zio)
2421789Sahrens {
2422789Sahrens 	arc_buf_t *buf;
2423789Sahrens 	arc_buf_hdr_t *hdr;
2424789Sahrens 	arc_callback_t *acb;
2425789Sahrens 
2426789Sahrens 	buf = zio->io_private;
2427789Sahrens 	hdr = buf->b_hdr;
2428789Sahrens 	acb = hdr->b_acb;
2429789Sahrens 	hdr->b_acb = NULL;
24301544Seschrock 	ASSERT(acb != NULL);
2431789Sahrens 
2432789Sahrens 	/* this buffer is on no lists and is not in the hash table */
2433*3403Sbmc 	ASSERT3P(hdr->b_state, ==, arc_anon);
2434789Sahrens 
2435789Sahrens 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2436789Sahrens 	hdr->b_birth = zio->io_bp->blk_birth;
2437789Sahrens 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
24381544Seschrock 	/*
24391544Seschrock 	 * If the block to be written was all-zero, we may have
24401544Seschrock 	 * compressed it away.  In this case no write was performed
24411544Seschrock 	 * so there will be no dva/birth-date/checksum.  The buffer
24421544Seschrock 	 * must therefore remain anonymous (and uncached).
24431544Seschrock 	 */
2444789Sahrens 	if (!BUF_EMPTY(hdr)) {
2445789Sahrens 		arc_buf_hdr_t *exists;
2446789Sahrens 		kmutex_t *hash_lock;
2447789Sahrens 
24483093Sahrens 		arc_cksum_verify(buf);
24493093Sahrens 
2450789Sahrens 		exists = buf_hash_insert(hdr, &hash_lock);
2451789Sahrens 		if (exists) {
2452789Sahrens 			/*
2453789Sahrens 			 * This can only happen if we overwrite for
2454789Sahrens 			 * sync-to-convergence, because we remove
2455789Sahrens 			 * buffers from the hash table when we arc_free().
2456789Sahrens 			 */
2457789Sahrens 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2458789Sahrens 			    BP_IDENTITY(zio->io_bp)));
2459789Sahrens 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2460789Sahrens 			    zio->io_bp->blk_birth);
2461789Sahrens 
2462789Sahrens 			ASSERT(refcount_is_zero(&exists->b_refcnt));
2463*3403Sbmc 			arc_change_state(arc_anon, exists, hash_lock);
2464789Sahrens 			mutex_exit(hash_lock);
24651544Seschrock 			arc_hdr_destroy(exists);
2466789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2467789Sahrens 			ASSERT3P(exists, ==, NULL);
2468789Sahrens 		}
24691544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
24702688Smaybee 		arc_access(hdr, hash_lock);
24712688Smaybee 		mutex_exit(hash_lock);
24721544Seschrock 	} else if (acb->acb_done == NULL) {
24731544Seschrock 		int destroy_hdr;
24741544Seschrock 		/*
24751544Seschrock 		 * This is an anonymous buffer with no user callback;
24761544Seschrock 		 * destroy it if there are no active references.
24771544Seschrock 		 */
24781544Seschrock 		mutex_enter(&arc_eviction_mtx);
24791544Seschrock 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
24801544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
24811544Seschrock 		mutex_exit(&arc_eviction_mtx);
24821544Seschrock 		if (destroy_hdr)
24831544Seschrock 			arc_hdr_destroy(hdr);
24841544Seschrock 	} else {
24851544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2486789Sahrens 	}
24871544Seschrock 
24881544Seschrock 	if (acb->acb_done) {
2489789Sahrens 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
2490789Sahrens 		acb->acb_done(zio, buf, acb->acb_private);
2491789Sahrens 	}
2492789Sahrens 
24931544Seschrock 	kmem_free(acb, sizeof (arc_callback_t));
2494789Sahrens }
2495789Sahrens 
2496789Sahrens int
24971775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2498789Sahrens     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
2499789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
25001544Seschrock     uint32_t arc_flags, zbookmark_t *zb)
2501789Sahrens {
2502789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2503789Sahrens 	arc_callback_t	*acb;
2504789Sahrens 	zio_t	*rzio;
2505789Sahrens 
2506789Sahrens 	/* this is a private buffer - no locking required */
2507*3403Sbmc 	ASSERT3P(hdr->b_state, ==, arc_anon);
2508789Sahrens 	ASSERT(BUF_EMPTY(hdr));
2509789Sahrens 	ASSERT(!HDR_IO_ERROR(hdr));
25102237Smaybee 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
25112237Smaybee 	ASSERT(hdr->b_acb == 0);
2512789Sahrens 	acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2513789Sahrens 	acb->acb_done = done;
2514789Sahrens 	acb->acb_private = private;
2515789Sahrens 	acb->acb_byteswap = (arc_byteswap_func_t *)-1;
2516789Sahrens 	hdr->b_acb = acb;
25171544Seschrock 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
25183093Sahrens 	arc_cksum_compute(buf);
25191775Sbillm 	rzio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
25201544Seschrock 	    buf->b_data, hdr->b_size, arc_write_done, buf, priority, flags, zb);
2521789Sahrens 
2522789Sahrens 	if (arc_flags & ARC_WAIT)
2523789Sahrens 		return (zio_wait(rzio));
2524789Sahrens 
2525789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2526789Sahrens 	zio_nowait(rzio);
2527789Sahrens 
2528789Sahrens 	return (0);
2529789Sahrens }
2530789Sahrens 
2531789Sahrens int
2532789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2533789Sahrens     zio_done_func_t *done, void *private, uint32_t arc_flags)
2534789Sahrens {
2535789Sahrens 	arc_buf_hdr_t *ab;
2536789Sahrens 	kmutex_t *hash_lock;
2537789Sahrens 	zio_t	*zio;
2538789Sahrens 
2539789Sahrens 	/*
2540789Sahrens 	 * If this buffer is in the cache, release it, so it
2541789Sahrens 	 * can be re-used.
2542789Sahrens 	 */
2543789Sahrens 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2544789Sahrens 	if (ab != NULL) {
2545789Sahrens 		/*
2546789Sahrens 		 * The checksum of blocks to free is not always
2547789Sahrens 		 * preserved (e.g. on the deadlist).  However, if it is
2548789Sahrens 		 * nonzero, it should match what we have in the cache.
2549789Sahrens 		 */
2550789Sahrens 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
2551789Sahrens 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
2552*3403Sbmc 		if (ab->b_state != arc_anon)
2553*3403Sbmc 			arc_change_state(arc_anon, ab, hash_lock);
25542391Smaybee 		if (HDR_IO_IN_PROGRESS(ab)) {
25552391Smaybee 			/*
25562391Smaybee 			 * This should only happen when we prefetch.
25572391Smaybee 			 */
25582391Smaybee 			ASSERT(ab->b_flags & ARC_PREFETCH);
25592391Smaybee 			ASSERT3U(ab->b_datacnt, ==, 1);
25602391Smaybee 			ab->b_flags |= ARC_FREED_IN_READ;
25612391Smaybee 			if (HDR_IN_HASH_TABLE(ab))
25622391Smaybee 				buf_hash_remove(ab);
25632391Smaybee 			ab->b_arc_access = 0;
25642391Smaybee 			bzero(&ab->b_dva, sizeof (dva_t));
25652391Smaybee 			ab->b_birth = 0;
25662391Smaybee 			ab->b_cksum0 = 0;
25672391Smaybee 			ab->b_buf->b_efunc = NULL;
25682391Smaybee 			ab->b_buf->b_private = NULL;
25692391Smaybee 			mutex_exit(hash_lock);
25702391Smaybee 		} else if (refcount_is_zero(&ab->b_refcnt)) {
2571789Sahrens 			mutex_exit(hash_lock);
25721544Seschrock 			arc_hdr_destroy(ab);
2573*3403Sbmc 			ARCSTAT_BUMP(arcstat_deleted);
2574789Sahrens 		} else {
25751589Smaybee 			/*
25762391Smaybee 			 * We still have an active reference on this
25772391Smaybee 			 * buffer.  This can happen, e.g., from
25782391Smaybee 			 * dbuf_unoverride().
25791589Smaybee 			 */
25802391Smaybee 			ASSERT(!HDR_IN_HASH_TABLE(ab));
2581789Sahrens 			ab->b_arc_access = 0;
2582789Sahrens 			bzero(&ab->b_dva, sizeof (dva_t));
2583789Sahrens 			ab->b_birth = 0;
2584789Sahrens 			ab->b_cksum0 = 0;
25851544Seschrock 			ab->b_buf->b_efunc = NULL;
25861544Seschrock 			ab->b_buf->b_private = NULL;
2587789Sahrens 			mutex_exit(hash_lock);
2588789Sahrens 		}
2589789Sahrens 	}
2590789Sahrens 
2591789Sahrens 	zio = zio_free(pio, spa, txg, bp, done, private);
2592789Sahrens 
2593789Sahrens 	if (arc_flags & ARC_WAIT)
2594789Sahrens 		return (zio_wait(zio));
2595789Sahrens 
2596789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
2597789Sahrens 	zio_nowait(zio);
2598789Sahrens 
2599789Sahrens 	return (0);
2600789Sahrens }
2601789Sahrens 
2602789Sahrens void
2603789Sahrens arc_tempreserve_clear(uint64_t tempreserve)
2604789Sahrens {
2605789Sahrens 	atomic_add_64(&arc_tempreserve, -tempreserve);
2606789Sahrens 	ASSERT((int64_t)arc_tempreserve >= 0);
2607789Sahrens }
2608789Sahrens 
2609789Sahrens int
2610789Sahrens arc_tempreserve_space(uint64_t tempreserve)
2611789Sahrens {
2612789Sahrens #ifdef ZFS_DEBUG
2613789Sahrens 	/*
2614789Sahrens 	 * Once in a while, fail for no reason.  Everything should cope.
2615789Sahrens 	 */
2616789Sahrens 	if (spa_get_random(10000) == 0) {
2617789Sahrens 		dprintf("forcing random failure\n");
2618789Sahrens 		return (ERESTART);
2619789Sahrens 	}
2620789Sahrens #endif
2621*3403Sbmc 	if (tempreserve > arc_c/4 && !arc_no_grow)
2622*3403Sbmc 		arc_c = MIN(arc_c_max, tempreserve * 4);
2623*3403Sbmc 	if (tempreserve > arc_c)
2624982Smaybee 		return (ENOMEM);
2625982Smaybee 
2626789Sahrens 	/*
2627982Smaybee 	 * Throttle writes when the amount of dirty data in the cache
2628982Smaybee 	 * gets too large.  We try to keep the cache less than half full
2629982Smaybee 	 * of dirty blocks so that our sync times don't grow too large.
2630982Smaybee 	 * Note: if two requests come in concurrently, we might let them
2631982Smaybee 	 * both succeed, when one of them should fail.  Not a huge deal.
2632982Smaybee 	 *
2633982Smaybee 	 * XXX The limit should be adjusted dynamically to keep the time
2634982Smaybee 	 * to sync a dataset fixed (around 1-5 seconds?).
2635789Sahrens 	 */
2636789Sahrens 
2637*3403Sbmc 	if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
2638*3403Sbmc 	    arc_tempreserve + arc_anon->arcs_size > arc_c / 4) {
2639789Sahrens 		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2640*3403Sbmc 		    "tempreserve=%lluK arc_c=%lluK\n",
2641*3403Sbmc 		    arc_tempreserve>>10, arc_anon->arcs_lsize>>10,
2642*3403Sbmc 		    tempreserve>>10, arc_c>>10);
2643789Sahrens 		return (ERESTART);
2644789Sahrens 	}
2645789Sahrens 	atomic_add_64(&arc_tempreserve, tempreserve);
2646789Sahrens 	return (0);
2647789Sahrens }
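
/*
 * A minimal sketch of the reserve/clear protocol implemented by the two
 * functions above, as a caller (e.g. the DMU transaction code) might use
 * it.  example_dirty_reserve() is a hypothetical helper, not part of this
 * file; in practice ERESTART is usually propagated to the caller so it
 * can wait for the current txg to sync rather than spinning:
 *
 *	static int
 *	example_dirty_reserve(uint64_t nbytes)
 *	{
 *		int err;
 *
 *		while ((err = arc_tempreserve_space(nbytes)) == ERESTART)
 *			delay(1);		(throttled: back off, retry)
 *		if (err != 0)
 *			return (err);		(typically ENOMEM)
 *		...dirty the data...
 *		arc_tempreserve_clear(nbytes);	(release the reservation)
 *		return (0);
 *	}
 */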
2648789Sahrens 
2649789Sahrens void
2650789Sahrens arc_init(void)
2651789Sahrens {
2652789Sahrens 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2653789Sahrens 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2654789Sahrens 
26552391Smaybee 	/* Convert seconds to clock ticks */
26562638Sperrin 	arc_min_prefetch_lifespan = 1 * hz;
26572391Smaybee 
2658789Sahrens 	/* Start out with 1/8 of all memory */
2659*3403Sbmc 	arc_c = physmem * PAGESIZE / 8;
2660789Sahrens 
2661789Sahrens #ifdef _KERNEL
2662789Sahrens 	/*
2663789Sahrens 	 * On architectures where the physical memory can be larger
2664789Sahrens 	 * than the addressable space (Intel in 32-bit mode), we may
2665789Sahrens 	 * need to limit the cache to 1/8 of VM size.
2666789Sahrens 	 */
2667*3403Sbmc 	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2668789Sahrens #endif
2669789Sahrens 
2670982Smaybee 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2671*3403Sbmc 	arc_c_min = MAX(arc_c / 4, 64<<20);
2672982Smaybee 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2673*3403Sbmc 	if (arc_c * 8 >= 1<<30)
2674*3403Sbmc 		arc_c_max = (arc_c * 8) - (1<<30);
2675789Sahrens 	else
2676*3403Sbmc 		arc_c_max = arc_c_min;
2677*3403Sbmc 	arc_c_max = MAX(arc_c * 6, arc_c_max);
26782885Sahrens 
26792885Sahrens 	/*
26802885Sahrens 	 * Allow the tunables to override our calculations if they are
26812885Sahrens 	 * reasonable (i.e. over 64MB)
26822885Sahrens 	 */
26832885Sahrens 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
2684*3403Sbmc 		arc_c_max = zfs_arc_max;
2685*3403Sbmc 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
2686*3403Sbmc 		arc_c_min = zfs_arc_min;
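
	/*
	 * For example (assuming the usual /etc/system mechanism for
	 * kernel tunables), an administrator could cap the ARC at 1GB
	 * and floor it at 256MB with:
	 *
	 *	set zfs:zfs_arc_max = 0x40000000
	 *	set zfs:zfs_arc_min = 0x10000000
	 *
	 * Values at or below 64MB, or outside the checks above, are
	 * silently ignored and the calculated defaults are kept.
	 */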
26872885Sahrens 
2688*3403Sbmc 	arc_c = arc_c_max;
2689*3403Sbmc 	arc_p = (arc_c >> 1);
2690789Sahrens 
2691789Sahrens 	/* if kmem_flags are set, let's try to use less memory */
2692789Sahrens 	if (kmem_debugging())
2693*3403Sbmc 		arc_c = arc_c / 2;
2694*3403Sbmc 	if (arc_c < arc_c_min)
2695*3403Sbmc 		arc_c = arc_c_min;
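
	/*
	 * Worked example of the sizing above for a hypothetical 64-bit
	 * machine with 8GB of physical memory and no tunables set:
	 *
	 *	arc_c     = 8GB / 8                  = 1GB
	 *	arc_c_min = MAX(1GB / 4, 64MB)       = 256MB
	 *	arc_c_max = MAX(6 * 1GB, 8GB - 1GB)  = 7GB
	 *	arc_c     = arc_c_max                = 7GB
	 *	arc_p     = arc_c / 2                = 3.5GB
	 *
	 * (With kmem debugging enabled, arc_c would start at 3.5GB
	 * instead.)
	 */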
2696789Sahrens 
2697*3403Sbmc 	arc_anon = &ARC_anon;
2698*3403Sbmc 	arc_mru = &ARC_mru;
2699*3403Sbmc 	arc_mru_ghost = &ARC_mru_ghost;
2700*3403Sbmc 	arc_mfu = &ARC_mfu;
2701*3403Sbmc 	arc_mfu_ghost = &ARC_mfu_ghost;
2702*3403Sbmc 	arc_size = 0;
2703789Sahrens 
2704*3403Sbmc 	mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2705*3403Sbmc 	mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2706*3403Sbmc 	mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2707*3403Sbmc 	mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
2708*3403Sbmc 	mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
27092688Smaybee 
2710*3403Sbmc 	list_create(&arc_mru->arcs_list, sizeof (arc_buf_hdr_t),
2711*3403Sbmc 	    offsetof(arc_buf_hdr_t, b_arc_node));
2712*3403Sbmc 	list_create(&arc_mru_ghost->arcs_list, sizeof (arc_buf_hdr_t),
2713789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
2714*3403Sbmc 	list_create(&arc_mfu->arcs_list, sizeof (arc_buf_hdr_t),
2715789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
2716*3403Sbmc 	list_create(&arc_mfu_ghost->arcs_list, sizeof (arc_buf_hdr_t),
2717789Sahrens 	    offsetof(arc_buf_hdr_t, b_arc_node));
2718789Sahrens 
2719789Sahrens 	buf_init();
2720789Sahrens 
2721789Sahrens 	arc_thread_exit = 0;
27221544Seschrock 	arc_eviction_list = NULL;
27231544Seschrock 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
27242887Smaybee 	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
2725789Sahrens 
2726*3403Sbmc 	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
2727*3403Sbmc 	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
2728*3403Sbmc 
2729*3403Sbmc 	if (arc_ksp != NULL) {
2730*3403Sbmc 		arc_ksp->ks_data = &arc_stats;
2731*3403Sbmc 		kstat_install(arc_ksp);
2732*3403Sbmc 	}
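
	/*
	 * The counters exported above appear under the "zfs" module as
	 * the "arcstats" kstat, so (assuming the usual kstat(1M)
	 * utility) they can be inspected from userland with, e.g.:
	 *
	 *	kstat -m zfs -n arcstats
	 *	kstat -p zfs:0:arcstats:hits
	 */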
2733*3403Sbmc 
2734789Sahrens 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
2735789Sahrens 	    TS_RUN, minclsyspri);
27363158Smaybee 
27373158Smaybee 	arc_dead = FALSE;
2738789Sahrens }
2739789Sahrens 
2740789Sahrens void
2741789Sahrens arc_fini(void)
2742789Sahrens {
2743789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
2744789Sahrens 	arc_thread_exit = 1;
2745789Sahrens 	while (arc_thread_exit != 0)
2746789Sahrens 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
2747789Sahrens 	mutex_exit(&arc_reclaim_thr_lock);
2748789Sahrens 
2749789Sahrens 	arc_flush();
2750789Sahrens 
2751789Sahrens 	arc_dead = TRUE;
2752789Sahrens 
2753*3403Sbmc 	if (arc_ksp != NULL) {
2754*3403Sbmc 		kstat_delete(arc_ksp);
2755*3403Sbmc 		arc_ksp = NULL;
2756*3403Sbmc 	}
2757*3403Sbmc 
27581544Seschrock 	mutex_destroy(&arc_eviction_mtx);
2759789Sahrens 	mutex_destroy(&arc_reclaim_thr_lock);
2760789Sahrens 	cv_destroy(&arc_reclaim_thr_cv);
2761789Sahrens 
2762*3403Sbmc 	list_destroy(&arc_mru->arcs_list);
2763*3403Sbmc 	list_destroy(&arc_mru_ghost->arcs_list);
2764*3403Sbmc 	list_destroy(&arc_mfu->arcs_list);
2765*3403Sbmc 	list_destroy(&arc_mfu_ghost->arcs_list);
2766789Sahrens 
2767*3403Sbmc 	mutex_destroy(&arc_anon->arcs_mtx);
2768*3403Sbmc 	mutex_destroy(&arc_mru->arcs_mtx);
2769*3403Sbmc 	mutex_destroy(&arc_mru_ghost->arcs_mtx);
2770*3403Sbmc 	mutex_destroy(&arc_mfu->arcs_mtx);
2771*3403Sbmc 	mutex_destroy(&arc_mfu_ghost->arcs_mtx);
27722856Snd150628 
2773789Sahrens 	buf_fini();
2774789Sahrens }
2775