xref: /onnv-gate/usr/src/uts/common/fs/zfs/arc.c (revision 5450)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51484Sek110237  * Common Development and Distribution License (the "License").
61484Sek110237  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
223403Sbmc  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
26789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27789Sahrens 
28789Sahrens /*
293403Sbmc  * DVA-based Adjustable Replacement Cache
30789Sahrens  *
311544Seschrock  * While much of the theory of operation used here is
321544Seschrock  * based on the self-tuning, low overhead replacement cache
33789Sahrens  * presented by Megiddo and Modha at FAST 2003, there are some
34789Sahrens  * significant differences:
35789Sahrens  *
36789Sahrens  * 1. The Megiddo and Modha model assumes any page is evictable.
37789Sahrens  * Pages in its cache cannot be "locked" into memory.  This makes
38789Sahrens  * the eviction algorithm simple: evict the last page in the list.
39789Sahrens  * This also makes the performance characteristics easy to reason
40789Sahrens  * about.  Our cache is not so simple.  At any given moment, some
41789Sahrens  * subset of the blocks in the cache are un-evictable because we
42789Sahrens  * have handed out a reference to them.  Blocks are only evictable
43789Sahrens  * when there are no external references active.  This makes
44789Sahrens  * eviction far more problematic:  we choose to evict the evictable
45789Sahrens  * blocks that are the "lowest" in the list.
46789Sahrens  *
47789Sahrens  * There are times when it is not possible to evict the requested
48789Sahrens  * space.  In these circumstances we are unable to adjust the cache
49789Sahrens  * size.  To prevent the cache growing unbounded at these times we
50*5450Sbrendan  * implement a "cache throttle" that slows the flow of new data
51*5450Sbrendan  * into the cache until we can make space available.
52789Sahrens  *
53789Sahrens  * 2. The Megiddo and Modha model assumes a fixed cache size.
54789Sahrens  * Pages are evicted when the cache is full and there is a cache
55789Sahrens  * miss.  Our model has a variable sized cache.  It grows with
56*5450Sbrendan  * high use, but also tries to react to memory pressure from the
57789Sahrens  * operating system: decreasing its size when system memory is
58789Sahrens  * tight.
59789Sahrens  *
60789Sahrens  * 3. The Megiddo and Modha model assumes a fixed page size. All
61789Sahrens  * elements of the cache are therefore exactly the same size.  So
62789Sahrens  * when adjusting the cache size following a cache miss, it's simply
63789Sahrens  * a matter of choosing a single page to evict.  In our model, we
64789Sahrens  * have variable sized cache blocks (ranging from 512 bytes to
65789Sahrens  * 128K bytes).  We therefore choose a set of blocks to evict to make
66789Sahrens  * space for a cache miss that approximates as closely as possible
67789Sahrens  * the space used by the new block.
68789Sahrens  *
69789Sahrens  * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70789Sahrens  * by N. Megiddo & D. Modha, FAST 2003
71789Sahrens  */
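/*
 * A minimal sketch of difference 3 above, using hypothetical helpers
 * evictable_head() and evict_one() (the real logic lives in arc_evict()
 * further down in this file).  Because blocks are variable sized,
 * eviction frees an approximate byte count rather than a single page;
 * if nothing is evictable, the cache throttle described above kicks in:
 *
 *	int64_t needed = new_block_size;
 *	while (needed > 0) {
 *		arc_buf_hdr_t *ab = evictable_head(state);
 *		if (ab == NULL)
 *			break;
 *		needed -= ab->b_size;
 *		evict_one(ab);
 *	}
 */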
72789Sahrens 
73789Sahrens /*
74789Sahrens  * The locking model:
75789Sahrens  *
76789Sahrens  * A new reference to a cache buffer can be obtained in two
77789Sahrens  * ways: 1) via a hash table lookup using the DVA as a key,
78*5450Sbrendan  * or 2) via one of the ARC lists.  The arc_read() interface
79789Sahrens  * uses method 1, while the internal arc algorithms for
80789Sahrens  * adjusting the cache use method 2.  We therefore provide two
81789Sahrens  * types of locks: 1) the hash table lock array, and 2) the
82789Sahrens  * arc list locks.
83789Sahrens  *
84789Sahrens  * Buffers do not have their own mutexes; rather, they rely on the
85789Sahrens  * hash table mutexes for the bulk of their protection (i.e. most
86789Sahrens  * fields in the arc_buf_hdr_t are protected by these mutexes).
87789Sahrens  *
88789Sahrens  * buf_hash_find() returns the appropriate mutex (held) when it
89789Sahrens  * locates the requested buffer in the hash table.  It returns
90789Sahrens  * NULL for the mutex if the buffer was not in the table.
91789Sahrens  *
92789Sahrens  * buf_hash_remove() expects the appropriate hash mutex to be
93789Sahrens  * already held before it is invoked.
94789Sahrens  *
95789Sahrens  * Each arc state also has a mutex which is used to protect the
96789Sahrens  * buffer list associated with the state.  When attempting to
97789Sahrens  * obtain a hash table lock while holding an arc list lock, you
98789Sahrens  * must use mutex_tryenter() to avoid deadlock.  Also note that
992688Smaybee  * the active state mutex must be held before the ghost state mutex.
100789Sahrens  *
1011544Seschrock  * Arc buffers may have an associated eviction callback function.
1021544Seschrock  * This function will be invoked prior to removing the buffer (e.g.
1031544Seschrock  * in arc_do_user_evicts()).  Note however that the data associated
1041544Seschrock  * with the buffer may be evicted prior to the callback.  The callback
1051544Seschrock  * must be made with *no locks held* (to prevent deadlock).  Additionally,
1061544Seschrock  * the users of callbacks must ensure that their private data is
1071544Seschrock  * protected from simultaneous callbacks from arc_buf_evict()
1081544Seschrock  * and arc_do_user_evicts().
1091544Seschrock  *
110789Sahrens  * Note that the majority of the performance stats are manipulated
111789Sahrens  * with atomic operations.
112*5450Sbrendan  *
113*5450Sbrendan  * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
114*5450Sbrendan  *
115*5450Sbrendan  *	- L2ARC buflist creation
116*5450Sbrendan  *	- L2ARC buflist eviction
117*5450Sbrendan  *	- L2ARC write completion, which walks L2ARC buflists
118*5450Sbrendan  *	- ARC header destruction, as it removes from L2ARC buflists
119*5450Sbrendan  *	- ARC header release, as it removes from L2ARC buflists
120789Sahrens  */
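/*
 * A sketch of the lock-ordering rule above (illustrative only): while
 * an arc list lock is held, the hash lock may only be taken with
 * mutex_tryenter(), skipping the header on failure rather than
 * blocking:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	kmutex_t *hash_lock = HDR_LOCK(ab);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *	} else {
 *		... operate on ab ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */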
121789Sahrens 
122789Sahrens #include <sys/spa.h>
123789Sahrens #include <sys/zio.h>
1243093Sahrens #include <sys/zio_checksum.h>
125789Sahrens #include <sys/zfs_context.h>
126789Sahrens #include <sys/arc.h>
127789Sahrens #include <sys/refcount.h>
128789Sahrens #ifdef _KERNEL
129789Sahrens #include <sys/vmsystm.h>
130789Sahrens #include <vm/anon.h>
131789Sahrens #include <sys/fs/swapnode.h>
1321484Sek110237 #include <sys/dnlc.h>
133789Sahrens #endif
134789Sahrens #include <sys/callb.h>
1353403Sbmc #include <sys/kstat.h>
136789Sahrens 
137789Sahrens static kmutex_t		arc_reclaim_thr_lock;
138789Sahrens static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
139789Sahrens static uint8_t		arc_thread_exit;
140789Sahrens 
1411484Sek110237 #define	ARC_REDUCE_DNLC_PERCENT	3
1421484Sek110237 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
1431484Sek110237 
144789Sahrens typedef enum arc_reclaim_strategy {
145789Sahrens 	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
146789Sahrens 	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
147789Sahrens } arc_reclaim_strategy_t;
148789Sahrens 
149789Sahrens /* number of seconds before growing cache again */
150789Sahrens static int		arc_grow_retry = 60;
151789Sahrens 
1522391Smaybee /*
1532638Sperrin  * minimum lifespan of a prefetch block in clock ticks
1542638Sperrin  * (initialized in arc_init())
1552391Smaybee  */
1562638Sperrin static int		arc_min_prefetch_lifespan;
1572391Smaybee 
158789Sahrens static int arc_dead;
159789Sahrens 
160789Sahrens /*
1612885Sahrens  * These tunables are for performance analysis.
1622885Sahrens  */
1632885Sahrens uint64_t zfs_arc_max;
1642885Sahrens uint64_t zfs_arc_min;
1654645Sek110237 uint64_t zfs_arc_meta_limit = 0;
1662885Sahrens 
1672885Sahrens /*
168*5450Sbrendan  * Note that buffers can be in one of 6 states:
169789Sahrens  *	ARC_anon	- anonymous (discussed below)
1701544Seschrock  *	ARC_mru		- recently used, currently cached
1711544Seschrock  *	ARC_mru_ghost	- recently used, no longer in cache
1721544Seschrock  *	ARC_mfu		- frequently used, currently cached
1731544Seschrock  *	ARC_mfu_ghost	- frequently used, no longer in cache
174*5450Sbrendan  *	ARC_l2c_only	- exists in L2ARC but not other states
1754309Smaybee  * When there are no active references to a buffer, it is
1764309Smaybee  * linked onto a list in one of these arc states.  These are
1774309Smaybee  * the only buffers that can be evicted or deleted.  Within each
1784309Smaybee  * state there are multiple lists, one for meta-data and one for
1794309Smaybee  * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
1804309Smaybee  * etc.) is tracked separately so that it can be managed more
181*5450Sbrendan  * carefully: favored over data, and limited explicitly.
182789Sahrens  *
183789Sahrens  * Anonymous buffers are buffers that are not associated with
184789Sahrens  * a DVA.  These are buffers that hold dirty block copies
185789Sahrens  * before they are written to stable storage.  By definition,
1861544Seschrock  * they are "ref'd" and are considered part of arc_mru
187789Sahrens  * that cannot be freed.  Generally, they will acquire a DVA
1881544Seschrock  * as they are written and migrate onto the arc_mru list.
189*5450Sbrendan  *
190*5450Sbrendan  * The ARC_l2c_only state is for buffers that are in the second
191*5450Sbrendan  * level ARC but no longer in any of the ARC_m* lists.  The second
192*5450Sbrendan  * level ARC itself may also contain buffers that are in any of
193*5450Sbrendan  * the ARC_m* states - meaning that a buffer can exist in two
194*5450Sbrendan  * places.  The reason for the ARC_l2c_only state is to keep the
195*5450Sbrendan  * buffer header in the hash table, so that reads that hit the
196*5450Sbrendan  * second level ARC benefit from these fast lookups.
197789Sahrens  */
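/*
 * The typical life cycle through the states above, as a sketch
 * (arc_change_state() below performs the actual transitions):
 *
 *	anon --(write, gains DVA)--> mru --(second access)--> mfu
 *	mru --(data evicted)--> mru_ghost
 *	mfu --(data evicted)--> mfu_ghost
 *	any --(header only, copy remains on L2ARC device)--> l2c_only
 */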
198789Sahrens 
199789Sahrens typedef struct arc_state {
2004309Smaybee 	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
2014309Smaybee 	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
2024309Smaybee 	uint64_t arcs_size;	/* total amount of data in this state */
2033403Sbmc 	kmutex_t arcs_mtx;
204789Sahrens } arc_state_t;
205789Sahrens 
206*5450Sbrendan /* The 6 states: */
207789Sahrens static arc_state_t ARC_anon;
2081544Seschrock static arc_state_t ARC_mru;
2091544Seschrock static arc_state_t ARC_mru_ghost;
2101544Seschrock static arc_state_t ARC_mfu;
2111544Seschrock static arc_state_t ARC_mfu_ghost;
212*5450Sbrendan static arc_state_t ARC_l2c_only;
213789Sahrens 
2143403Sbmc typedef struct arc_stats {
2153403Sbmc 	kstat_named_t arcstat_hits;
2163403Sbmc 	kstat_named_t arcstat_misses;
2173403Sbmc 	kstat_named_t arcstat_demand_data_hits;
2183403Sbmc 	kstat_named_t arcstat_demand_data_misses;
2193403Sbmc 	kstat_named_t arcstat_demand_metadata_hits;
2203403Sbmc 	kstat_named_t arcstat_demand_metadata_misses;
2213403Sbmc 	kstat_named_t arcstat_prefetch_data_hits;
2223403Sbmc 	kstat_named_t arcstat_prefetch_data_misses;
2233403Sbmc 	kstat_named_t arcstat_prefetch_metadata_hits;
2243403Sbmc 	kstat_named_t arcstat_prefetch_metadata_misses;
2253403Sbmc 	kstat_named_t arcstat_mru_hits;
2263403Sbmc 	kstat_named_t arcstat_mru_ghost_hits;
2273403Sbmc 	kstat_named_t arcstat_mfu_hits;
2283403Sbmc 	kstat_named_t arcstat_mfu_ghost_hits;
2293403Sbmc 	kstat_named_t arcstat_deleted;
2303403Sbmc 	kstat_named_t arcstat_recycle_miss;
2313403Sbmc 	kstat_named_t arcstat_mutex_miss;
2323403Sbmc 	kstat_named_t arcstat_evict_skip;
2333403Sbmc 	kstat_named_t arcstat_hash_elements;
2343403Sbmc 	kstat_named_t arcstat_hash_elements_max;
2353403Sbmc 	kstat_named_t arcstat_hash_collisions;
2363403Sbmc 	kstat_named_t arcstat_hash_chains;
2373403Sbmc 	kstat_named_t arcstat_hash_chain_max;
2383403Sbmc 	kstat_named_t arcstat_p;
2393403Sbmc 	kstat_named_t arcstat_c;
2403403Sbmc 	kstat_named_t arcstat_c_min;
2413403Sbmc 	kstat_named_t arcstat_c_max;
2423403Sbmc 	kstat_named_t arcstat_size;
243*5450Sbrendan 	kstat_named_t arcstat_hdr_size;
244*5450Sbrendan 	kstat_named_t arcstat_l2_hits;
245*5450Sbrendan 	kstat_named_t arcstat_l2_misses;
246*5450Sbrendan 	kstat_named_t arcstat_l2_feeds;
247*5450Sbrendan 	kstat_named_t arcstat_l2_rw_clash;
248*5450Sbrendan 	kstat_named_t arcstat_l2_writes_sent;
249*5450Sbrendan 	kstat_named_t arcstat_l2_writes_done;
250*5450Sbrendan 	kstat_named_t arcstat_l2_writes_error;
251*5450Sbrendan 	kstat_named_t arcstat_l2_writes_hdr_miss;
252*5450Sbrendan 	kstat_named_t arcstat_l2_evict_lock_retry;
253*5450Sbrendan 	kstat_named_t arcstat_l2_evict_reading;
254*5450Sbrendan 	kstat_named_t arcstat_l2_free_on_write;
255*5450Sbrendan 	kstat_named_t arcstat_l2_abort_lowmem;
256*5450Sbrendan 	kstat_named_t arcstat_l2_cksum_bad;
257*5450Sbrendan 	kstat_named_t arcstat_l2_io_error;
258*5450Sbrendan 	kstat_named_t arcstat_l2_size;
259*5450Sbrendan 	kstat_named_t arcstat_l2_hdr_size;
2603403Sbmc } arc_stats_t;
2613403Sbmc 
2623403Sbmc static arc_stats_t arc_stats = {
2633403Sbmc 	{ "hits",			KSTAT_DATA_UINT64 },
2643403Sbmc 	{ "misses",			KSTAT_DATA_UINT64 },
2653403Sbmc 	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
2663403Sbmc 	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
2673403Sbmc 	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
2683403Sbmc 	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
2693403Sbmc 	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
2703403Sbmc 	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
2713403Sbmc 	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
2723403Sbmc 	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
2733403Sbmc 	{ "mru_hits",			KSTAT_DATA_UINT64 },
2743403Sbmc 	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
2753403Sbmc 	{ "mfu_hits",			KSTAT_DATA_UINT64 },
2763403Sbmc 	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
2773403Sbmc 	{ "deleted",			KSTAT_DATA_UINT64 },
2783403Sbmc 	{ "recycle_miss",		KSTAT_DATA_UINT64 },
2793403Sbmc 	{ "mutex_miss",			KSTAT_DATA_UINT64 },
2803403Sbmc 	{ "evict_skip",			KSTAT_DATA_UINT64 },
2813403Sbmc 	{ "hash_elements",		KSTAT_DATA_UINT64 },
2823403Sbmc 	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
2833403Sbmc 	{ "hash_collisions",		KSTAT_DATA_UINT64 },
2843403Sbmc 	{ "hash_chains",		KSTAT_DATA_UINT64 },
2853403Sbmc 	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
2863403Sbmc 	{ "p",				KSTAT_DATA_UINT64 },
2873403Sbmc 	{ "c",				KSTAT_DATA_UINT64 },
2883403Sbmc 	{ "c_min",			KSTAT_DATA_UINT64 },
2893403Sbmc 	{ "c_max",			KSTAT_DATA_UINT64 },
290*5450Sbrendan 	{ "size",			KSTAT_DATA_UINT64 },
291*5450Sbrendan 	{ "hdr_size",			KSTAT_DATA_UINT64 },
292*5450Sbrendan 	{ "l2_hits",			KSTAT_DATA_UINT64 },
293*5450Sbrendan 	{ "l2_misses",			KSTAT_DATA_UINT64 },
294*5450Sbrendan 	{ "l2_feeds",			KSTAT_DATA_UINT64 },
295*5450Sbrendan 	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
296*5450Sbrendan 	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
297*5450Sbrendan 	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
298*5450Sbrendan 	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
299*5450Sbrendan 	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
300*5450Sbrendan 	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
301*5450Sbrendan 	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
302*5450Sbrendan 	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
303*5450Sbrendan 	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
304*5450Sbrendan 	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
305*5450Sbrendan 	{ "l2_io_error",		KSTAT_DATA_UINT64 },
306*5450Sbrendan 	{ "l2_size",			KSTAT_DATA_UINT64 },
307*5450Sbrendan 	{ "l2_hdr_size",		KSTAT_DATA_UINT64 }
3083403Sbmc };
309789Sahrens 
3103403Sbmc #define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)
3113403Sbmc 
3123403Sbmc #define	ARCSTAT_INCR(stat, val) \
3133403Sbmc 	atomic_add_64(&arc_stats.stat.value.ui64, (val));
3143403Sbmc 
3153403Sbmc #define	ARCSTAT_BUMP(stat) 	ARCSTAT_INCR(stat, 1)
3163403Sbmc #define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)
3173403Sbmc 
3183403Sbmc #define	ARCSTAT_MAX(stat, val) {					\
3193403Sbmc 	uint64_t m;							\
3203403Sbmc 	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
3213403Sbmc 	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
3223403Sbmc 		continue;						\
3233403Sbmc }
3243403Sbmc 
3253403Sbmc #define	ARCSTAT_MAXSTAT(stat) \
3263403Sbmc 	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
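/*
 * ARCSTAT_MAX is a lock-free maximum: the loop retries the
 * compare-and-swap until either the stored value is already >= val or
 * the CAS installs val.  Example usage, from buf_hash_insert() below:
 *
 *	ARCSTAT_MAX(arcstat_hash_chain_max, i);
 *	ARCSTAT_MAXSTAT(arcstat_hash_elements);
 */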
327789Sahrens 
3283403Sbmc /*
3293403Sbmc  * We define a macro to allow ARC hits/misses to be easily broken down by
3303403Sbmc  * two separate conditions, giving a total of four different subtypes for
3313403Sbmc  * each of hits and misses (so eight statistics total).
3323403Sbmc  */
3333403Sbmc #define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
3343403Sbmc 	if (cond1) {							\
3353403Sbmc 		if (cond2) {						\
3363403Sbmc 			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
3373403Sbmc 		} else {						\
3383403Sbmc 			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
3393403Sbmc 		}							\
3403403Sbmc 	} else {							\
3413403Sbmc 		if (cond2) {						\
3423403Sbmc 			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
3433403Sbmc 		} else {						\
3443403Sbmc 			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
3453403Sbmc 		}							\
3463403Sbmc 	}
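/*
 * For example, this invocation from arc_buf_add_ref() below counts a
 * demand (non-prefetch) hit on a metadata buffer as
 * arcstat_demand_metadata_hits:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 */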
347789Sahrens 
3483403Sbmc kstat_t			*arc_ksp;
3493403Sbmc static arc_state_t 	*arc_anon;
3503403Sbmc static arc_state_t	*arc_mru;
3513403Sbmc static arc_state_t	*arc_mru_ghost;
3523403Sbmc static arc_state_t	*arc_mfu;
3533403Sbmc static arc_state_t	*arc_mfu_ghost;
354*5450Sbrendan static arc_state_t	*arc_l2c_only;
3553403Sbmc 
3563403Sbmc /*
3573403Sbmc  * There are several ARC variables that are critical to export as kstats --
3583403Sbmc  * but we don't want to have to grovel around in the kstat whenever we wish to
3593403Sbmc  * manipulate them.  For these variables, we therefore define them to be in
3603403Sbmc  * terms of the statistic variable.  This assures that we are not introducing
3613403Sbmc  * the possibility of inconsistency by having shadow copies of the variables,
3623403Sbmc  * while still allowing the code to be readable.
3633403Sbmc  */
3643403Sbmc #define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
3653403Sbmc #define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
3663403Sbmc #define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
3673403Sbmc #define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
3683403Sbmc #define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
3693403Sbmc 
3703403Sbmc static int		arc_no_grow;	/* Don't try to grow cache size */
3713403Sbmc static uint64_t		arc_tempreserve;
3724309Smaybee static uint64_t		arc_meta_used;
3734309Smaybee static uint64_t		arc_meta_limit;
3744309Smaybee static uint64_t		arc_meta_max = 0;
375789Sahrens 
376*5450Sbrendan typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
377*5450Sbrendan 
378789Sahrens typedef struct arc_callback arc_callback_t;
379789Sahrens 
380789Sahrens struct arc_callback {
3813547Smaybee 	void			*acb_private;
382789Sahrens 	arc_done_func_t		*acb_done;
383789Sahrens 	arc_byteswap_func_t	*acb_byteswap;
384789Sahrens 	arc_buf_t		*acb_buf;
385789Sahrens 	zio_t			*acb_zio_dummy;
386789Sahrens 	arc_callback_t		*acb_next;
387789Sahrens };
388789Sahrens 
3893547Smaybee typedef struct arc_write_callback arc_write_callback_t;
3903547Smaybee 
3913547Smaybee struct arc_write_callback {
3923547Smaybee 	void		*awcb_private;
3933547Smaybee 	arc_done_func_t	*awcb_ready;
3943547Smaybee 	arc_done_func_t	*awcb_done;
3953547Smaybee 	arc_buf_t	*awcb_buf;
3963547Smaybee };
3973547Smaybee 
398789Sahrens struct arc_buf_hdr {
399789Sahrens 	/* protected by hash lock */
400789Sahrens 	dva_t			b_dva;
401789Sahrens 	uint64_t		b_birth;
402789Sahrens 	uint64_t		b_cksum0;
403789Sahrens 
4043093Sahrens 	kmutex_t		b_freeze_lock;
4053093Sahrens 	zio_cksum_t		*b_freeze_cksum;
4063093Sahrens 
407789Sahrens 	arc_buf_hdr_t		*b_hash_next;
408789Sahrens 	arc_buf_t		*b_buf;
409789Sahrens 	uint32_t		b_flags;
4101544Seschrock 	uint32_t		b_datacnt;
411789Sahrens 
4123290Sjohansen 	arc_callback_t		*b_acb;
413789Sahrens 	kcondvar_t		b_cv;
4143290Sjohansen 
4153290Sjohansen 	/* immutable */
4163290Sjohansen 	arc_buf_contents_t	b_type;
4173290Sjohansen 	uint64_t		b_size;
4183290Sjohansen 	spa_t			*b_spa;
419789Sahrens 
420789Sahrens 	/* protected by arc state mutex */
421789Sahrens 	arc_state_t		*b_state;
422789Sahrens 	list_node_t		b_arc_node;
423789Sahrens 
424789Sahrens 	/* updated atomically */
425789Sahrens 	clock_t			b_arc_access;
426789Sahrens 
427789Sahrens 	/* self protecting */
428789Sahrens 	refcount_t		b_refcnt;
429*5450Sbrendan 
430*5450Sbrendan 	l2arc_buf_hdr_t		*b_l2hdr;
431*5450Sbrendan 	list_node_t		b_l2node;
432789Sahrens };
433789Sahrens 
4341544Seschrock static arc_buf_t *arc_eviction_list;
4351544Seschrock static kmutex_t arc_eviction_mtx;
4362887Smaybee static arc_buf_hdr_t arc_eviction_hdr;
4372688Smaybee static void arc_get_data_buf(arc_buf_t *buf);
4382688Smaybee static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
4394309Smaybee static int arc_evict_needed(arc_buf_contents_t type);
4404709Smaybee static void arc_evict_ghost(arc_state_t *state, int64_t bytes);
4411544Seschrock 
4421544Seschrock #define	GHOST_STATE(state)	\
443*5450Sbrendan 	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
444*5450Sbrendan 	(state) == arc_l2c_only)
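/*
 * Ghost states hold only headers, never data, so code paths that walk
 * state lists special-case them; e.g. from add_reference() below:
 *
 *	if (GHOST_STATE(ab->b_state)) {
 *		ASSERT3U(ab->b_datacnt, ==, 0);
 *		ASSERT3P(ab->b_buf, ==, NULL);
 *		delta = ab->b_size;
 *	}
 */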
4451544Seschrock 
446789Sahrens /*
447789Sahrens  * Private ARC flags.  These flags are private ARC only flags that will show up
448789Sahrens  * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
449789Sahrens  * be passed in as arc_flags in things like arc_read.  However, these flags
450789Sahrens  * should never be passed and should only be set by ARC code.  When adding new
451789Sahrens  * public flags, make sure not to smash the private ones.
452789Sahrens  */
453789Sahrens 
4541544Seschrock #define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
455789Sahrens #define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
456789Sahrens #define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
457789Sahrens #define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
4581544Seschrock #define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
4592391Smaybee #define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
460*5450Sbrendan #define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
461*5450Sbrendan #define	ARC_DONT_L2CACHE	(1 << 16)	/* originated by prefetch */
462*5450Sbrendan #define	ARC_L2_READING		(1 << 17)	/* L2ARC read in progress */
463*5450Sbrendan #define	ARC_L2_WRITING		(1 << 18)	/* L2ARC write in progress */
464*5450Sbrendan #define	ARC_L2_EVICTED		(1 << 19)	/* evicted during I/O */
465*5450Sbrendan #define	ARC_L2_WRITE_HEAD	(1 << 20)	/* head of write list */
466789Sahrens 
4671544Seschrock #define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
468789Sahrens #define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
469789Sahrens #define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
470789Sahrens #define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
4711544Seschrock #define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
472*5450Sbrendan #define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
473*5450Sbrendan #define	HDR_DONT_L2CACHE(hdr)	((hdr)->b_flags & ARC_DONT_L2CACHE)
474*5450Sbrendan #define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_L2_READING)
475*5450Sbrendan #define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
476*5450Sbrendan #define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
477*5450Sbrendan #define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)
478789Sahrens 
479789Sahrens /*
480789Sahrens  * Hash table routines
481789Sahrens  */
482789Sahrens 
483789Sahrens #define	HT_LOCK_PAD	64
484789Sahrens 
485789Sahrens struct ht_lock {
486789Sahrens 	kmutex_t	ht_lock;
487789Sahrens #ifdef _KERNEL
488789Sahrens 	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
489789Sahrens #endif
490789Sahrens };
491789Sahrens 
492789Sahrens #define	BUF_LOCKS 256
493789Sahrens typedef struct buf_hash_table {
494789Sahrens 	uint64_t ht_mask;
495789Sahrens 	arc_buf_hdr_t **ht_table;
496789Sahrens 	struct ht_lock ht_locks[BUF_LOCKS];
497789Sahrens } buf_hash_table_t;
498789Sahrens 
499789Sahrens static buf_hash_table_t buf_hash_table;
500789Sahrens 
501789Sahrens #define	BUF_HASH_INDEX(spa, dva, birth) \
502789Sahrens 	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
503789Sahrens #define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
504789Sahrens #define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
505789Sahrens #define	HDR_LOCK(buf) \
506789Sahrens 	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
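/*
 * Note the lock striping: the table has ht_mask + 1 buckets but only
 * BUF_LOCKS (256) locks, so BUF_HASH_LOCK_NTRY() folds many buckets
 * onto one ht_lock via (idx & (BUF_LOCKS-1)).  A typical lookup,
 * sketched:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(spa, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... hdr is stable while hash_lock is held ...
 *		mutex_exit(hash_lock);
 *	}
 */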
507789Sahrens 
508789Sahrens uint64_t zfs_crc64_table[256];
509789Sahrens 
510*5450Sbrendan /*
511*5450Sbrendan  * Level 2 ARC
512*5450Sbrendan  */
513*5450Sbrendan 
514*5450Sbrendan #define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
515*5450Sbrendan #define	L2ARC_HEADROOM		4		/* num of writes */
516*5450Sbrendan #define	L2ARC_FEED_DELAY	180		/* starting grace */
517*5450Sbrendan #define	L2ARC_FEED_SECS		1		/* caching interval */
518*5450Sbrendan 
519*5450Sbrendan #define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
520*5450Sbrendan #define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)
521*5450Sbrendan 
522*5450Sbrendan /*
523*5450Sbrendan  * L2ARC Performance Tunables
524*5450Sbrendan  */
525*5450Sbrendan uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
526*5450Sbrendan uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
527*5450Sbrendan uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
528*5450Sbrendan boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
529*5450Sbrendan 
530*5450Sbrendan /*
531*5450Sbrendan  * L2ARC Internals
532*5450Sbrendan  */
533*5450Sbrendan typedef struct l2arc_dev {
534*5450Sbrendan 	vdev_t			*l2ad_vdev;	/* vdev */
535*5450Sbrendan 	spa_t			*l2ad_spa;	/* spa */
536*5450Sbrendan 	uint64_t		l2ad_hand;	/* next write location */
537*5450Sbrendan 	uint64_t		l2ad_write;	/* desired write size, bytes */
538*5450Sbrendan 	uint64_t		l2ad_start;	/* first addr on device */
539*5450Sbrendan 	uint64_t		l2ad_end;	/* last addr on device */
540*5450Sbrendan 	uint64_t		l2ad_evict;	/* last addr eviction reached */
541*5450Sbrendan 	boolean_t		l2ad_first;	/* first sweep through */
542*5450Sbrendan 	list_t			*l2ad_buflist;	/* buffer list */
543*5450Sbrendan 	list_node_t		l2ad_node;	/* device list node */
544*5450Sbrendan } l2arc_dev_t;
545*5450Sbrendan 
546*5450Sbrendan static list_t L2ARC_dev_list;			/* device list */
547*5450Sbrendan static list_t *l2arc_dev_list;			/* device list pointer */
548*5450Sbrendan static kmutex_t l2arc_dev_mtx;			/* device list mutex */
549*5450Sbrendan static l2arc_dev_t *l2arc_dev_last;		/* last device used */
550*5450Sbrendan static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
551*5450Sbrendan static list_t L2ARC_free_on_write;		/* free after write buf list */
552*5450Sbrendan static list_t *l2arc_free_on_write;		/* free after write list ptr */
553*5450Sbrendan static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
554*5450Sbrendan static uint64_t l2arc_ndev;			/* number of devices */
555*5450Sbrendan 
556*5450Sbrendan typedef struct l2arc_read_callback {
557*5450Sbrendan 	arc_buf_t	*l2rcb_buf;		/* read buffer */
558*5450Sbrendan 	spa_t		*l2rcb_spa;		/* spa */
559*5450Sbrendan 	blkptr_t	l2rcb_bp;		/* original blkptr */
560*5450Sbrendan 	zbookmark_t	l2rcb_zb;		/* original bookmark */
561*5450Sbrendan 	int		l2rcb_flags;		/* original flags */
562*5450Sbrendan } l2arc_read_callback_t;
563*5450Sbrendan 
564*5450Sbrendan typedef struct l2arc_write_callback {
565*5450Sbrendan 	l2arc_dev_t	*l2wcb_dev;		/* device info */
566*5450Sbrendan 	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
567*5450Sbrendan } l2arc_write_callback_t;
568*5450Sbrendan 
569*5450Sbrendan struct l2arc_buf_hdr {
570*5450Sbrendan 	/* protected by arc_buf_hdr mutex */
571*5450Sbrendan 	l2arc_dev_t	*b_dev;			/* L2ARC device */
572*5450Sbrendan 	daddr_t		b_daddr;		/* disk address, offset byte */
573*5450Sbrendan };
574*5450Sbrendan 
575*5450Sbrendan typedef struct l2arc_data_free {
576*5450Sbrendan 	/* protected by l2arc_free_on_write_mtx */
577*5450Sbrendan 	void		*l2df_data;
578*5450Sbrendan 	size_t		l2df_size;
579*5450Sbrendan 	void		(*l2df_func)(void *, size_t);
580*5450Sbrendan 	list_node_t	l2df_list_node;
581*5450Sbrendan } l2arc_data_free_t;
582*5450Sbrendan 
583*5450Sbrendan static kmutex_t l2arc_feed_thr_lock;
584*5450Sbrendan static kcondvar_t l2arc_feed_thr_cv;
585*5450Sbrendan static uint8_t l2arc_thread_exit;
586*5450Sbrendan 
587*5450Sbrendan static void l2arc_read_done(zio_t *zio);
588*5450Sbrendan static void l2arc_hdr_stat_add(void);
589*5450Sbrendan static void l2arc_hdr_stat_remove(void);
590*5450Sbrendan 
591789Sahrens static uint64_t
592789Sahrens buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
593789Sahrens {
594789Sahrens 	uintptr_t spav = (uintptr_t)spa;
595789Sahrens 	uint8_t *vdva = (uint8_t *)dva;
596789Sahrens 	uint64_t crc = -1ULL;
597789Sahrens 	int i;
598789Sahrens 
599789Sahrens 	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
600789Sahrens 
601789Sahrens 	for (i = 0; i < sizeof (dva_t); i++)
602789Sahrens 		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
603789Sahrens 
604789Sahrens 	crc ^= (spav>>8) ^ birth;
605789Sahrens 
606789Sahrens 	return (crc);
607789Sahrens }
608789Sahrens 
609789Sahrens #define	BUF_EMPTY(buf)						\
610789Sahrens 	((buf)->b_dva.dva_word[0] == 0 &&			\
611789Sahrens 	(buf)->b_dva.dva_word[1] == 0 &&			\
612789Sahrens 	(buf)->b_birth == 0)
613789Sahrens 
614789Sahrens #define	BUF_EQUAL(spa, dva, birth, buf)				\
615789Sahrens 	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
616789Sahrens 	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
617789Sahrens 	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
618789Sahrens 
619789Sahrens static arc_buf_hdr_t *
620789Sahrens buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
621789Sahrens {
622789Sahrens 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
623789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
624789Sahrens 	arc_buf_hdr_t *buf;
625789Sahrens 
626789Sahrens 	mutex_enter(hash_lock);
627789Sahrens 	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
628789Sahrens 	    buf = buf->b_hash_next) {
629789Sahrens 		if (BUF_EQUAL(spa, dva, birth, buf)) {
630789Sahrens 			*lockp = hash_lock;
631789Sahrens 			return (buf);
632789Sahrens 		}
633789Sahrens 	}
634789Sahrens 	mutex_exit(hash_lock);
635789Sahrens 	*lockp = NULL;
636789Sahrens 	return (NULL);
637789Sahrens }
638789Sahrens 
639789Sahrens /*
640789Sahrens  * Insert an entry into the hash table.  If there is already an element
641789Sahrens  * equal to elem in the hash table, then the already existing element
642789Sahrens  * will be returned and the new element will not be inserted.
643789Sahrens  * Otherwise returns NULL.
644789Sahrens  */
645789Sahrens static arc_buf_hdr_t *
646789Sahrens buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
647789Sahrens {
648789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
649789Sahrens 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
650789Sahrens 	arc_buf_hdr_t *fbuf;
6513403Sbmc 	uint32_t i;
652789Sahrens 
6531544Seschrock 	ASSERT(!HDR_IN_HASH_TABLE(buf));
654789Sahrens 	*lockp = hash_lock;
655789Sahrens 	mutex_enter(hash_lock);
656789Sahrens 	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
657789Sahrens 	    fbuf = fbuf->b_hash_next, i++) {
658789Sahrens 		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
659789Sahrens 			return (fbuf);
660789Sahrens 	}
661789Sahrens 
662789Sahrens 	buf->b_hash_next = buf_hash_table.ht_table[idx];
663789Sahrens 	buf_hash_table.ht_table[idx] = buf;
6641544Seschrock 	buf->b_flags |= ARC_IN_HASH_TABLE;
665789Sahrens 
666789Sahrens 	/* collect some hash table performance data */
667789Sahrens 	if (i > 0) {
6683403Sbmc 		ARCSTAT_BUMP(arcstat_hash_collisions);
669789Sahrens 		if (i == 1)
6703403Sbmc 			ARCSTAT_BUMP(arcstat_hash_chains);
6713403Sbmc 
6723403Sbmc 		ARCSTAT_MAX(arcstat_hash_chain_max, i);
673789Sahrens 	}
6743403Sbmc 
6753403Sbmc 	ARCSTAT_BUMP(arcstat_hash_elements);
6763403Sbmc 	ARCSTAT_MAXSTAT(arcstat_hash_elements);
677789Sahrens 
678789Sahrens 	return (NULL);
679789Sahrens }
680789Sahrens 
681789Sahrens static void
682789Sahrens buf_hash_remove(arc_buf_hdr_t *buf)
683789Sahrens {
684789Sahrens 	arc_buf_hdr_t *fbuf, **bufp;
685789Sahrens 	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
686789Sahrens 
687789Sahrens 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
6881544Seschrock 	ASSERT(HDR_IN_HASH_TABLE(buf));
689789Sahrens 
690789Sahrens 	bufp = &buf_hash_table.ht_table[idx];
691789Sahrens 	while ((fbuf = *bufp) != buf) {
692789Sahrens 		ASSERT(fbuf != NULL);
693789Sahrens 		bufp = &fbuf->b_hash_next;
694789Sahrens 	}
695789Sahrens 	*bufp = buf->b_hash_next;
696789Sahrens 	buf->b_hash_next = NULL;
6971544Seschrock 	buf->b_flags &= ~ARC_IN_HASH_TABLE;
698789Sahrens 
699789Sahrens 	/* collect some hash table performance data */
7003403Sbmc 	ARCSTAT_BUMPDOWN(arcstat_hash_elements);
7013403Sbmc 
702789Sahrens 	if (buf_hash_table.ht_table[idx] &&
703789Sahrens 	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
7043403Sbmc 		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
705789Sahrens }
706789Sahrens 
707789Sahrens /*
708789Sahrens  * Global data structures and functions for the buf kmem cache.
709789Sahrens  */
710789Sahrens static kmem_cache_t *hdr_cache;
711789Sahrens static kmem_cache_t *buf_cache;
712789Sahrens 
713789Sahrens static void
714789Sahrens buf_fini(void)
715789Sahrens {
716789Sahrens 	int i;
717789Sahrens 
718789Sahrens 	kmem_free(buf_hash_table.ht_table,
719789Sahrens 	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
720789Sahrens 	for (i = 0; i < BUF_LOCKS; i++)
721789Sahrens 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
722789Sahrens 	kmem_cache_destroy(hdr_cache);
723789Sahrens 	kmem_cache_destroy(buf_cache);
724789Sahrens }
725789Sahrens 
726789Sahrens /*
727789Sahrens  * Constructor callback - called when the cache is empty
728789Sahrens  * and a new buf is requested.
729789Sahrens  */
730789Sahrens /* ARGSUSED */
731789Sahrens static int
732789Sahrens hdr_cons(void *vbuf, void *unused, int kmflag)
733789Sahrens {
734789Sahrens 	arc_buf_hdr_t *buf = vbuf;
735789Sahrens 
736789Sahrens 	bzero(buf, sizeof (arc_buf_hdr_t));
737789Sahrens 	refcount_create(&buf->b_refcnt);
738789Sahrens 	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
7394831Sgw25295 	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
740*5450Sbrendan 
741*5450Sbrendan 	ARCSTAT_INCR(arcstat_hdr_size, sizeof (arc_buf_hdr_t));
742789Sahrens 	return (0);
743789Sahrens }
744789Sahrens 
745789Sahrens /*
746789Sahrens  * Destructor callback - called when a cached buf is
747789Sahrens  * no longer required.
748789Sahrens  */
749789Sahrens /* ARGSUSED */
750789Sahrens static void
751789Sahrens hdr_dest(void *vbuf, void *unused)
752789Sahrens {
753789Sahrens 	arc_buf_hdr_t *buf = vbuf;
754789Sahrens 
755789Sahrens 	refcount_destroy(&buf->b_refcnt);
756789Sahrens 	cv_destroy(&buf->b_cv);
7574831Sgw25295 	mutex_destroy(&buf->b_freeze_lock);
758*5450Sbrendan 
759*5450Sbrendan 	ARCSTAT_INCR(arcstat_hdr_size, -sizeof (arc_buf_hdr_t));
760789Sahrens }
761789Sahrens 
762789Sahrens /*
763789Sahrens  * Reclaim callback -- invoked when memory is low.
764789Sahrens  */
765789Sahrens /* ARGSUSED */
766789Sahrens static void
767789Sahrens hdr_recl(void *unused)
768789Sahrens {
769789Sahrens 	dprintf("hdr_recl called\n");
7703158Smaybee 	/*
7713158Smaybee 	 * umem calls the reclaim func when we destroy the buf cache,
7723158Smaybee 	 * which is after we do arc_fini().
7733158Smaybee 	 */
7743158Smaybee 	if (!arc_dead)
7753158Smaybee 		cv_signal(&arc_reclaim_thr_cv);
776789Sahrens }
777789Sahrens 
778789Sahrens static void
779789Sahrens buf_init(void)
780789Sahrens {
781789Sahrens 	uint64_t *ct;
7821544Seschrock 	uint64_t hsize = 1ULL << 12;
783789Sahrens 	int i, j;
784789Sahrens 
785789Sahrens 	/*
786789Sahrens 	 * The hash table is big enough to fill all of physical memory
7871544Seschrock 	 * with an average 64K block size.  The table will take up
7881544Seschrock 	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
789789Sahrens 	 */
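	/*
	 * Worked example: with 4GB of physical memory,
	 * physmem * PAGESIZE == 2^32, so the loop below stops at
	 * hsize == 2^16 buckets (2^16 * 64K == 4GB); with 8-byte
	 * pointers the table is 2^16 * 8 == 512KB, matching the
	 * 128KB/GB figure above.
	 */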
7901544Seschrock 	while (hsize * 65536 < physmem * PAGESIZE)
791789Sahrens 		hsize <<= 1;
7921544Seschrock retry:
793789Sahrens 	buf_hash_table.ht_mask = hsize - 1;
7941544Seschrock 	buf_hash_table.ht_table =
7951544Seschrock 	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
7961544Seschrock 	if (buf_hash_table.ht_table == NULL) {
7971544Seschrock 		ASSERT(hsize > (1ULL << 8));
7981544Seschrock 		hsize >>= 1;
7991544Seschrock 		goto retry;
8001544Seschrock 	}
801789Sahrens 
802789Sahrens 	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
803789Sahrens 	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
804789Sahrens 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
805789Sahrens 	    0, NULL, NULL, NULL, NULL, NULL, 0);
806789Sahrens 
807789Sahrens 	for (i = 0; i < 256; i++)
808789Sahrens 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
809789Sahrens 			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
810789Sahrens 
811789Sahrens 	for (i = 0; i < BUF_LOCKS; i++) {
812789Sahrens 		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
813789Sahrens 		    NULL, MUTEX_DEFAULT, NULL);
814789Sahrens 	}
815789Sahrens }
816789Sahrens 
817789Sahrens #define	ARC_MINTIME	(hz>>4) /* 62 ms */
818789Sahrens 
819789Sahrens static void
8203093Sahrens arc_cksum_verify(arc_buf_t *buf)
8213093Sahrens {
8223093Sahrens 	zio_cksum_t zc;
8233093Sahrens 
8243312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
8253093Sahrens 		return;
8263093Sahrens 
8273093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
8283265Sahrens 	if (buf->b_hdr->b_freeze_cksum == NULL ||
8293265Sahrens 	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
8303093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
8313093Sahrens 		return;
8323093Sahrens 	}
8333093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
8343093Sahrens 	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
8353093Sahrens 		panic("buffer modified while frozen!");
8363093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
8373093Sahrens }
8383093Sahrens 
839*5450Sbrendan static int
840*5450Sbrendan arc_cksum_equal(arc_buf_t *buf)
841*5450Sbrendan {
842*5450Sbrendan 	zio_cksum_t zc;
843*5450Sbrendan 	int equal;
844*5450Sbrendan 
845*5450Sbrendan 	mutex_enter(&buf->b_hdr->b_freeze_lock);
846*5450Sbrendan 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
847*5450Sbrendan 	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
848*5450Sbrendan 	mutex_exit(&buf->b_hdr->b_freeze_lock);
849*5450Sbrendan 
850*5450Sbrendan 	return (equal);
851*5450Sbrendan }
852*5450Sbrendan 
8533093Sahrens static void
854*5450Sbrendan arc_cksum_compute(arc_buf_t *buf, boolean_t force)
8553093Sahrens {
856*5450Sbrendan 	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
8573093Sahrens 		return;
8583093Sahrens 
8593093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
8603093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
8613093Sahrens 		mutex_exit(&buf->b_hdr->b_freeze_lock);
8623093Sahrens 		return;
8633093Sahrens 	}
8643093Sahrens 	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
8653093Sahrens 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
8663093Sahrens 	    buf->b_hdr->b_freeze_cksum);
8673093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
8683093Sahrens }
8693093Sahrens 
8703093Sahrens void
8713093Sahrens arc_buf_thaw(arc_buf_t *buf)
8723093Sahrens {
873*5450Sbrendan 	if (zfs_flags & ZFS_DEBUG_MODIFY) {
874*5450Sbrendan 		if (buf->b_hdr->b_state != arc_anon)
875*5450Sbrendan 			panic("modifying non-anon buffer!");
876*5450Sbrendan 		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
877*5450Sbrendan 			panic("modifying buffer while i/o in progress!");
878*5450Sbrendan 		arc_cksum_verify(buf);
879*5450Sbrendan 	}
880*5450Sbrendan 
8813093Sahrens 	mutex_enter(&buf->b_hdr->b_freeze_lock);
8823093Sahrens 	if (buf->b_hdr->b_freeze_cksum != NULL) {
8833093Sahrens 		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
8843093Sahrens 		buf->b_hdr->b_freeze_cksum = NULL;
8853093Sahrens 	}
8863093Sahrens 	mutex_exit(&buf->b_hdr->b_freeze_lock);
8873093Sahrens }
8883093Sahrens 
8893093Sahrens void
8903093Sahrens arc_buf_freeze(arc_buf_t *buf)
8913093Sahrens {
8923312Sahrens 	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
8933312Sahrens 		return;
8943312Sahrens 
8953093Sahrens 	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
8963403Sbmc 	    buf->b_hdr->b_state == arc_anon);
897*5450Sbrendan 	arc_cksum_compute(buf, B_FALSE);
8983093Sahrens }
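/*
 * A sketch of the freeze/thaw protocol above, which is only active
 * when ZFS_DEBUG_MODIFY is set: thaw a buffer before legitimately
 * dirtying it, then freeze it once its contents are stable; any
 * modification of a frozen buffer is caught by arc_cksum_verify().
 *
 *	arc_buf_thaw(buf);		(drop the recorded checksum)
 *	modify_contents(buf->b_data);	(hypothetical modification)
 *	arc_buf_freeze(buf);		(record checksum of final data)
 */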
8993093Sahrens 
9003093Sahrens static void
901789Sahrens add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
902789Sahrens {
903789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
904789Sahrens 
905789Sahrens 	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
9063403Sbmc 	    (ab->b_state != arc_anon)) {
9073700Sek110237 		uint64_t delta = ab->b_size * ab->b_datacnt;
9084309Smaybee 		list_t *list = &ab->b_state->arcs_list[ab->b_type];
9094309Smaybee 		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
910789Sahrens 
9113403Sbmc 		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
9123403Sbmc 		mutex_enter(&ab->b_state->arcs_mtx);
913789Sahrens 		ASSERT(list_link_active(&ab->b_arc_node));
9144309Smaybee 		list_remove(list, ab);
9151544Seschrock 		if (GHOST_STATE(ab->b_state)) {
9161544Seschrock 			ASSERT3U(ab->b_datacnt, ==, 0);
9171544Seschrock 			ASSERT3P(ab->b_buf, ==, NULL);
9181544Seschrock 			delta = ab->b_size;
9191544Seschrock 		}
9201544Seschrock 		ASSERT(delta > 0);
9214309Smaybee 		ASSERT3U(*size, >=, delta);
9224309Smaybee 		atomic_add_64(size, -delta);
9233403Sbmc 		mutex_exit(&ab->b_state->arcs_mtx);
9252391Smaybee 		/* remove the prefetch flag if we get a reference */
9252391Smaybee 		if (ab->b_flags & ARC_PREFETCH)
9262391Smaybee 			ab->b_flags &= ~ARC_PREFETCH;
927789Sahrens 	}
928789Sahrens }
929789Sahrens 
930789Sahrens static int
931789Sahrens remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
932789Sahrens {
933789Sahrens 	int cnt;
9343403Sbmc 	arc_state_t *state = ab->b_state;
935789Sahrens 
9363403Sbmc 	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
9373403Sbmc 	ASSERT(!GHOST_STATE(state));
938789Sahrens 
939789Sahrens 	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
9403403Sbmc 	    (state != arc_anon)) {
9414309Smaybee 		uint64_t *size = &state->arcs_lsize[ab->b_type];
9424309Smaybee 
9433403Sbmc 		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
9443403Sbmc 		mutex_enter(&state->arcs_mtx);
945789Sahrens 		ASSERT(!list_link_active(&ab->b_arc_node));
9464309Smaybee 		list_insert_head(&state->arcs_list[ab->b_type], ab);
9471544Seschrock 		ASSERT(ab->b_datacnt > 0);
9484309Smaybee 		atomic_add_64(size, ab->b_size * ab->b_datacnt);
9493403Sbmc 		mutex_exit(&state->arcs_mtx);
950789Sahrens 	}
951789Sahrens 	return (cnt);
952789Sahrens }
953789Sahrens 
954789Sahrens /*
955789Sahrens  * Move the supplied buffer to the indicated state.  The mutex
956789Sahrens  * for the buffer must be held by the caller.
957789Sahrens  */
958789Sahrens static void
9591544Seschrock arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
960789Sahrens {
9611544Seschrock 	arc_state_t *old_state = ab->b_state;
9623700Sek110237 	int64_t refcnt = refcount_count(&ab->b_refcnt);
9633700Sek110237 	uint64_t from_delta, to_delta;
964789Sahrens 
965789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
9661544Seschrock 	ASSERT(new_state != old_state);
9671544Seschrock 	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
9681544Seschrock 	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
9691544Seschrock 
9701544Seschrock 	from_delta = to_delta = ab->b_datacnt * ab->b_size;
971789Sahrens 
972789Sahrens 	/*
973789Sahrens 	 * If this buffer is evictable, transfer it from the
974789Sahrens 	 * old state list to the new state list.
975789Sahrens 	 */
9761544Seschrock 	if (refcnt == 0) {
9773403Sbmc 		if (old_state != arc_anon) {
9783403Sbmc 			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
9794309Smaybee 			uint64_t *size = &old_state->arcs_lsize[ab->b_type];
9801544Seschrock 
9811544Seschrock 			if (use_mutex)
9823403Sbmc 				mutex_enter(&old_state->arcs_mtx);
9831544Seschrock 
9841544Seschrock 			ASSERT(list_link_active(&ab->b_arc_node));
9854309Smaybee 			list_remove(&old_state->arcs_list[ab->b_type], ab);
986789Sahrens 
9872391Smaybee 			/*
9882391Smaybee 			 * If prefetching out of the ghost cache,
9892391Smaybee 			 * we will have a non-null datacnt.
9902391Smaybee 			 */
9912391Smaybee 			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
9922391Smaybee 				/* ghost elements have a ghost size */
9931544Seschrock 				ASSERT(ab->b_buf == NULL);
9941544Seschrock 				from_delta = ab->b_size;
995789Sahrens 			}
9964309Smaybee 			ASSERT3U(*size, >=, from_delta);
9974309Smaybee 			atomic_add_64(size, -from_delta);
9981544Seschrock 
9991544Seschrock 			if (use_mutex)
10003403Sbmc 				mutex_exit(&old_state->arcs_mtx);
1001789Sahrens 		}
10023403Sbmc 		if (new_state != arc_anon) {
10033403Sbmc 			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
10044309Smaybee 			uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1005789Sahrens 
10061544Seschrock 			if (use_mutex)
10073403Sbmc 				mutex_enter(&new_state->arcs_mtx);
10081544Seschrock 
10094309Smaybee 			list_insert_head(&new_state->arcs_list[ab->b_type], ab);
10101544Seschrock 
10111544Seschrock 			/* ghost elements have a ghost size */
10121544Seschrock 			if (GHOST_STATE(new_state)) {
10131544Seschrock 				ASSERT(ab->b_datacnt == 0);
10141544Seschrock 				ASSERT(ab->b_buf == NULL);
10151544Seschrock 				to_delta = ab->b_size;
10161544Seschrock 			}
10174309Smaybee 			atomic_add_64(size, to_delta);
10181544Seschrock 
10191544Seschrock 			if (use_mutex)
10203403Sbmc 				mutex_exit(&new_state->arcs_mtx);
1021789Sahrens 		}
1022789Sahrens 	}
1023789Sahrens 
1024789Sahrens 	ASSERT(!BUF_EMPTY(ab));
1025*5450Sbrendan 	if (new_state == arc_anon) {
1026789Sahrens 		buf_hash_remove(ab);
1027789Sahrens 	}
1028789Sahrens 
10291544Seschrock 	/* adjust state sizes */
10301544Seschrock 	if (to_delta)
10313403Sbmc 		atomic_add_64(&new_state->arcs_size, to_delta);
10321544Seschrock 	if (from_delta) {
10333403Sbmc 		ASSERT3U(old_state->arcs_size, >=, from_delta);
10343403Sbmc 		atomic_add_64(&old_state->arcs_size, -from_delta);
1035789Sahrens 	}
1036789Sahrens 	ab->b_state = new_state;
1037*5450Sbrendan 
1038*5450Sbrendan 	/* adjust l2arc hdr stats */
1039*5450Sbrendan 	if (new_state == arc_l2c_only)
1040*5450Sbrendan 		l2arc_hdr_stat_add();
1041*5450Sbrendan 	else if (old_state == arc_l2c_only)
1042*5450Sbrendan 		l2arc_hdr_stat_remove();
1043789Sahrens }
1044789Sahrens 
10454309Smaybee void
10464309Smaybee arc_space_consume(uint64_t space)
10474309Smaybee {
10484309Smaybee 	atomic_add_64(&arc_meta_used, space);
10494309Smaybee 	atomic_add_64(&arc_size, space);
10504309Smaybee }
10514309Smaybee 
10524309Smaybee void
10534309Smaybee arc_space_return(uint64_t space)
10544309Smaybee {
10554309Smaybee 	ASSERT(arc_meta_used >= space);
10564309Smaybee 	if (arc_meta_max < arc_meta_used)
10574309Smaybee 		arc_meta_max = arc_meta_used;
10584309Smaybee 	atomic_add_64(&arc_meta_used, -space);
10594309Smaybee 	ASSERT(arc_size >= space);
10604309Smaybee 	atomic_add_64(&arc_size, -space);
10614309Smaybee }
10624309Smaybee 
10634309Smaybee void *
10644309Smaybee arc_data_buf_alloc(uint64_t size)
10654309Smaybee {
10664309Smaybee 	if (arc_evict_needed(ARC_BUFC_DATA))
10674309Smaybee 		cv_signal(&arc_reclaim_thr_cv);
10684309Smaybee 	atomic_add_64(&arc_size, size);
10694309Smaybee 	return (zio_data_buf_alloc(size));
10704309Smaybee }
10714309Smaybee 
10724309Smaybee void
10734309Smaybee arc_data_buf_free(void *buf, uint64_t size)
10744309Smaybee {
10754309Smaybee 	zio_data_buf_free(buf, size);
10764309Smaybee 	ASSERT(arc_size >= size);
10774309Smaybee 	atomic_add_64(&arc_size, -size);
10784309Smaybee }
10794309Smaybee 
1080789Sahrens arc_buf_t *
10813290Sjohansen arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1082789Sahrens {
1083789Sahrens 	arc_buf_hdr_t *hdr;
1084789Sahrens 	arc_buf_t *buf;
1085789Sahrens 
1086789Sahrens 	ASSERT3U(size, >, 0);
1087789Sahrens 	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
1088789Sahrens 	ASSERT(BUF_EMPTY(hdr));
1089789Sahrens 	hdr->b_size = size;
10903290Sjohansen 	hdr->b_type = type;
1091789Sahrens 	hdr->b_spa = spa;
10923403Sbmc 	hdr->b_state = arc_anon;
1093789Sahrens 	hdr->b_arc_access = 0;
1094789Sahrens 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
1095789Sahrens 	buf->b_hdr = hdr;
10962688Smaybee 	buf->b_data = NULL;
10971544Seschrock 	buf->b_efunc = NULL;
10981544Seschrock 	buf->b_private = NULL;
1099789Sahrens 	buf->b_next = NULL;
1100789Sahrens 	hdr->b_buf = buf;
11012688Smaybee 	arc_get_data_buf(buf);
11021544Seschrock 	hdr->b_datacnt = 1;
1103789Sahrens 	hdr->b_flags = 0;
1104789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
1105789Sahrens 	(void) refcount_add(&hdr->b_refcnt, tag);
1106789Sahrens 
1107789Sahrens 	return (buf);
1108789Sahrens }
1109789Sahrens 
11102688Smaybee static arc_buf_t *
11112688Smaybee arc_buf_clone(arc_buf_t *from)
11121544Seschrock {
11132688Smaybee 	arc_buf_t *buf;
11142688Smaybee 	arc_buf_hdr_t *hdr = from->b_hdr;
11152688Smaybee 	uint64_t size = hdr->b_size;
11161544Seschrock 
11172688Smaybee 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
11182688Smaybee 	buf->b_hdr = hdr;
11192688Smaybee 	buf->b_data = NULL;
11202688Smaybee 	buf->b_efunc = NULL;
11212688Smaybee 	buf->b_private = NULL;
11222688Smaybee 	buf->b_next = hdr->b_buf;
11232688Smaybee 	hdr->b_buf = buf;
11242688Smaybee 	arc_get_data_buf(buf);
11252688Smaybee 	bcopy(from->b_data, buf->b_data, size);
11262688Smaybee 	hdr->b_datacnt += 1;
11272688Smaybee 	return (buf);
11281544Seschrock }
11291544Seschrock 
11301544Seschrock void
11311544Seschrock arc_buf_add_ref(arc_buf_t *buf, void* tag)
11321544Seschrock {
11332887Smaybee 	arc_buf_hdr_t *hdr;
11341544Seschrock 	kmutex_t *hash_lock;
11351544Seschrock 
11362724Smaybee 	/*
11372724Smaybee 	 * Check to see if this buffer is currently being evicted via
11382887Smaybee 	 * arc_do_user_evicts().
11392724Smaybee 	 */
11402887Smaybee 	mutex_enter(&arc_eviction_mtx);
11412887Smaybee 	hdr = buf->b_hdr;
11422887Smaybee 	if (hdr == NULL) {
11432887Smaybee 		mutex_exit(&arc_eviction_mtx);
11442724Smaybee 		return;
11452887Smaybee 	}
11462887Smaybee 	hash_lock = HDR_LOCK(hdr);
11472887Smaybee 	mutex_exit(&arc_eviction_mtx);
11482724Smaybee 
11492724Smaybee 	mutex_enter(hash_lock);
11501544Seschrock 	if (buf->b_data == NULL) {
11511544Seschrock 		/*
11521544Seschrock 		 * This buffer is evicted.
11531544Seschrock 		 */
11542724Smaybee 		mutex_exit(hash_lock);
11551544Seschrock 		return;
11561544Seschrock 	}
11571544Seschrock 
11582724Smaybee 	ASSERT(buf->b_hdr == hdr);
11593403Sbmc 	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
11601544Seschrock 	add_reference(hdr, hash_lock, tag);
11612688Smaybee 	arc_access(hdr, hash_lock);
11622688Smaybee 	mutex_exit(hash_lock);
11633403Sbmc 	ARCSTAT_BUMP(arcstat_hits);
11643403Sbmc 	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
11653403Sbmc 	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
11663403Sbmc 	    data, metadata, hits);
11671544Seschrock }
11681544Seschrock 
1169*5450Sbrendan /*
1170*5450Sbrendan  * Free the arc data buffer.  If it is an l2arc write in progress,
1171*5450Sbrendan  * the buffer is placed on l2arc_free_on_write to be freed later.
1172*5450Sbrendan  */
1173*5450Sbrendan static void
1174*5450Sbrendan arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
1175*5450Sbrendan     void *data, size_t size)
1176*5450Sbrendan {
1177*5450Sbrendan 	if (HDR_L2_WRITING(hdr)) {
1178*5450Sbrendan 		l2arc_data_free_t *df;
1179*5450Sbrendan 		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1180*5450Sbrendan 		df->l2df_data = data;
1181*5450Sbrendan 		df->l2df_size = size;
1182*5450Sbrendan 		df->l2df_func = free_func;
1183*5450Sbrendan 		mutex_enter(&l2arc_free_on_write_mtx);
1184*5450Sbrendan 		list_insert_head(l2arc_free_on_write, df);
1185*5450Sbrendan 		mutex_exit(&l2arc_free_on_write_mtx);
1186*5450Sbrendan 		ARCSTAT_BUMP(arcstat_l2_free_on_write);
1187*5450Sbrendan 	} else {
1188*5450Sbrendan 		free_func(data, size);
1189*5450Sbrendan 	}
1190*5450Sbrendan }
1191*5450Sbrendan 
1192789Sahrens static void
11932688Smaybee arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
11941544Seschrock {
11951544Seschrock 	arc_buf_t **bufp;
11961544Seschrock 
11971544Seschrock 	/* free up data associated with the buf */
11981544Seschrock 	if (buf->b_data) {
11991544Seschrock 		arc_state_t *state = buf->b_hdr->b_state;
12001544Seschrock 		uint64_t size = buf->b_hdr->b_size;
12013290Sjohansen 		arc_buf_contents_t type = buf->b_hdr->b_type;
12021544Seschrock 
12033093Sahrens 		arc_cksum_verify(buf);
12042688Smaybee 		if (!recycle) {
12053290Sjohansen 			if (type == ARC_BUFC_METADATA) {
1206*5450Sbrendan 				arc_buf_data_free(buf->b_hdr, zio_buf_free,
1207*5450Sbrendan 				    buf->b_data, size);
12084309Smaybee 				arc_space_return(size);
12093290Sjohansen 			} else {
12103290Sjohansen 				ASSERT(type == ARC_BUFC_DATA);
1211*5450Sbrendan 				arc_buf_data_free(buf->b_hdr,
1212*5450Sbrendan 				    zio_data_buf_free, buf->b_data, size);
12134309Smaybee 				atomic_add_64(&arc_size, -size);
12143290Sjohansen 			}
12152688Smaybee 		}
12161544Seschrock 		if (list_link_active(&buf->b_hdr->b_arc_node)) {
12174309Smaybee 			uint64_t *cnt = &state->arcs_lsize[type];
12184309Smaybee 
12191544Seschrock 			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
12203403Sbmc 			ASSERT(state != arc_anon);
12214309Smaybee 
12224309Smaybee 			ASSERT3U(*cnt, >=, size);
12234309Smaybee 			atomic_add_64(cnt, -size);
12241544Seschrock 		}
12253403Sbmc 		ASSERT3U(state->arcs_size, >=, size);
12263403Sbmc 		atomic_add_64(&state->arcs_size, -size);
12271544Seschrock 		buf->b_data = NULL;
12281544Seschrock 		ASSERT(buf->b_hdr->b_datacnt > 0);
12291544Seschrock 		buf->b_hdr->b_datacnt -= 1;
12301544Seschrock 	}
12311544Seschrock 
12321544Seschrock 	/* only remove the buf if requested */
12331544Seschrock 	if (!all)
12341544Seschrock 		return;
12351544Seschrock 
12361544Seschrock 	/* remove the buf from the hdr list */
12371544Seschrock 	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
12381544Seschrock 		continue;
12391544Seschrock 	*bufp = buf->b_next;
12401544Seschrock 
12411544Seschrock 	ASSERT(buf->b_efunc == NULL);
12421544Seschrock 
12431544Seschrock 	/* clean up the buf */
12441544Seschrock 	buf->b_hdr = NULL;
12451544Seschrock 	kmem_cache_free(buf_cache, buf);
12461544Seschrock }
12471544Seschrock 
12481544Seschrock static void
12491544Seschrock arc_hdr_destroy(arc_buf_hdr_t *hdr)
1250789Sahrens {
1251789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt));
12523403Sbmc 	ASSERT3P(hdr->b_state, ==, arc_anon);
12531544Seschrock 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1254789Sahrens 
1255*5450Sbrendan 	if (hdr->b_l2hdr != NULL) {
1256*5450Sbrendan 		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
1257*5450Sbrendan 			/*
1258*5450Sbrendan 			 * To prevent arc_free() and l2arc_evict() from
1259*5450Sbrendan 			 * attempting to free the same buffer at the same time,
1260*5450Sbrendan 			 * a FREE_IN_PROGRESS flag is given to arc_free() to
1261*5450Sbrendan 			 * give it priority.  l2arc_evict() can't destroy this
1262*5450Sbrendan 			 * header while we are waiting on l2arc_buflist_mtx.
1263*5450Sbrendan 			 */
1264*5450Sbrendan 			mutex_enter(&l2arc_buflist_mtx);
1265*5450Sbrendan 			ASSERT(hdr->b_l2hdr != NULL);
1266*5450Sbrendan 
1267*5450Sbrendan 			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
1268*5450Sbrendan 			mutex_exit(&l2arc_buflist_mtx);
1269*5450Sbrendan 		} else {
1270*5450Sbrendan 			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
1271*5450Sbrendan 		}
1272*5450Sbrendan 		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1273*5450Sbrendan 		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
1274*5450Sbrendan 		if (hdr->b_state == arc_l2c_only)
1275*5450Sbrendan 			l2arc_hdr_stat_remove();
1276*5450Sbrendan 		hdr->b_l2hdr = NULL;
1277*5450Sbrendan 	}
1278*5450Sbrendan 
1279789Sahrens 	if (!BUF_EMPTY(hdr)) {
12801544Seschrock 		ASSERT(!HDR_IN_HASH_TABLE(hdr));
1281789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
1282789Sahrens 		hdr->b_birth = 0;
1283789Sahrens 		hdr->b_cksum0 = 0;
1284789Sahrens 	}
12851544Seschrock 	while (hdr->b_buf) {
1286789Sahrens 		arc_buf_t *buf = hdr->b_buf;
1287789Sahrens 
12881544Seschrock 		if (buf->b_efunc) {
12891544Seschrock 			mutex_enter(&arc_eviction_mtx);
12901544Seschrock 			ASSERT(buf->b_hdr != NULL);
12912688Smaybee 			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
12921544Seschrock 			hdr->b_buf = buf->b_next;
12932887Smaybee 			buf->b_hdr = &arc_eviction_hdr;
12941544Seschrock 			buf->b_next = arc_eviction_list;
12951544Seschrock 			arc_eviction_list = buf;
12961544Seschrock 			mutex_exit(&arc_eviction_mtx);
12971544Seschrock 		} else {
12982688Smaybee 			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
12991544Seschrock 		}
1300789Sahrens 	}
13013093Sahrens 	if (hdr->b_freeze_cksum != NULL) {
13023093Sahrens 		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
13033093Sahrens 		hdr->b_freeze_cksum = NULL;
13043093Sahrens 	}
13051544Seschrock 
1306789Sahrens 	ASSERT(!list_link_active(&hdr->b_arc_node));
1307789Sahrens 	ASSERT3P(hdr->b_hash_next, ==, NULL);
1308789Sahrens 	ASSERT3P(hdr->b_acb, ==, NULL);
1309789Sahrens 	kmem_cache_free(hdr_cache, hdr);
1310789Sahrens }
1311789Sahrens 
1312789Sahrens void
1313789Sahrens arc_buf_free(arc_buf_t *buf, void *tag)
1314789Sahrens {
1315789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
13163403Sbmc 	int hashed = hdr->b_state != arc_anon;
13171544Seschrock 
13181544Seschrock 	ASSERT(buf->b_efunc == NULL);
13191544Seschrock 	ASSERT(buf->b_data != NULL);
13201544Seschrock 
13211544Seschrock 	if (hashed) {
13221544Seschrock 		kmutex_t *hash_lock = HDR_LOCK(hdr);
13231544Seschrock 
13241544Seschrock 		mutex_enter(hash_lock);
13251544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
13261544Seschrock 		if (hdr->b_datacnt > 1)
13272688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
13281544Seschrock 		else
13291544Seschrock 			hdr->b_flags |= ARC_BUF_AVAILABLE;
13301544Seschrock 		mutex_exit(hash_lock);
13311544Seschrock 	} else if (HDR_IO_IN_PROGRESS(hdr)) {
13321544Seschrock 		int destroy_hdr;
13331544Seschrock 		/*
13341544Seschrock 		 * We are in the middle of an async write.  Don't destroy
13351544Seschrock 		 * this buffer unless the write completes before we finish
13361544Seschrock 		 * decrementing the reference count.
13371544Seschrock 		 */
13381544Seschrock 		mutex_enter(&arc_eviction_mtx);
13391544Seschrock 		(void) remove_reference(hdr, NULL, tag);
13401544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
13411544Seschrock 		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
13421544Seschrock 		mutex_exit(&arc_eviction_mtx);
13431544Seschrock 		if (destroy_hdr)
13441544Seschrock 			arc_hdr_destroy(hdr);
13451544Seschrock 	} else {
13461544Seschrock 		if (remove_reference(hdr, NULL, tag) > 0) {
13471544Seschrock 			ASSERT(HDR_IO_ERROR(hdr));
13482688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
13491544Seschrock 		} else {
13501544Seschrock 			arc_hdr_destroy(hdr);
13511544Seschrock 		}
13521544Seschrock 	}
13531544Seschrock }
13541544Seschrock 
13551544Seschrock int
13561544Seschrock arc_buf_remove_ref(arc_buf_t *buf, void* tag)
13571544Seschrock {
13581544Seschrock 	arc_buf_hdr_t *hdr = buf->b_hdr;
1359789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
13601544Seschrock 	int no_callback = (buf->b_efunc == NULL);
13611544Seschrock 
13623403Sbmc 	if (hdr->b_state == arc_anon) {
13631544Seschrock 		arc_buf_free(buf, tag);
13641544Seschrock 		return (no_callback);
13651544Seschrock 	}
1366789Sahrens 
1367789Sahrens 	mutex_enter(hash_lock);
13683403Sbmc 	ASSERT(hdr->b_state != arc_anon);
13691544Seschrock 	ASSERT(buf->b_data != NULL);
1370789Sahrens 
13711544Seschrock 	(void) remove_reference(hdr, hash_lock, tag);
13721544Seschrock 	if (hdr->b_datacnt > 1) {
13731544Seschrock 		if (no_callback)
13742688Smaybee 			arc_buf_destroy(buf, FALSE, TRUE);
13751544Seschrock 	} else if (no_callback) {
13761544Seschrock 		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
13771544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
1378789Sahrens 	}
13791544Seschrock 	ASSERT(no_callback || hdr->b_datacnt > 1 ||
13801544Seschrock 	    refcount_is_zero(&hdr->b_refcnt));
1381789Sahrens 	mutex_exit(hash_lock);
13821544Seschrock 	return (no_callback);
1383789Sahrens }
1384789Sahrens 
1385789Sahrens int
1386789Sahrens arc_buf_size(arc_buf_t *buf)
1387789Sahrens {
1388789Sahrens 	return (buf->b_hdr->b_size);
1389789Sahrens }
1390789Sahrens 
1391789Sahrens /*
1392789Sahrens  * Evict buffers from list until we've removed the specified number of
1393789Sahrens  * bytes.  Move the removed buffers to the appropriate evict state.
13942688Smaybee  * If the recycle flag is set, then attempt to "recycle" a buffer:
13952688Smaybee  * - look for a buffer to evict that is `bytes' long.
13962688Smaybee  * - return the data block from this buffer rather than freeing it.
13972688Smaybee  * This flag is used by callers that are trying to make space for a
13982688Smaybee  * new buffer in a full arc cache.
1399789Sahrens  */
14002688Smaybee static void *
14013290Sjohansen arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
14023290Sjohansen     arc_buf_contents_t type)
1403789Sahrens {
1404789Sahrens 	arc_state_t *evicted_state;
14052688Smaybee 	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
14062918Smaybee 	arc_buf_hdr_t *ab, *ab_prev = NULL;
14074309Smaybee 	list_t *list = &state->arcs_list[type];
1408789Sahrens 	kmutex_t *hash_lock;
14092688Smaybee 	boolean_t have_lock;
14102918Smaybee 	void *stolen = NULL;
1411789Sahrens 
14123403Sbmc 	ASSERT(state == arc_mru || state == arc_mfu);
1413789Sahrens 
14143403Sbmc 	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1415789Sahrens 
14163403Sbmc 	mutex_enter(&state->arcs_mtx);
14173403Sbmc 	mutex_enter(&evicted_state->arcs_mtx);
1418789Sahrens 
14194309Smaybee 	for (ab = list_tail(list); ab; ab = ab_prev) {
14204309Smaybee 		ab_prev = list_prev(list, ab);
14212391Smaybee 		/* prefetch buffers have a minimum lifespan */
14222688Smaybee 		if (HDR_IO_IN_PROGRESS(ab) ||
14232688Smaybee 		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
14242688Smaybee 		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
14252391Smaybee 			skipped++;
14262391Smaybee 			continue;
14272391Smaybee 		}
14282918Smaybee 		/* "lookahead" for better eviction candidate */
14292918Smaybee 		if (recycle && ab->b_size != bytes &&
14302918Smaybee 		    ab_prev && ab_prev->b_size == bytes)
14312688Smaybee 			continue;
1432789Sahrens 		hash_lock = HDR_LOCK(ab);
14332688Smaybee 		have_lock = MUTEX_HELD(hash_lock);
14342688Smaybee 		if (have_lock || mutex_tryenter(hash_lock)) {
1435789Sahrens 			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
14361544Seschrock 			ASSERT(ab->b_datacnt > 0);
14371544Seschrock 			while (ab->b_buf) {
14381544Seschrock 				arc_buf_t *buf = ab->b_buf;
14392688Smaybee 				if (buf->b_data) {
14401544Seschrock 					bytes_evicted += ab->b_size;
14413290Sjohansen 					if (recycle && ab->b_type == type &&
1442*5450Sbrendan 					    ab->b_size == bytes &&
1443*5450Sbrendan 					    !HDR_L2_WRITING(ab)) {
14442918Smaybee 						stolen = buf->b_data;
14452918Smaybee 						recycle = FALSE;
14462918Smaybee 					}
14472688Smaybee 				}
14481544Seschrock 				if (buf->b_efunc) {
14491544Seschrock 					mutex_enter(&arc_eviction_mtx);
14502918Smaybee 					arc_buf_destroy(buf,
14512918Smaybee 					    buf->b_data == stolen, FALSE);
14521544Seschrock 					ab->b_buf = buf->b_next;
14532887Smaybee 					buf->b_hdr = &arc_eviction_hdr;
14541544Seschrock 					buf->b_next = arc_eviction_list;
14551544Seschrock 					arc_eviction_list = buf;
14561544Seschrock 					mutex_exit(&arc_eviction_mtx);
14571544Seschrock 				} else {
14582918Smaybee 					arc_buf_destroy(buf,
14592918Smaybee 					    buf->b_data == stolen, TRUE);
14601544Seschrock 				}
14611544Seschrock 			}
14621544Seschrock 			ASSERT(ab->b_datacnt == 0);
1463789Sahrens 			arc_change_state(evicted_state, ab, hash_lock);
14641544Seschrock 			ASSERT(HDR_IN_HASH_TABLE(ab));
1465*5450Sbrendan 			ab->b_flags |= ARC_IN_HASH_TABLE;
1466*5450Sbrendan 			ab->b_flags &= ~ARC_BUF_AVAILABLE;
1467789Sahrens 			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
14682688Smaybee 			if (!have_lock)
14692688Smaybee 				mutex_exit(hash_lock);
14701544Seschrock 			if (bytes >= 0 && bytes_evicted >= bytes)
1471789Sahrens 				break;
1472789Sahrens 		} else {
14732688Smaybee 			missed += 1;
1474789Sahrens 		}
1475789Sahrens 	}
14763403Sbmc 
14773403Sbmc 	mutex_exit(&evicted_state->arcs_mtx);
14783403Sbmc 	mutex_exit(&state->arcs_mtx);
1479789Sahrens 
1480789Sahrens 	if (bytes_evicted < bytes)
1481789Sahrens 		dprintf("only evicted %lld bytes from %x",
1482789Sahrens 		    (longlong_t)bytes_evicted, state);
1483789Sahrens 
14842688Smaybee 	if (skipped)
14853403Sbmc 		ARCSTAT_INCR(arcstat_evict_skip, skipped);
14863403Sbmc 
14872688Smaybee 	if (missed)
14883403Sbmc 		ARCSTAT_INCR(arcstat_mutex_miss, missed);
14893403Sbmc 
14904709Smaybee 	/*
14914709Smaybee 	 * We have just evicted some data into the ghost state; make
14924709Smaybee 	 * sure we also adjust the ghost state size if necessary.
14934709Smaybee 	 */
14944709Smaybee 	if (arc_no_grow &&
14954709Smaybee 	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
14964709Smaybee 		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
14974709Smaybee 		    arc_mru_ghost->arcs_size - arc_c;
14984709Smaybee 
14994709Smaybee 		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
15004709Smaybee 			int64_t todelete =
15014709Smaybee 			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
15024709Smaybee 			arc_evict_ghost(arc_mru_ghost, todelete);
15034709Smaybee 		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
15044709Smaybee 			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
15054709Smaybee 			    arc_mru_ghost->arcs_size +
15064709Smaybee 			    arc_mfu_ghost->arcs_size - arc_c);
15074709Smaybee 			arc_evict_ghost(arc_mfu_ghost, todelete);
15084709Smaybee 		}
15094709Smaybee 	}
15104709Smaybee 
15112918Smaybee 	return (stolen);
1512789Sahrens }
1513789Sahrens 
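/*
 * Illustrative sketch (not part of the ARC implementation): the
 * "recycle" lookahead rule from arc_evict() above, reduced to pure
 * logic.  A wrong-sized candidate is skipped when the next (older)
 * header is an exact fit for the requested size.  All names here are
 * hypothetical.
 */
#if 0
static boolean_t
recycle_skip(uint64_t cand_size, boolean_t have_prev, uint64_t prev_size,
    uint64_t want)
{
	/* skip a wrong-sized candidate when an exact fit is next */
	return (cand_size != want && have_prev && prev_size == want);
}
#endif
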
1514789Sahrens /*
1515789Sahrens  * Remove buffers from list until we've removed the specified number of
1516789Sahrens  * bytes.  Destroy the buffers that are removed.
1517789Sahrens  */
1518789Sahrens static void
15191544Seschrock arc_evict_ghost(arc_state_t *state, int64_t bytes)
1520789Sahrens {
1521789Sahrens 	arc_buf_hdr_t *ab, *ab_prev;
15224309Smaybee 	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1523789Sahrens 	kmutex_t *hash_lock;
15241544Seschrock 	uint64_t bytes_deleted = 0;
15253700Sek110237 	uint64_t bufs_skipped = 0;
1526789Sahrens 
15271544Seschrock 	ASSERT(GHOST_STATE(state));
1528789Sahrens top:
15293403Sbmc 	mutex_enter(&state->arcs_mtx);
15304309Smaybee 	for (ab = list_tail(list); ab; ab = ab_prev) {
15314309Smaybee 		ab_prev = list_prev(list, ab);
1532789Sahrens 		hash_lock = HDR_LOCK(ab);
1533789Sahrens 		if (mutex_tryenter(hash_lock)) {
15342391Smaybee 			ASSERT(!HDR_IO_IN_PROGRESS(ab));
15351544Seschrock 			ASSERT(ab->b_buf == NULL);
15363403Sbmc 			ARCSTAT_BUMP(arcstat_deleted);
15371544Seschrock 			bytes_deleted += ab->b_size;
1538*5450Sbrendan 
1539*5450Sbrendan 			if (ab->b_l2hdr != NULL) {
1540*5450Sbrendan 				/*
1541*5450Sbrendan 				 * This buffer is cached on the 2nd Level ARC;
1542*5450Sbrendan 				 * don't destroy the header.
1543*5450Sbrendan 				 */
1544*5450Sbrendan 				arc_change_state(arc_l2c_only, ab, hash_lock);
1545*5450Sbrendan 				mutex_exit(hash_lock);
1546*5450Sbrendan 			} else {
1547*5450Sbrendan 				arc_change_state(arc_anon, ab, hash_lock);
1548*5450Sbrendan 				mutex_exit(hash_lock);
1549*5450Sbrendan 				arc_hdr_destroy(ab);
1550*5450Sbrendan 			}
1551*5450Sbrendan 
1552789Sahrens 			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1553789Sahrens 			if (bytes >= 0 && bytes_deleted >= bytes)
1554789Sahrens 				break;
1555789Sahrens 		} else {
1556789Sahrens 			if (bytes < 0) {
15573403Sbmc 				mutex_exit(&state->arcs_mtx);
1558789Sahrens 				mutex_enter(hash_lock);
1559789Sahrens 				mutex_exit(hash_lock);
1560789Sahrens 				goto top;
1561789Sahrens 			}
1562789Sahrens 			bufs_skipped += 1;
1563789Sahrens 		}
1564789Sahrens 	}
15653403Sbmc 	mutex_exit(&state->arcs_mtx);
1566789Sahrens 
15674309Smaybee 	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
15684309Smaybee 	    (bytes < 0 || bytes_deleted < bytes)) {
15694309Smaybee 		list = &state->arcs_list[ARC_BUFC_METADATA];
15704309Smaybee 		goto top;
15714309Smaybee 	}
15724309Smaybee 
1573789Sahrens 	if (bufs_skipped) {
15743403Sbmc 		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1575789Sahrens 		ASSERT(bytes >= 0);
1576789Sahrens 	}
1577789Sahrens 
1578789Sahrens 	if (bytes_deleted < bytes)
1579789Sahrens 		dprintf("only deleted %lld bytes from %p",
1580789Sahrens 		    (longlong_t)bytes_deleted, state);
1581789Sahrens }
1582789Sahrens 
1583789Sahrens static void
1584789Sahrens arc_adjust(void)
1585789Sahrens {
15863403Sbmc 	int64_t top_sz, mru_over, arc_over, todelete;
1587789Sahrens 
15883403Sbmc 	top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
1589789Sahrens 
15904309Smaybee 	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
15914309Smaybee 		int64_t toevict =
15924309Smaybee 		    MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p);
15934309Smaybee 		(void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_DATA);
15944309Smaybee 		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
15954309Smaybee 	}
15964309Smaybee 
15974309Smaybee 	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
15984309Smaybee 		int64_t toevict =
15994309Smaybee 		    MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p);
16004309Smaybee 		(void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_METADATA);
16013403Sbmc 		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
1602789Sahrens 	}
1603789Sahrens 
16043403Sbmc 	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;
1605789Sahrens 
1606789Sahrens 	if (mru_over > 0) {
16074309Smaybee 		if (arc_mru_ghost->arcs_size > 0) {
16084309Smaybee 			todelete = MIN(arc_mru_ghost->arcs_size, mru_over);
16093403Sbmc 			arc_evict_ghost(arc_mru_ghost, todelete);
1610789Sahrens 		}
1611789Sahrens 	}
1612789Sahrens 
16133403Sbmc 	if ((arc_over = arc_size - arc_c) > 0) {
16141544Seschrock 		int64_t tbl_over;
1615789Sahrens 
16164309Smaybee 		if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
16174309Smaybee 			int64_t toevict =
16184309Smaybee 			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over);
16193403Sbmc 			(void) arc_evict(arc_mfu, toevict, FALSE,
16204309Smaybee 			    ARC_BUFC_DATA);
16214309Smaybee 			arc_over = arc_size - arc_c;
1622789Sahrens 		}
1623789Sahrens 
16244309Smaybee 		if (arc_over > 0 &&
16254309Smaybee 		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
16264309Smaybee 			int64_t toevict =
16274309Smaybee 			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA],
16284309Smaybee 			    arc_over);
16294309Smaybee 			(void) arc_evict(arc_mfu, toevict, FALSE,
16304309Smaybee 			    ARC_BUFC_METADATA);
16314309Smaybee 		}
16324309Smaybee 
16334309Smaybee 		tbl_over = arc_size + arc_mru_ghost->arcs_size +
16344309Smaybee 		    arc_mfu_ghost->arcs_size - arc_c * 2;
16354309Smaybee 
16364309Smaybee 		if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) {
16374309Smaybee 			todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over);
16383403Sbmc 			arc_evict_ghost(arc_mfu_ghost, todelete);
1639789Sahrens 		}
1640789Sahrens 	}
1641789Sahrens }
1642789Sahrens 
16431544Seschrock static void
16441544Seschrock arc_do_user_evicts(void)
16451544Seschrock {
16461544Seschrock 	mutex_enter(&arc_eviction_mtx);
16471544Seschrock 	while (arc_eviction_list != NULL) {
16481544Seschrock 		arc_buf_t *buf = arc_eviction_list;
16491544Seschrock 		arc_eviction_list = buf->b_next;
16501544Seschrock 		buf->b_hdr = NULL;
16511544Seschrock 		mutex_exit(&arc_eviction_mtx);
16521544Seschrock 
16531819Smaybee 		if (buf->b_efunc != NULL)
16541819Smaybee 			VERIFY(buf->b_efunc(buf) == 0);
16551544Seschrock 
16561544Seschrock 		buf->b_efunc = NULL;
16571544Seschrock 		buf->b_private = NULL;
16581544Seschrock 		kmem_cache_free(buf_cache, buf);
16591544Seschrock 		mutex_enter(&arc_eviction_mtx);
16601544Seschrock 	}
16611544Seschrock 	mutex_exit(&arc_eviction_mtx);
16621544Seschrock }
16631544Seschrock 
1664789Sahrens /*
1665789Sahrens  * Flush all *evictable* data from the cache.
1666789Sahrens  * NOTE: this will not touch "active" (i.e. referenced) data.
1667789Sahrens  */
1668789Sahrens void
1669789Sahrens arc_flush(void)
1670789Sahrens {
16714309Smaybee 	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA]))
16724309Smaybee 		(void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_DATA);
16734309Smaybee 	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA]))
16744309Smaybee 		(void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_METADATA);
16754309Smaybee 	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA]))
16764309Smaybee 		(void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_DATA);
16774309Smaybee 	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA]))
16784309Smaybee 		(void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_METADATA);
1679789Sahrens 
16803403Sbmc 	arc_evict_ghost(arc_mru_ghost, -1);
16813403Sbmc 	arc_evict_ghost(arc_mfu_ghost, -1);
16821544Seschrock 
16831544Seschrock 	mutex_enter(&arc_reclaim_thr_lock);
16841544Seschrock 	arc_do_user_evicts();
16851544Seschrock 	mutex_exit(&arc_reclaim_thr_lock);
16861544Seschrock 	ASSERT(arc_eviction_list == NULL);
1687789Sahrens }
1688789Sahrens 
16893158Smaybee int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */
16902391Smaybee 
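/*
 * Illustrative sketch (not part of the ARC implementation): what the
 * shift means in bytes.  With arc_shrink_shift = 5, one arc_shrink()
 * pass gives back arc_c >> 5, i.e. 1/32nd of the target size, so a
 * hypothetical 1GB target sheds 32MB per pass.
 */
#if 0
static uint64_t
shrink_amount(uint64_t c, int shift)
{
	return (c >> shift);	/* (1ULL << 30) >> 5 == 32MB */
}
#endif
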
1691789Sahrens void
16923158Smaybee arc_shrink(void)
1693789Sahrens {
16943403Sbmc 	if (arc_c > arc_c_min) {
16953158Smaybee 		uint64_t to_free;
1696789Sahrens 
16972048Sstans #ifdef _KERNEL
16983403Sbmc 		to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
16992048Sstans #else
17003403Sbmc 		to_free = arc_c >> arc_shrink_shift;
17012048Sstans #endif
17023403Sbmc 		if (arc_c > arc_c_min + to_free)
17033403Sbmc 			atomic_add_64(&arc_c, -to_free);
17043158Smaybee 		else
17053403Sbmc 			arc_c = arc_c_min;
17062048Sstans 
17073403Sbmc 		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
17083403Sbmc 		if (arc_c > arc_size)
17093403Sbmc 			arc_c = MAX(arc_size, arc_c_min);
17103403Sbmc 		if (arc_p > arc_c)
17113403Sbmc 			arc_p = (arc_c >> 1);
17123403Sbmc 		ASSERT(arc_c >= arc_c_min);
17133403Sbmc 		ASSERT((int64_t)arc_p >= 0);
17143158Smaybee 	}
1715789Sahrens 
17163403Sbmc 	if (arc_size > arc_c)
17173158Smaybee 		arc_adjust();
1718789Sahrens }
1719789Sahrens 
1720789Sahrens static int
1721789Sahrens arc_reclaim_needed(void)
1722789Sahrens {
1723789Sahrens 	uint64_t extra;
1724789Sahrens 
1725789Sahrens #ifdef _KERNEL
17262048Sstans 
17272048Sstans 	if (needfree)
17282048Sstans 		return (1);
17292048Sstans 
1730789Sahrens 	/*
1731789Sahrens 	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1732789Sahrens 	 */
1733789Sahrens 	extra = desfree;
1734789Sahrens 
1735789Sahrens 	/*
1736789Sahrens 	 * check that we're out of range of the pageout scanner.  It starts to
1737789Sahrens 	 * schedule paging if freemem is less than lotsfree and needfree.
1738789Sahrens 	 * lotsfree is the high-water mark for pageout, and needfree is the
1739789Sahrens 	 * number of needed free pages.  We add extra pages here to make sure
1740789Sahrens 	 * the scanner doesn't start up while we're freeing memory.
1741789Sahrens 	 */
1742789Sahrens 	if (freemem < lotsfree + needfree + extra)
1743789Sahrens 		return (1);
1744789Sahrens 
1745789Sahrens 	/*
1746789Sahrens 	 * check to make sure that swapfs has enough space so that anon
1747*5450Sbrendan 	 * reservations can still succeed. anon_resvmem() checks that the
1748789Sahrens  * availrmem is greater than swapfs_minfree plus the number of reserved
1749789Sahrens 	 * swap pages.  We also add a bit of extra here just to prevent
1750789Sahrens 	 * circumstances from getting really dire.
1751789Sahrens 	 */
1752789Sahrens 	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1753789Sahrens 		return (1);
1754789Sahrens 
17551936Smaybee #if defined(__i386)
1756789Sahrens 	/*
1757789Sahrens 	 * If we're on an i386 platform, it's possible that we'll exhaust the
1758789Sahrens 	 * kernel heap space before we ever run out of available physical
1759789Sahrens 	 * memory.  Most checks of the size of the heap_area compare against
1760789Sahrens 	 * tune.t_minarmem, which is the minimum available real memory that we
1761789Sahrens 	 * can have in the system.  However, this is generally fixed at 25 pages
1762789Sahrens 	 * which is so low that it's useless.  In this comparison, we seek to
1763789Sahrens  * calculate the total heap size, and reclaim if more than 3/4ths of the
1764*5450Sbrendan  * heap is allocated (or, equivalently, if less than 1/4th is
1765789Sahrens  * free).
1766789Sahrens 	 */
1767789Sahrens 	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1768789Sahrens 	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1769789Sahrens 		return (1);
1770789Sahrens #endif
1771789Sahrens 
1772789Sahrens #else
1773789Sahrens 	if (spa_get_random(100) == 0)
1774789Sahrens 		return (1);
1775789Sahrens #endif
1776789Sahrens 	return (0);
1777789Sahrens }
1778789Sahrens 
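/*
 * Illustrative sketch (not part of the ARC implementation): the
 * page-count test above as a pure predicate.  Reclaim is signaled once
 * free memory falls within 'extra' (desfree) pages of the pageout
 * scanner's wakeup point.  Parameter names are hypothetical.
 */
#if 0
static int
low_memory(pgcnt_t freemem_pg, pgcnt_t lotsfree_pg, pgcnt_t needfree_pg,
    pgcnt_t extra_pg)
{
	return (freemem_pg < lotsfree_pg + needfree_pg + extra_pg);
}
#endif
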
1779789Sahrens static void
1780789Sahrens arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1781789Sahrens {
1782789Sahrens 	size_t			i;
1783789Sahrens 	kmem_cache_t		*prev_cache = NULL;
17843290Sjohansen 	kmem_cache_t		*prev_data_cache = NULL;
1785789Sahrens 	extern kmem_cache_t	*zio_buf_cache[];
17863290Sjohansen 	extern kmem_cache_t	*zio_data_buf_cache[];
1787789Sahrens 
17881484Sek110237 #ifdef _KERNEL
17894309Smaybee 	if (arc_meta_used >= arc_meta_limit) {
17904309Smaybee 		/*
17914309Smaybee 		 * We are exceeding our meta-data cache limit.
17924309Smaybee 		 * Purge some DNLC entries to release holds on meta-data.
17934309Smaybee 		 */
17944309Smaybee 		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
17954309Smaybee 	}
17961936Smaybee #if defined(__i386)
17971936Smaybee 	/*
17981936Smaybee 	 * Reclaim unused memory from all kmem caches.
17991936Smaybee 	 */
18001936Smaybee 	kmem_reap();
18011936Smaybee #endif
18021484Sek110237 #endif
18031484Sek110237 
1804789Sahrens 	/*
1805*5450Sbrendan 	 * An aggressive reclamation will shrink the cache size as well as
18061544Seschrock 	 * reap free buffers from the arc kmem caches.
1807789Sahrens 	 */
1808789Sahrens 	if (strat == ARC_RECLAIM_AGGR)
18093158Smaybee 		arc_shrink();
1810789Sahrens 
1811789Sahrens 	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1812789Sahrens 		if (zio_buf_cache[i] != prev_cache) {
1813789Sahrens 			prev_cache = zio_buf_cache[i];
1814789Sahrens 			kmem_cache_reap_now(zio_buf_cache[i]);
1815789Sahrens 		}
18163290Sjohansen 		if (zio_data_buf_cache[i] != prev_data_cache) {
18173290Sjohansen 			prev_data_cache = zio_data_buf_cache[i];
18183290Sjohansen 			kmem_cache_reap_now(zio_data_buf_cache[i]);
18193290Sjohansen 		}
1820789Sahrens 	}
18211544Seschrock 	kmem_cache_reap_now(buf_cache);
18221544Seschrock 	kmem_cache_reap_now(hdr_cache);
1823789Sahrens }
1824789Sahrens 
1825789Sahrens static void
1826789Sahrens arc_reclaim_thread(void)
1827789Sahrens {
1828789Sahrens 	clock_t			growtime = 0;
1829789Sahrens 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
1830789Sahrens 	callb_cpr_t		cpr;
1831789Sahrens 
1832789Sahrens 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1833789Sahrens 
1834789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
1835789Sahrens 	while (arc_thread_exit == 0) {
1836789Sahrens 		if (arc_reclaim_needed()) {
1837789Sahrens 
18383403Sbmc 			if (arc_no_grow) {
1839789Sahrens 				if (last_reclaim == ARC_RECLAIM_CONS) {
1840789Sahrens 					last_reclaim = ARC_RECLAIM_AGGR;
1841789Sahrens 				} else {
1842789Sahrens 					last_reclaim = ARC_RECLAIM_CONS;
1843789Sahrens 				}
1844789Sahrens 			} else {
18453403Sbmc 				arc_no_grow = TRUE;
1846789Sahrens 				last_reclaim = ARC_RECLAIM_AGGR;
1847789Sahrens 				membar_producer();
1848789Sahrens 			}
1849789Sahrens 
1850789Sahrens 			/* reset the growth delay for every reclaim */
1851789Sahrens 			growtime = lbolt + (arc_grow_retry * hz);
1852789Sahrens 
1853789Sahrens 			arc_kmem_reap_now(last_reclaim);
1854789Sahrens 
18554309Smaybee 		} else if (arc_no_grow && lbolt >= growtime) {
18563403Sbmc 			arc_no_grow = FALSE;
1857789Sahrens 		}
1858789Sahrens 
18593403Sbmc 		if (2 * arc_c < arc_size +
18603403Sbmc 		    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
18613298Smaybee 			arc_adjust();
18623298Smaybee 
18631544Seschrock 		if (arc_eviction_list != NULL)
18641544Seschrock 			arc_do_user_evicts();
18651544Seschrock 
1866789Sahrens 		/* block until needed, or one second, whichever is shorter */
1867789Sahrens 		CALLB_CPR_SAFE_BEGIN(&cpr);
1868789Sahrens 		(void) cv_timedwait(&arc_reclaim_thr_cv,
1869789Sahrens 		    &arc_reclaim_thr_lock, (lbolt + hz));
1870789Sahrens 		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1871789Sahrens 	}
1872789Sahrens 
1873789Sahrens 	arc_thread_exit = 0;
1874789Sahrens 	cv_broadcast(&arc_reclaim_thr_cv);
1875789Sahrens 	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1876789Sahrens 	thread_exit();
1877789Sahrens }
1878789Sahrens 
18791544Seschrock /*
18801544Seschrock  * Adapt arc info given the number of bytes we are trying to add and
18811544Seschrock  * the state that we are coming from.  This function is only called
18821544Seschrock  * when we are adding new content to the cache.
18831544Seschrock  */
1884789Sahrens static void
18851544Seschrock arc_adapt(int bytes, arc_state_t *state)
1886789Sahrens {
18871544Seschrock 	int mult;
18881544Seschrock 
1889*5450Sbrendan 	if (state == arc_l2c_only)
1890*5450Sbrendan 		return;
1891*5450Sbrendan 
18921544Seschrock 	ASSERT(bytes > 0);
1893789Sahrens 	/*
18941544Seschrock 	 * Adapt the target size of the MRU list:
18951544Seschrock 	 *	- if we just hit in the MRU ghost list, then increase
18961544Seschrock 	 *	  the target size of the MRU list.
18971544Seschrock 	 *	- if we just hit in the MFU ghost list, then increase
18981544Seschrock 	 *	  the target size of the MFU list by decreasing the
18991544Seschrock 	 *	  target size of the MRU list.
1900789Sahrens 	 */
19013403Sbmc 	if (state == arc_mru_ghost) {
19023403Sbmc 		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
19033403Sbmc 		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
19041544Seschrock 
19053403Sbmc 		arc_p = MIN(arc_c, arc_p + bytes * mult);
19063403Sbmc 	} else if (state == arc_mfu_ghost) {
19073403Sbmc 		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
19083403Sbmc 		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
19091544Seschrock 
19103403Sbmc 		arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
19111544Seschrock 	}
19123403Sbmc 	ASSERT((int64_t)arc_p >= 0);
1913789Sahrens 
1914789Sahrens 	if (arc_reclaim_needed()) {
1915789Sahrens 		cv_signal(&arc_reclaim_thr_cv);
1916789Sahrens 		return;
1917789Sahrens 	}
1918789Sahrens 
19193403Sbmc 	if (arc_no_grow)
1920789Sahrens 		return;
1921789Sahrens 
19223403Sbmc 	if (arc_c >= arc_c_max)
19231544Seschrock 		return;
19241544Seschrock 
1925789Sahrens 	/*
19261544Seschrock 	 * If we're within (2 * maxblocksize) bytes of the target
19271544Seschrock 	 * cache size, increment the target cache size
1928789Sahrens 	 */
19293403Sbmc 	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
19303403Sbmc 		atomic_add_64(&arc_c, (int64_t)bytes);
19313403Sbmc 		if (arc_c > arc_c_max)
19323403Sbmc 			arc_c = arc_c_max;
19333403Sbmc 		else if (state == arc_anon)
19343403Sbmc 			atomic_add_64(&arc_p, (int64_t)bytes);
19353403Sbmc 		if (arc_p > arc_c)
19363403Sbmc 			arc_p = arc_c;
1937789Sahrens 	}
19383403Sbmc 	ASSERT((int64_t)arc_p >= 0);
1939789Sahrens }
1940789Sahrens 
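/*
 * Illustrative sketch (not part of the ARC implementation): the arc_p
 * adjustment above with hypothetical numbers.  On a hit in the MRU
 * ghost list when the MFU ghost list is four times its size, mult is
 * 4 and arc_p grows by four times the buffer size, capped at arc_c; a
 * hit in the larger of the two ghost lists moves arc_p by 1x only.
 */
#if 0
static uint64_t
mru_ghost_hit_adjust(uint64_t p, uint64_t c, uint64_t bytes,
    uint64_t mru_ghost_sz, uint64_t mfu_ghost_sz)
{
	/* a hit in the MRU ghost list implies mru_ghost_sz != 0 */
	int mult = (mru_ghost_sz >= mfu_ghost_sz) ? 1 :
	    (int)(mfu_ghost_sz / mru_ghost_sz);

	return (MIN(c, p + bytes * mult));
}
#endif
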
1941789Sahrens /*
19421544Seschrock  * Check if the cache has reached its limits and eviction is required
19431544Seschrock  * prior to insert.
1944789Sahrens  */
1945789Sahrens static int
19464309Smaybee arc_evict_needed(arc_buf_contents_t type)
1947789Sahrens {
19484309Smaybee 	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
19494309Smaybee 		return (1);
19504309Smaybee 
19514309Smaybee #ifdef _KERNEL
19524309Smaybee 	/*
19534309Smaybee 	 * If zio data pages are being allocated out of a separate heap segment,
19544309Smaybee 	 * then enforce that the size of available vmem for this area remains
19554309Smaybee 	 * above about 1/32nd free.
19564309Smaybee 	 */
19574309Smaybee 	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
19584309Smaybee 	    vmem_size(zio_arena, VMEM_FREE) <
19594309Smaybee 	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
19604309Smaybee 		return (1);
19614309Smaybee #endif
19624309Smaybee 
1963789Sahrens 	if (arc_reclaim_needed())
1964789Sahrens 		return (1);
1965789Sahrens 
19663403Sbmc 	return (arc_size > arc_c);
1967789Sahrens }
1968789Sahrens 
1969789Sahrens /*
19702688Smaybee  * The buffer, supplied as the first argument, needs a data block.
19712688Smaybee  * So, if we are at cache max, determine which cache should be victimized.
19722688Smaybee  * We have the following cases:
1973789Sahrens  *
19743403Sbmc  * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
1975789Sahrens  * In this situation if we're out of space, but the resident size of the MFU is
1976789Sahrens  * under the limit, victimize the MFU cache to satisfy this insertion request.
1977789Sahrens  *
19783403Sbmc  * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
1979789Sahrens  * Here, we've used up all of the available space for the MRU, so we need to
1980789Sahrens  * evict from our own cache instead.  Evict from the set of resident MRU
1981789Sahrens  * entries.
1982789Sahrens  *
19833403Sbmc  * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
1984789Sahrens  * c minus p represents the MFU space in the cache, since p is the size of the
1985789Sahrens  * cache that is dedicated to the MRU.  In this situation there's still space on
1986789Sahrens  * the MFU side, so the MRU side needs to be victimized.
1987789Sahrens  *
19883403Sbmc  * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
1989789Sahrens  * MFU's resident set is consuming more space than it has been allotted.  In
1990789Sahrens  * this situation, we must victimize our own cache, the MFU, for this insertion.
1991789Sahrens  */
1992789Sahrens static void
19932688Smaybee arc_get_data_buf(arc_buf_t *buf)
1994789Sahrens {
19953290Sjohansen 	arc_state_t		*state = buf->b_hdr->b_state;
19963290Sjohansen 	uint64_t		size = buf->b_hdr->b_size;
19973290Sjohansen 	arc_buf_contents_t	type = buf->b_hdr->b_type;
19982688Smaybee 
19992688Smaybee 	arc_adapt(size, state);
2000789Sahrens 
20012688Smaybee 	/*
20022688Smaybee 	 * We have not yet reached cache maximum size,
20032688Smaybee 	 * just allocate a new buffer.
20042688Smaybee 	 */
20054309Smaybee 	if (!arc_evict_needed(type)) {
20063290Sjohansen 		if (type == ARC_BUFC_METADATA) {
20073290Sjohansen 			buf->b_data = zio_buf_alloc(size);
20084309Smaybee 			arc_space_consume(size);
20093290Sjohansen 		} else {
20103290Sjohansen 			ASSERT(type == ARC_BUFC_DATA);
20113290Sjohansen 			buf->b_data = zio_data_buf_alloc(size);
20124309Smaybee 			atomic_add_64(&arc_size, size);
20133290Sjohansen 		}
20142688Smaybee 		goto out;
20152688Smaybee 	}
20162688Smaybee 
20172688Smaybee 	/*
20182688Smaybee 	 * If we are prefetching from the mfu ghost list, this buffer
20192688Smaybee 	 * will end up on the mru list; so steal space from there.
20192688Smaybee 	 * will end up on the mru list, so steal space from there.
20213403Sbmc 	if (state == arc_mfu_ghost)
20223403Sbmc 		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
20233403Sbmc 	else if (state == arc_mru_ghost)
20243403Sbmc 		state = arc_mru;
2025789Sahrens 
20263403Sbmc 	if (state == arc_mru || state == arc_anon) {
20273403Sbmc 		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
20284309Smaybee 		state = (arc_mfu->arcs_lsize[type] > 0 &&
20294309Smaybee 		    arc_p > mru_used) ? arc_mfu : arc_mru;
2030789Sahrens 	} else {
20312688Smaybee 		/* MFU cases */
20323403Sbmc 		uint64_t mfu_space = arc_c - arc_p;
20334309Smaybee 		state =  (arc_mru->arcs_lsize[type] > 0 &&
20344309Smaybee 		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
20352688Smaybee 	}
20363290Sjohansen 	if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) {
20373290Sjohansen 		if (type == ARC_BUFC_METADATA) {
20383290Sjohansen 			buf->b_data = zio_buf_alloc(size);
20394309Smaybee 			arc_space_consume(size);
20403290Sjohansen 		} else {
20413290Sjohansen 			ASSERT(type == ARC_BUFC_DATA);
20423290Sjohansen 			buf->b_data = zio_data_buf_alloc(size);
20434309Smaybee 			atomic_add_64(&arc_size, size);
20443290Sjohansen 		}
20453403Sbmc 		ARCSTAT_BUMP(arcstat_recycle_miss);
20462688Smaybee 	}
20472688Smaybee 	ASSERT(buf->b_data != NULL);
20482688Smaybee out:
20492688Smaybee 	/*
20502688Smaybee 	 * Update the state size.  Note that ghost states have a
20512688Smaybee 	 * "ghost size" and so don't need to be updated.
20522688Smaybee 	 */
20532688Smaybee 	if (!GHOST_STATE(buf->b_hdr->b_state)) {
20542688Smaybee 		arc_buf_hdr_t *hdr = buf->b_hdr;
20552688Smaybee 
20563403Sbmc 		atomic_add_64(&hdr->b_state->arcs_size, size);
20572688Smaybee 		if (list_link_active(&hdr->b_arc_node)) {
20582688Smaybee 			ASSERT(refcount_is_zero(&hdr->b_refcnt));
20594309Smaybee 			atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2060789Sahrens 		}
20613298Smaybee 		/*
20623298Smaybee 		 * If we are growing the cache, and we are adding anonymous
20633403Sbmc 		 * data, and we have outgrown arc_p, update arc_p
20643298Smaybee 		 */
20653403Sbmc 		if (arc_size < arc_c && hdr->b_state == arc_anon &&
20663403Sbmc 		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
20673403Sbmc 			arc_p = MIN(arc_c, arc_p + size);
2068789Sahrens 	}
2069789Sahrens }
2070789Sahrens 
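/*
 * Illustrative sketch (not part of the ARC implementation): the victim
 * choice made by arc_get_data_buf() above, as a pure function over the
 * sizes involved.  Returns B_TRUE when the MFU side should be
 * victimized (cases 1 and 4 in the comment above).  Parameter names
 * are hypothetical.
 */
#if 0
static boolean_t
victimize_mfu(boolean_t insert_for_mru, uint64_t p, uint64_t c,
    uint64_t mru_used, uint64_t mfu_used,
    uint64_t mru_evictable, uint64_t mfu_evictable)
{
	if (insert_for_mru) {
		/* case 1 vs. case 2: is there MRU headroom left (p)? */
		return (mfu_evictable > 0 && p > mru_used);
	}
	/* case 3 vs. case 4: is there MFU headroom left (c - p)? */
	return (!(mru_evictable > 0 && (c - p) > mfu_used));
}
#endif
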
2071789Sahrens /*
2072789Sahrens  * This routine is called whenever a buffer is accessed.
20731544Seschrock  * NOTE: the hash lock is dropped in this function.
2074789Sahrens  */
2075789Sahrens static void
20762688Smaybee arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2077789Sahrens {
2078789Sahrens 	ASSERT(MUTEX_HELD(hash_lock));
2079789Sahrens 
20803403Sbmc 	if (buf->b_state == arc_anon) {
2081789Sahrens 		/*
2082789Sahrens 		 * This buffer is not in the cache, and does not
2083789Sahrens 		 * appear in our "ghost" list.  Add the new buffer
2084789Sahrens 		 * to the MRU state.
2085789Sahrens 		 */
2086789Sahrens 
2087789Sahrens 		ASSERT(buf->b_arc_access == 0);
2088789Sahrens 		buf->b_arc_access = lbolt;
20891544Seschrock 		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
20903403Sbmc 		arc_change_state(arc_mru, buf, hash_lock);
2091789Sahrens 
20923403Sbmc 	} else if (buf->b_state == arc_mru) {
2093789Sahrens 		/*
20942391Smaybee 		 * If this buffer is here because of a prefetch, then either:
20952391Smaybee 		 * - clear the flag if this is a "referencing" read
20962391Smaybee 		 *   (any subsequent access will bump this into the MFU state).
20972391Smaybee 		 * or
20982391Smaybee 		 * - move the buffer to the head of the list if this is
20992391Smaybee 		 *   another prefetch (to make it less likely to be evicted).
2100789Sahrens 		 */
2101789Sahrens 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
21022391Smaybee 			if (refcount_count(&buf->b_refcnt) == 0) {
21032391Smaybee 				ASSERT(list_link_active(&buf->b_arc_node));
21042391Smaybee 			} else {
21052391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
21063403Sbmc 				ARCSTAT_BUMP(arcstat_mru_hits);
21072391Smaybee 			}
21082391Smaybee 			buf->b_arc_access = lbolt;
2109789Sahrens 			return;
2110789Sahrens 		}
2111789Sahrens 
2112789Sahrens 		/*
2113789Sahrens 		 * This buffer has been "accessed" only once so far,
2114789Sahrens 		 * but it is still in the cache. Move it to the MFU
2115789Sahrens 		 * state.
2116789Sahrens 		 */
2117789Sahrens 		if (lbolt > buf->b_arc_access + ARC_MINTIME) {
2118789Sahrens 			/*
2119789Sahrens 			 * More than 125ms have passed since we
2120789Sahrens 			 * instantiated this buffer.  Move it to the
2121789Sahrens 			 * most frequently used state.
2122789Sahrens 			 */
2123789Sahrens 			buf->b_arc_access = lbolt;
21241544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
21253403Sbmc 			arc_change_state(arc_mfu, buf, hash_lock);
2126789Sahrens 		}
21273403Sbmc 		ARCSTAT_BUMP(arcstat_mru_hits);
21283403Sbmc 	} else if (buf->b_state == arc_mru_ghost) {
2129789Sahrens 		arc_state_t	*new_state;
2130789Sahrens 		/*
2131789Sahrens 		 * This buffer has been "accessed" recently, but
2132789Sahrens 		 * was evicted from the cache.  Move it to the
2133789Sahrens 		 * MFU state.
2134789Sahrens 		 */
2135789Sahrens 
2136789Sahrens 		if (buf->b_flags & ARC_PREFETCH) {
21373403Sbmc 			new_state = arc_mru;
21382391Smaybee 			if (refcount_count(&buf->b_refcnt) > 0)
21392391Smaybee 				buf->b_flags &= ~ARC_PREFETCH;
21401544Seschrock 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2141789Sahrens 		} else {
21423403Sbmc 			new_state = arc_mfu;
21431544Seschrock 			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2144789Sahrens 		}
2145789Sahrens 
2146789Sahrens 		buf->b_arc_access = lbolt;
2147789Sahrens 		arc_change_state(new_state, buf, hash_lock);
2148789Sahrens 
21493403Sbmc 		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
21503403Sbmc 	} else if (buf->b_state == arc_mfu) {
2151789Sahrens 		/*
2152789Sahrens 		 * This buffer has been accessed more than once and is
2153789Sahrens 		 * still in the cache.  Keep it in the MFU state.
2154789Sahrens 		 *
21552391Smaybee 		 * NOTE: an add_reference() that occurred when we did
21562391Smaybee 		 * the arc_read() will have kicked this off the list.
21572391Smaybee 		 * If it was a prefetch, we will explicitly move it to
21582391Smaybee 		 * the head of the list now.
2159789Sahrens 		 */
21602391Smaybee 		if ((buf->b_flags & ARC_PREFETCH) != 0) {
21612391Smaybee 			ASSERT(refcount_count(&buf->b_refcnt) == 0);
21622391Smaybee 			ASSERT(list_link_active(&buf->b_arc_node));
21632391Smaybee 		}
21643403Sbmc 		ARCSTAT_BUMP(arcstat_mfu_hits);
21652391Smaybee 		buf->b_arc_access = lbolt;
21663403Sbmc 	} else if (buf->b_state == arc_mfu_ghost) {
21673403Sbmc 		arc_state_t	*new_state = arc_mfu;
2168789Sahrens 		/*
2169789Sahrens 		 * This buffer has been accessed more than once but has
2170789Sahrens 		 * been evicted from the cache.  Move it back to the
2171789Sahrens 		 * MFU state.
2172789Sahrens 		 */
2173789Sahrens 
21742391Smaybee 		if (buf->b_flags & ARC_PREFETCH) {
21752391Smaybee 			/*
21762391Smaybee 			 * This is a prefetch access...
21772391Smaybee 			 * move this block back to the MRU state.
21782391Smaybee 			 */
21792391Smaybee 			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
21803403Sbmc 			new_state = arc_mru;
21812391Smaybee 		}
21822391Smaybee 
2183789Sahrens 		buf->b_arc_access = lbolt;
21841544Seschrock 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
21852391Smaybee 		arc_change_state(new_state, buf, hash_lock);
2186789Sahrens 
21873403Sbmc 		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2188*5450Sbrendan 	} else if (buf->b_state == arc_l2c_only) {
2189*5450Sbrendan 		/*
2190*5450Sbrendan 		 * This buffer is on the 2nd Level ARC.
2191*5450Sbrendan 		 */
2192*5450Sbrendan 
2193*5450Sbrendan 		buf->b_arc_access = lbolt;
2194*5450Sbrendan 		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2195*5450Sbrendan 		arc_change_state(arc_mfu, buf, hash_lock);
2196789Sahrens 	} else {
2197789Sahrens 		ASSERT(!"invalid arc state");
2198789Sahrens 	}
2199789Sahrens }
2200789Sahrens 
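/*
 * Illustrative sketch (not part of the ARC implementation): the state
 * transitions arc_access() drives for the common, non-prefetch case.
 * The enum and function names are hypothetical.
 */
#if 0
typedef enum {
	S_ANON, S_MRU, S_MRU_GHOST, S_MFU, S_MFU_GHOST, S_L2C_ONLY
} s_t;

static s_t
next_state(s_t cur, boolean_t mintime_elapsed)
{
	switch (cur) {
	case S_ANON:
		return (S_MRU);		/* first insertion */
	case S_MRU:
		/* promote only after ARC_MINTIME has passed */
		return (mintime_elapsed ? S_MFU : S_MRU);
	case S_MRU_GHOST:		/* re-read after eviction */
	case S_MFU:			/* stays hot */
	case S_MFU_GHOST:
	case S_L2C_ONLY:
		return (S_MFU);
	}
	return (cur);
}
#endif
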
2201789Sahrens /* a generic arc_done_func_t which you can use */
2202789Sahrens /* ARGSUSED */
2203789Sahrens void
2204789Sahrens arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2205789Sahrens {
2206789Sahrens 	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
22071544Seschrock 	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2208789Sahrens }
2209789Sahrens 
22104309Smaybee /* a generic arc_done_func_t */
2211789Sahrens void
2212789Sahrens arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2213789Sahrens {
2214789Sahrens 	arc_buf_t **bufp = arg;
2215789Sahrens 	if (zio && zio->io_error) {
22161544Seschrock 		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2217789Sahrens 		*bufp = NULL;
2218789Sahrens 	} else {
2219789Sahrens 		*bufp = buf;
2220789Sahrens 	}
2221789Sahrens }
2222789Sahrens 
2223789Sahrens static void
2224789Sahrens arc_read_done(zio_t *zio)
2225789Sahrens {
22261589Smaybee 	arc_buf_hdr_t	*hdr, *found;
2227789Sahrens 	arc_buf_t	*buf;
2228789Sahrens 	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
2229789Sahrens 	kmutex_t	*hash_lock;
2230789Sahrens 	arc_callback_t	*callback_list, *acb;
2231789Sahrens 	int		freeable = FALSE;
2232789Sahrens 
2233789Sahrens 	buf = zio->io_private;
2234789Sahrens 	hdr = buf->b_hdr;
2235789Sahrens 
22361589Smaybee 	/*
22371589Smaybee 	 * The hdr was inserted into hash-table and removed from lists
22381589Smaybee 	 * prior to starting I/O.  We should find this header, since
22391589Smaybee 	 * it's in the hash table, and it should be legit since it's
22401589Smaybee 	 * not possible to evict it during the I/O.  The only possible
22411589Smaybee 	 * reason for it not to be found is if we were freed during the
22421589Smaybee 	 * read.
22431589Smaybee 	 */
22441589Smaybee 	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
22453093Sahrens 	    &hash_lock);
2246789Sahrens 
22471589Smaybee 	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2248*5450Sbrendan 	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2249*5450Sbrendan 	    (found == hdr && HDR_L2_READING(hdr)));
2250*5450Sbrendan 
2251*5450Sbrendan 	hdr->b_flags &= ~(ARC_L2_READING|ARC_L2_EVICTED);
2252*5450Sbrendan 	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2253*5450Sbrendan 		hdr->b_flags |= ARC_DONT_L2CACHE;
2254789Sahrens 
2255789Sahrens 	/* byteswap if necessary */
2256789Sahrens 	callback_list = hdr->b_acb;
2257789Sahrens 	ASSERT(callback_list != NULL);
2258789Sahrens 	if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
2259789Sahrens 		callback_list->acb_byteswap(buf->b_data, hdr->b_size);
2260789Sahrens 
2261*5450Sbrendan 	arc_cksum_compute(buf, B_FALSE);
22623093Sahrens 
2263789Sahrens 	/* create copies of the data buffer for the callers */
2264789Sahrens 	abuf = buf;
2265789Sahrens 	for (acb = callback_list; acb; acb = acb->acb_next) {
2266789Sahrens 		if (acb->acb_done) {
22672688Smaybee 			if (abuf == NULL)
22682688Smaybee 				abuf = arc_buf_clone(buf);
2269789Sahrens 			acb->acb_buf = abuf;
2270789Sahrens 			abuf = NULL;
2271789Sahrens 		}
2272789Sahrens 	}
2273789Sahrens 	hdr->b_acb = NULL;
2274789Sahrens 	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
22751544Seschrock 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
22761544Seschrock 	if (abuf == buf)
22771544Seschrock 		hdr->b_flags |= ARC_BUF_AVAILABLE;
2278789Sahrens 
2279789Sahrens 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2280789Sahrens 
2281789Sahrens 	if (zio->io_error != 0) {
2282789Sahrens 		hdr->b_flags |= ARC_IO_ERROR;
22833403Sbmc 		if (hdr->b_state != arc_anon)
22843403Sbmc 			arc_change_state(arc_anon, hdr, hash_lock);
22851544Seschrock 		if (HDR_IN_HASH_TABLE(hdr))
22861544Seschrock 			buf_hash_remove(hdr);
2287789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
22882391Smaybee 		/* convert checksum errors into IO errors */
22891544Seschrock 		if (zio->io_error == ECKSUM)
22901544Seschrock 			zio->io_error = EIO;
2291789Sahrens 	}
2292789Sahrens 
22931544Seschrock 	/*
22942391Smaybee 	 * Broadcast before we drop the hash_lock to avoid the possibility
22952391Smaybee 	 * that the hdr (and hence the cv) might be freed before we get to
22962391Smaybee 	 * the cv_broadcast().
22971544Seschrock 	 */
22981544Seschrock 	cv_broadcast(&hdr->b_cv);
22991544Seschrock 
23001589Smaybee 	if (hash_lock) {
2301789Sahrens 		/*
2302789Sahrens 		 * Only call arc_access on anonymous buffers.  This is because
2303789Sahrens 		 * if we've issued an I/O for an evicted buffer, we've already
2304789Sahrens 		 * called arc_access (to prevent any simultaneous readers from
2305789Sahrens 		 * getting confused).
2306789Sahrens 		 */
23073403Sbmc 		if (zio->io_error == 0 && hdr->b_state == arc_anon)
23082688Smaybee 			arc_access(hdr, hash_lock);
23092688Smaybee 		mutex_exit(hash_lock);
2310789Sahrens 	} else {
2311789Sahrens 		/*
2312789Sahrens 		 * This block was freed while we waited for the read to
2313789Sahrens 		 * complete.  It has been removed from the hash table and
2314789Sahrens 		 * moved to the anonymous state (so that it won't show up
2315789Sahrens 		 * in the cache).
2316789Sahrens 		 */
23173403Sbmc 		ASSERT3P(hdr->b_state, ==, arc_anon);
2318789Sahrens 		freeable = refcount_is_zero(&hdr->b_refcnt);
2319789Sahrens 	}
2320789Sahrens 
2321789Sahrens 	/* execute each callback and free its structure */
2322789Sahrens 	while ((acb = callback_list) != NULL) {
2323789Sahrens 		if (acb->acb_done)
2324789Sahrens 			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2325789Sahrens 
2326789Sahrens 		if (acb->acb_zio_dummy != NULL) {
2327789Sahrens 			acb->acb_zio_dummy->io_error = zio->io_error;
2328789Sahrens 			zio_nowait(acb->acb_zio_dummy);
2329789Sahrens 		}
2330789Sahrens 
2331789Sahrens 		callback_list = acb->acb_next;
2332789Sahrens 		kmem_free(acb, sizeof (arc_callback_t));
2333789Sahrens 	}
2334789Sahrens 
2335789Sahrens 	if (freeable)
23361544Seschrock 		arc_hdr_destroy(hdr);
2337789Sahrens }
2338789Sahrens 
2339789Sahrens /*
2340789Sahrens  * "Read" the block at the specified DVA (in bp) via the
2341789Sahrens  * cache.  If the block is found in the cache, invoke the provided
2342789Sahrens  * callback immediately and return.  Note that the `zio' parameter
2343789Sahrens  * in the callback will be NULL in this case, since no IO was
2344789Sahrens  * required.  If the block is not in the cache pass the read request
2345789Sahrens  * on to the spa with a substitute callback function, so that the
2346789Sahrens  * requested block will be added to the cache.
2347789Sahrens  *
2348789Sahrens  * If a read request arrives for a block that has a read in-progress,
2349789Sahrens  * either wait for the in-progress read to complete (and return the
2350789Sahrens  * results); or, if this is a read with a "done" func, add a record
2351789Sahrens  * to the read to invoke the "done" func when the read completes,
2352789Sahrens  * and return; or just return.
2353789Sahrens  *
2354789Sahrens  * arc_read_done() will invoke all the requested "done" functions
2355789Sahrens  * for readers of this block.
2356789Sahrens  */
2357789Sahrens int
2358789Sahrens arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
2359789Sahrens     arc_done_func_t *done, void *private, int priority, int flags,
23602391Smaybee     uint32_t *arc_flags, zbookmark_t *zb)
2361789Sahrens {
2362789Sahrens 	arc_buf_hdr_t *hdr;
2363789Sahrens 	arc_buf_t *buf;
2364789Sahrens 	kmutex_t *hash_lock;
2365*5450Sbrendan 	zio_t *rzio;
2366789Sahrens 
2367789Sahrens top:
2368789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
23691544Seschrock 	if (hdr && hdr->b_datacnt > 0) {
2370789Sahrens 
23712391Smaybee 		*arc_flags |= ARC_CACHED;
23722391Smaybee 
2373789Sahrens 		if (HDR_IO_IN_PROGRESS(hdr)) {
23742391Smaybee 
23752391Smaybee 			if (*arc_flags & ARC_WAIT) {
23762391Smaybee 				cv_wait(&hdr->b_cv, hash_lock);
23772391Smaybee 				mutex_exit(hash_lock);
23782391Smaybee 				goto top;
23792391Smaybee 			}
23802391Smaybee 			ASSERT(*arc_flags & ARC_NOWAIT);
23812391Smaybee 
23822391Smaybee 			if (done) {
2383789Sahrens 				arc_callback_t	*acb = NULL;
2384789Sahrens 
2385789Sahrens 				acb = kmem_zalloc(sizeof (arc_callback_t),
2386789Sahrens 				    KM_SLEEP);
2387789Sahrens 				acb->acb_done = done;
2388789Sahrens 				acb->acb_private = private;
2389789Sahrens 				acb->acb_byteswap = swap;
2390789Sahrens 				if (pio != NULL)
2391789Sahrens 					acb->acb_zio_dummy = zio_null(pio,
2392789Sahrens 					    spa, NULL, NULL, flags);
2393789Sahrens 
2394789Sahrens 				ASSERT(acb->acb_done != NULL);
2395789Sahrens 				acb->acb_next = hdr->b_acb;
2396789Sahrens 				hdr->b_acb = acb;
2397789Sahrens 				add_reference(hdr, hash_lock, private);
2398789Sahrens 				mutex_exit(hash_lock);
2399789Sahrens 				return (0);
2400789Sahrens 			}
2401789Sahrens 			mutex_exit(hash_lock);
2402789Sahrens 			return (0);
2403789Sahrens 		}
2404789Sahrens 
24053403Sbmc 		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2406789Sahrens 
24071544Seschrock 		if (done) {
24082688Smaybee 			add_reference(hdr, hash_lock, private);
24091544Seschrock 			/*
24101544Seschrock 			 * If this block is already in use, create a new
24111544Seschrock 			 * copy of the data so that we will be guaranteed
24121544Seschrock 			 * that arc_release() will always succeed.
24131544Seschrock 			 */
24141544Seschrock 			buf = hdr->b_buf;
24151544Seschrock 			ASSERT(buf);
24161544Seschrock 			ASSERT(buf->b_data);
24172688Smaybee 			if (HDR_BUF_AVAILABLE(hdr)) {
24181544Seschrock 				ASSERT(buf->b_efunc == NULL);
24191544Seschrock 				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
24202688Smaybee 			} else {
24212688Smaybee 				buf = arc_buf_clone(buf);
24221544Seschrock 			}
24232391Smaybee 		} else if (*arc_flags & ARC_PREFETCH &&
24242391Smaybee 		    refcount_count(&hdr->b_refcnt) == 0) {
24252391Smaybee 			hdr->b_flags |= ARC_PREFETCH;
2426789Sahrens 		}
2427789Sahrens 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
24282688Smaybee 		arc_access(hdr, hash_lock);
24292688Smaybee 		mutex_exit(hash_lock);
24303403Sbmc 		ARCSTAT_BUMP(arcstat_hits);
24313403Sbmc 		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
24323403Sbmc 		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
24333403Sbmc 		    data, metadata, hits);
24343403Sbmc 
2435789Sahrens 		if (done)
2436789Sahrens 			done(NULL, buf, private);
2437789Sahrens 	} else {
2438789Sahrens 		uint64_t size = BP_GET_LSIZE(bp);
2439789Sahrens 		arc_callback_t	*acb;
2440789Sahrens 
2441789Sahrens 		if (hdr == NULL) {
2442789Sahrens 			/* this block is not in the cache */
2443789Sahrens 			arc_buf_hdr_t	*exists;
24443290Sjohansen 			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
24453290Sjohansen 			buf = arc_buf_alloc(spa, size, private, type);
2446789Sahrens 			hdr = buf->b_hdr;
2447789Sahrens 			hdr->b_dva = *BP_IDENTITY(bp);
2448789Sahrens 			hdr->b_birth = bp->blk_birth;
2449789Sahrens 			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2450789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2451789Sahrens 			if (exists) {
2452789Sahrens 				/* somebody beat us to the hash insert */
2453789Sahrens 				mutex_exit(hash_lock);
2454789Sahrens 				bzero(&hdr->b_dva, sizeof (dva_t));
2455789Sahrens 				hdr->b_birth = 0;
2456789Sahrens 				hdr->b_cksum0 = 0;
24571544Seschrock 				(void) arc_buf_remove_ref(buf, private);
2458789Sahrens 				goto top; /* restart the IO request */
2459789Sahrens 			}
24602391Smaybee 			/* if this is a prefetch, we don't have a reference */
24612391Smaybee 			if (*arc_flags & ARC_PREFETCH) {
24622391Smaybee 				(void) remove_reference(hdr, hash_lock,
24632391Smaybee 				    private);
24642391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
24652391Smaybee 			}
24662391Smaybee 			if (BP_GET_LEVEL(bp) > 0)
24672391Smaybee 				hdr->b_flags |= ARC_INDIRECT;
2468789Sahrens 		} else {
2469789Sahrens 			/* this block is in the ghost cache */
24701544Seschrock 			ASSERT(GHOST_STATE(hdr->b_state));
24711544Seschrock 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
24722391Smaybee 			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
24732391Smaybee 			ASSERT(hdr->b_buf == NULL);
2474789Sahrens 
24752391Smaybee 			/* if this is a prefetch, we don't have a reference */
24762391Smaybee 			if (*arc_flags & ARC_PREFETCH)
24772391Smaybee 				hdr->b_flags |= ARC_PREFETCH;
24782391Smaybee 			else
24792391Smaybee 				add_reference(hdr, hash_lock, private);
2480789Sahrens 			buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
24811544Seschrock 			buf->b_hdr = hdr;
24822688Smaybee 			buf->b_data = NULL;
24831544Seschrock 			buf->b_efunc = NULL;
24841544Seschrock 			buf->b_private = NULL;
24851544Seschrock 			buf->b_next = NULL;
24861544Seschrock 			hdr->b_buf = buf;
24872688Smaybee 			arc_get_data_buf(buf);
24881544Seschrock 			ASSERT(hdr->b_datacnt == 0);
24891544Seschrock 			hdr->b_datacnt = 1;
24902391Smaybee 
2491789Sahrens 		}
2492789Sahrens 
2493789Sahrens 		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2494789Sahrens 		acb->acb_done = done;
2495789Sahrens 		acb->acb_private = private;
2496789Sahrens 		acb->acb_byteswap = swap;
2497789Sahrens 
2498789Sahrens 		ASSERT(hdr->b_acb == NULL);
2499789Sahrens 		hdr->b_acb = acb;
2500789Sahrens 		hdr->b_flags |= ARC_IO_IN_PROGRESS;
2501789Sahrens 
2502789Sahrens 		/*
2503789Sahrens 		 * If the buffer has been evicted, migrate it to a present state
2504789Sahrens 		 * before issuing the I/O.  Once we drop the hash-table lock,
2505789Sahrens 		 * the header will be marked as I/O in progress and have an
2506789Sahrens 		 * attached buffer.  At this point, anybody who finds this
2507789Sahrens 		 * buffer ought to notice that it's legit but has a pending I/O.
2508789Sahrens 		 */
2509789Sahrens 
25101544Seschrock 		if (GHOST_STATE(hdr->b_state))
25112688Smaybee 			arc_access(hdr, hash_lock);
2512789Sahrens 
2513789Sahrens 		ASSERT3U(hdr->b_size, ==, size);
25141596Sahrens 		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
25151596Sahrens 		    zbookmark_t *, zb);
25163403Sbmc 		ARCSTAT_BUMP(arcstat_misses);
25173403Sbmc 		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
25183403Sbmc 		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
25193403Sbmc 		    data, metadata, misses);
25201544Seschrock 
2521*5450Sbrendan 		if (l2arc_ndev != 0) {
2522*5450Sbrendan 			/*
2523*5450Sbrendan 			 * Read from the L2ARC if the following are true:
2524*5450Sbrendan 			 * 1. This buffer has L2ARC metadata.
2525*5450Sbrendan 			 * 2. This buffer isn't currently writing to the L2ARC.
2526*5450Sbrendan 			 */
2527*5450Sbrendan 			if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr)) {
2528*5450Sbrendan 				vdev_t *vd = hdr->b_l2hdr->b_dev->l2ad_vdev;
2529*5450Sbrendan 				daddr_t addr = hdr->b_l2hdr->b_daddr;
2530*5450Sbrendan 				l2arc_read_callback_t *cb;
2531*5450Sbrendan 
2532*5450Sbrendan 				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2533*5450Sbrendan 				ARCSTAT_BUMP(arcstat_l2_hits);
2534*5450Sbrendan 
2535*5450Sbrendan 				hdr->b_flags |= ARC_L2_READING;
2536*5450Sbrendan 				mutex_exit(hash_lock);
2537*5450Sbrendan 
2538*5450Sbrendan 				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2539*5450Sbrendan 				    KM_SLEEP);
2540*5450Sbrendan 				cb->l2rcb_buf = buf;
2541*5450Sbrendan 				cb->l2rcb_spa = spa;
2542*5450Sbrendan 				cb->l2rcb_bp = *bp;
2543*5450Sbrendan 				cb->l2rcb_zb = *zb;
2544*5450Sbrendan 				cb->l2rcb_flags = flags;
2545*5450Sbrendan 
2546*5450Sbrendan 				/*
2547*5450Sbrendan 				 * l2arc read.
2548*5450Sbrendan 				 */
2549*5450Sbrendan 				rzio = zio_read_phys(pio, vd, addr, size,
2550*5450Sbrendan 				    buf->b_data, ZIO_CHECKSUM_OFF,
2551*5450Sbrendan 				    l2arc_read_done, cb, priority,
2552*5450Sbrendan 				    flags | ZIO_FLAG_DONT_CACHE, B_FALSE);
2553*5450Sbrendan 				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
2554*5450Sbrendan 				    zio_t *, rzio);
2555*5450Sbrendan 
2556*5450Sbrendan 				if (*arc_flags & ARC_WAIT)
2557*5450Sbrendan 					return (zio_wait(rzio));
2558*5450Sbrendan 
2559*5450Sbrendan 				ASSERT(*arc_flags & ARC_NOWAIT);
2560*5450Sbrendan 				zio_nowait(rzio);
2561*5450Sbrendan 				return (0);
2562*5450Sbrendan 			} else {
2563*5450Sbrendan 				DTRACE_PROBE1(l2arc__miss,
2564*5450Sbrendan 				    arc_buf_hdr_t *, hdr);
2565*5450Sbrendan 				ARCSTAT_BUMP(arcstat_l2_misses);
2566*5450Sbrendan 				if (HDR_L2_WRITING(hdr))
2567*5450Sbrendan 					ARCSTAT_BUMP(arcstat_l2_rw_clash);
2568*5450Sbrendan 			}
2569*5450Sbrendan 		}
2570*5450Sbrendan 		mutex_exit(hash_lock);
2571*5450Sbrendan 
2572789Sahrens 		rzio = zio_read(pio, spa, bp, buf->b_data, size,
25731544Seschrock 		    arc_read_done, buf, priority, flags, zb);
2574789Sahrens 
25752391Smaybee 		if (*arc_flags & ARC_WAIT)
2576789Sahrens 			return (zio_wait(rzio));
2577789Sahrens 
25782391Smaybee 		ASSERT(*arc_flags & ARC_NOWAIT);
2579789Sahrens 		zio_nowait(rzio);
2580789Sahrens 	}
2581789Sahrens 	return (0);
2582789Sahrens }
2583789Sahrens 
2584789Sahrens /*
2585789Sahrens  * arc_read() variant to support pool traversal.  If the block is already
2586789Sahrens  * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2587789Sahrens  * The idea is that we don't want pool traversal filling up memory, but
2588789Sahrens  * if the ARC already has the data anyway, we shouldn't pay for the I/O.
2589789Sahrens  */
2590789Sahrens int
2591789Sahrens arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2592789Sahrens {
2593789Sahrens 	arc_buf_hdr_t *hdr;
2594789Sahrens 	kmutex_t *hash_mtx;
2595789Sahrens 	int rc = 0;
2596789Sahrens 
2597789Sahrens 	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2598789Sahrens 
25991544Seschrock 	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
26001544Seschrock 		arc_buf_t *buf = hdr->b_buf;
26011544Seschrock 
26021544Seschrock 		ASSERT(buf);
26031544Seschrock 		while (buf->b_data == NULL) {
26041544Seschrock 			buf = buf->b_next;
26051544Seschrock 			ASSERT(buf);
26061544Seschrock 		}
26071544Seschrock 		bcopy(buf->b_data, data, hdr->b_size);
26081544Seschrock 	} else {
2609789Sahrens 		rc = ENOENT;
26101544Seschrock 	}
2611789Sahrens 
2612789Sahrens 	if (hash_mtx)
2613789Sahrens 		mutex_exit(hash_mtx);
2614789Sahrens 
2615789Sahrens 	return (rc);
2616789Sahrens }
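
/*
 * Illustrative sketch (not in the original source): a hypothetical pool
 * traversal caller could use arc_tryread() as below, issuing its own
 * read only on an ENOENT miss.  traverse_issue_read() is an assumed
 * helper, named here for illustration only.
 */
#if 0
static int
traverse_fetch_block(spa_t *spa, blkptr_t *bp, void *data)
{
	/* Cheap copy-out if the ARC already holds the block. */
	if (arc_tryread(spa, bp, data) == 0)
		return (0);

	/* Miss: the traversal pays for its own I/O. */
	return (traverse_issue_read(spa, bp, data));
}
#endif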
2617789Sahrens 
26181544Seschrock void
26191544Seschrock arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
26201544Seschrock {
26211544Seschrock 	ASSERT(buf->b_hdr != NULL);
26223403Sbmc 	ASSERT(buf->b_hdr->b_state != arc_anon);
26231544Seschrock 	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
26241544Seschrock 	buf->b_efunc = func;
26251544Seschrock 	buf->b_private = private;
26261544Seschrock }
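
/*
 * Illustrative sketch (not in the original source): how a DMU-style
 * client might pair arc_set_callback() with arc_buf_evict().  The
 * callback signature matches the b_efunc use in arc_buf_evict() below:
 * it takes the arc_buf_t and returns 0 on success.  my_state_t and
 * my_buf_evicted are hypothetical names.
 */
#if 0
static int
my_buf_evicted(arc_buf_t *buf)
{
	my_state_t *ms = buf->b_private;

	/* Drop our pointer; the ARC is reclaiming this buffer. */
	ms->ms_arcbuf = NULL;
	return (0);
}

	/* After arc_read() returns a held buffer: */
	arc_set_callback(buf, my_buf_evicted, ms);
#endif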
26271544Seschrock 
26281544Seschrock /*
26291544Seschrock  * This is used by the DMU to let the ARC know that a buffer is
26301544Seschrock  * being evicted, so the ARC should clean up.  If this arc buf
26311544Seschrock  * is not yet in the evicted state, it will be put there.
26321544Seschrock  */
26331544Seschrock int
26341544Seschrock arc_buf_evict(arc_buf_t *buf)
26351544Seschrock {
26362887Smaybee 	arc_buf_hdr_t *hdr;
26371544Seschrock 	kmutex_t *hash_lock;
26381544Seschrock 	arc_buf_t **bufp;
26391544Seschrock 
26402887Smaybee 	mutex_enter(&arc_eviction_mtx);
26412887Smaybee 	hdr = buf->b_hdr;
26421544Seschrock 	if (hdr == NULL) {
26431544Seschrock 		/*
26441544Seschrock 		 * We are in arc_do_user_evicts().
26451544Seschrock 		 */
26461544Seschrock 		ASSERT(buf->b_data == NULL);
26472887Smaybee 		mutex_exit(&arc_eviction_mtx);
26481544Seschrock 		return (0);
26491544Seschrock 	}
26502887Smaybee 	hash_lock = HDR_LOCK(hdr);
26512887Smaybee 	mutex_exit(&arc_eviction_mtx);
26521544Seschrock 
26531544Seschrock 	mutex_enter(hash_lock);
26541544Seschrock 
26552724Smaybee 	if (buf->b_data == NULL) {
26562724Smaybee 		/*
26572724Smaybee 		 * We are on the eviction list.
26582724Smaybee 		 */
26592724Smaybee 		mutex_exit(hash_lock);
26602724Smaybee 		mutex_enter(&arc_eviction_mtx);
26612724Smaybee 		if (buf->b_hdr == NULL) {
26622724Smaybee 			/*
26632724Smaybee 			 * We are already in arc_do_user_evicts().
26642724Smaybee 			 */
26652724Smaybee 			mutex_exit(&arc_eviction_mtx);
26662724Smaybee 			return (0);
26672724Smaybee 		} else {
26682724Smaybee 			arc_buf_t copy = *buf; /* structure assignment */
26692724Smaybee 			/*
26702724Smaybee 			 * Process this buffer now
26712724Smaybee 			 * but let arc_do_user_evicts() do the reaping.
26722724Smaybee 			 */
26732724Smaybee 			buf->b_efunc = NULL;
26742724Smaybee 			mutex_exit(&arc_eviction_mtx);
26752724Smaybee 			VERIFY(copy.b_efunc(&copy) == 0);
26762724Smaybee 			return (1);
26772724Smaybee 		}
26782724Smaybee 	}
26792724Smaybee 
26802724Smaybee 	ASSERT(buf->b_hdr == hdr);
26812724Smaybee 	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
26823403Sbmc 	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
26831544Seschrock 
26841544Seschrock 	/*
26851544Seschrock 	 * Pull this buffer off of the hdr
26861544Seschrock 	 */
26871544Seschrock 	bufp = &hdr->b_buf;
26881544Seschrock 	while (*bufp != buf)
26891544Seschrock 		bufp = &(*bufp)->b_next;
26901544Seschrock 	*bufp = buf->b_next;
26911544Seschrock 
26921544Seschrock 	ASSERT(buf->b_data != NULL);
26932688Smaybee 	arc_buf_destroy(buf, FALSE, FALSE);
26941544Seschrock 
26951544Seschrock 	if (hdr->b_datacnt == 0) {
26961544Seschrock 		arc_state_t *old_state = hdr->b_state;
26971544Seschrock 		arc_state_t *evicted_state;
26981544Seschrock 
26991544Seschrock 		ASSERT(refcount_is_zero(&hdr->b_refcnt));
27001544Seschrock 
27011544Seschrock 		evicted_state =
27023403Sbmc 		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
27031544Seschrock 
27043403Sbmc 		mutex_enter(&old_state->arcs_mtx);
27053403Sbmc 		mutex_enter(&evicted_state->arcs_mtx);
27061544Seschrock 
27071544Seschrock 		arc_change_state(evicted_state, hdr, hash_lock);
27081544Seschrock 		ASSERT(HDR_IN_HASH_TABLE(hdr));
2709*5450Sbrendan 		hdr->b_flags |= ARC_IN_HASH_TABLE;
2710*5450Sbrendan 		hdr->b_flags &= ~ARC_BUF_AVAILABLE;
27111544Seschrock 
27123403Sbmc 		mutex_exit(&evicted_state->arcs_mtx);
27133403Sbmc 		mutex_exit(&old_state->arcs_mtx);
27141544Seschrock 	}
27151544Seschrock 	mutex_exit(hash_lock);
27161819Smaybee 
27171544Seschrock 	VERIFY(buf->b_efunc(buf) == 0);
27181544Seschrock 	buf->b_efunc = NULL;
27191544Seschrock 	buf->b_private = NULL;
27201544Seschrock 	buf->b_hdr = NULL;
27211544Seschrock 	kmem_cache_free(buf_cache, buf);
27221544Seschrock 	return (1);
27231544Seschrock }
27241544Seschrock 
2725789Sahrens /*
2726789Sahrens  * Release this buffer from the cache.  This must be done
2727789Sahrens  * after a read and prior to modifying the buffer contents.
2728789Sahrens  * If the buffer has more than one reference, we must make
2729789Sahrens  * a new hdr for the buffer.
2730789Sahrens  */
2731789Sahrens void
2732789Sahrens arc_release(arc_buf_t *buf, void *tag)
2733789Sahrens {
2734789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
2735789Sahrens 	kmutex_t *hash_lock = HDR_LOCK(hdr);
2736*5450Sbrendan 	l2arc_buf_hdr_t *l2hdr = NULL;
2737*5450Sbrendan 	uint64_t buf_size;
2738789Sahrens 
2739789Sahrens 	/* this buffer is not on any list */
2740789Sahrens 	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2741789Sahrens 
27423403Sbmc 	if (hdr->b_state == arc_anon) {
2743789Sahrens 		/* this buffer is already released */
2744789Sahrens 		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2745789Sahrens 		ASSERT(BUF_EMPTY(hdr));
27461544Seschrock 		ASSERT(buf->b_efunc == NULL);
27473093Sahrens 		arc_buf_thaw(buf);
2748789Sahrens 		return;
2749789Sahrens 	}
2750789Sahrens 
2751789Sahrens 	mutex_enter(hash_lock);
2752789Sahrens 
27531544Seschrock 	/*
27541544Seschrock 	 * Do we have more than one buf?
27551544Seschrock 	 */
27561544Seschrock 	if (hdr->b_buf != buf || buf->b_next != NULL) {
2757789Sahrens 		arc_buf_hdr_t *nhdr;
2758789Sahrens 		arc_buf_t **bufp;
2759789Sahrens 		uint64_t blksz = hdr->b_size;
2760789Sahrens 		spa_t *spa = hdr->b_spa;
27613290Sjohansen 		arc_buf_contents_t type = hdr->b_type;
2762*5450Sbrendan 		uint32_t flags = hdr->b_flags;
2763789Sahrens 
27641544Seschrock 		ASSERT(hdr->b_datacnt > 1);
2765789Sahrens 		/*
2766789Sahrens 		 * Pull the data off of this buf and attach it to
2767789Sahrens 		 * a new anonymous buf.
2768789Sahrens 		 */
27691544Seschrock 		(void) remove_reference(hdr, hash_lock, tag);
2770789Sahrens 		bufp = &hdr->b_buf;
27711544Seschrock 		while (*bufp != buf)
2772789Sahrens 			bufp = &(*bufp)->b_next;
2773789Sahrens 		*bufp = (*bufp)->b_next;
27743897Smaybee 		buf->b_next = NULL;
27751544Seschrock 
27763403Sbmc 		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
27773403Sbmc 		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
27781544Seschrock 		if (refcount_is_zero(&hdr->b_refcnt)) {
27794309Smaybee 			uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
27804309Smaybee 			ASSERT3U(*size, >=, hdr->b_size);
27814309Smaybee 			atomic_add_64(size, -hdr->b_size);
27821544Seschrock 		}
27831544Seschrock 		hdr->b_datacnt -= 1;
2784*5450Sbrendan 		if (hdr->b_l2hdr != NULL) {
2785*5450Sbrendan 			mutex_enter(&l2arc_buflist_mtx);
2786*5450Sbrendan 			l2hdr = hdr->b_l2hdr;
2787*5450Sbrendan 			hdr->b_l2hdr = NULL;
2788*5450Sbrendan 			buf_size = hdr->b_size;
2789*5450Sbrendan 		}
27903547Smaybee 		arc_cksum_verify(buf);
27911544Seschrock 
2792789Sahrens 		mutex_exit(hash_lock);
2793789Sahrens 
2794789Sahrens 		nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
2795789Sahrens 		nhdr->b_size = blksz;
2796789Sahrens 		nhdr->b_spa = spa;
27973290Sjohansen 		nhdr->b_type = type;
2798789Sahrens 		nhdr->b_buf = buf;
27993403Sbmc 		nhdr->b_state = arc_anon;
2800789Sahrens 		nhdr->b_arc_access = 0;
2801*5450Sbrendan 		nhdr->b_flags = flags & ARC_L2_WRITING;
2802*5450Sbrendan 		nhdr->b_l2hdr = NULL;
28031544Seschrock 		nhdr->b_datacnt = 1;
28043547Smaybee 		nhdr->b_freeze_cksum = NULL;
28053897Smaybee 		(void) refcount_add(&nhdr->b_refcnt, tag);
2806789Sahrens 		buf->b_hdr = nhdr;
28073403Sbmc 		atomic_add_64(&arc_anon->arcs_size, blksz);
2808789Sahrens 	} else {
28091544Seschrock 		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2810789Sahrens 		ASSERT(!list_link_active(&hdr->b_arc_node));
2811789Sahrens 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
28123403Sbmc 		arc_change_state(arc_anon, hdr, hash_lock);
2813789Sahrens 		hdr->b_arc_access = 0;
2814*5450Sbrendan 		if (hdr->b_l2hdr != NULL) {
2815*5450Sbrendan 			mutex_enter(&l2arc_buflist_mtx);
2816*5450Sbrendan 			l2hdr = hdr->b_l2hdr;
2817*5450Sbrendan 			hdr->b_l2hdr = NULL;
2818*5450Sbrendan 			buf_size = hdr->b_size;
2819*5450Sbrendan 		}
2820789Sahrens 		mutex_exit(hash_lock);
2821*5450Sbrendan 
2822789Sahrens 		bzero(&hdr->b_dva, sizeof (dva_t));
2823789Sahrens 		hdr->b_birth = 0;
2824789Sahrens 		hdr->b_cksum0 = 0;
28253547Smaybee 		arc_buf_thaw(buf);
2826789Sahrens 	}
28271544Seschrock 	buf->b_efunc = NULL;
28281544Seschrock 	buf->b_private = NULL;
2829*5450Sbrendan 
2830*5450Sbrendan 	if (l2hdr) {
2831*5450Sbrendan 		list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
2832*5450Sbrendan 		kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
2833*5450Sbrendan 		ARCSTAT_INCR(arcstat_l2_size, -buf_size);
2834*5450Sbrendan 	}
2835*5450Sbrendan 	if (MUTEX_HELD(&l2arc_buflist_mtx))
2836*5450Sbrendan 		mutex_exit(&l2arc_buflist_mtx);
2837789Sahrens }
2838789Sahrens 
2839789Sahrens int
2840789Sahrens arc_released(arc_buf_t *buf)
2841789Sahrens {
28423403Sbmc 	return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
28431544Seschrock }
28441544Seschrock 
28451544Seschrock int
28461544Seschrock arc_has_callback(arc_buf_t *buf)
28471544Seschrock {
28481544Seschrock 	return (buf->b_efunc != NULL);
2849789Sahrens }
2850789Sahrens 
28511544Seschrock #ifdef ZFS_DEBUG
28521544Seschrock int
28531544Seschrock arc_referenced(arc_buf_t *buf)
28541544Seschrock {
28551544Seschrock 	return (refcount_count(&buf->b_hdr->b_refcnt));
28561544Seschrock }
28571544Seschrock #endif
28581544Seschrock 
2859789Sahrens static void
28603547Smaybee arc_write_ready(zio_t *zio)
28613547Smaybee {
28623547Smaybee 	arc_write_callback_t *callback = zio->io_private;
28633547Smaybee 	arc_buf_t *buf = callback->awcb_buf;
28645329Sgw25295 	arc_buf_hdr_t *hdr = buf->b_hdr;
28655329Sgw25295 
28665329Sgw25295 	if (zio->io_error == 0 && callback->awcb_ready) {
28673547Smaybee 		ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
28683547Smaybee 		callback->awcb_ready(zio, buf, callback->awcb_private);
28693547Smaybee 	}
28705329Sgw25295 	/*
28715329Sgw25295 	 * If the IO is already in progress, then this is a re-write
28725329Sgw25295 	 * attempt, so we need to thaw and re-compute the cksum. It is
28735329Sgw25295 	 * the responsibility of the callback to handle the freeing
28745329Sgw25295 	 * and accounting for any re-write attempt. If we don't have a
28755329Sgw25295 	 * callback registered then simply free the block here.
28765329Sgw25295 	 */
28775329Sgw25295 	if (HDR_IO_IN_PROGRESS(hdr)) {
28785329Sgw25295 		if (!BP_IS_HOLE(&zio->io_bp_orig) &&
28795329Sgw25295 		    callback->awcb_ready == NULL) {
28805329Sgw25295 			zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
28815329Sgw25295 			    &zio->io_bp_orig, NULL, NULL));
28825329Sgw25295 		}
28835329Sgw25295 		mutex_enter(&hdr->b_freeze_lock);
28845329Sgw25295 		if (hdr->b_freeze_cksum != NULL) {
28855329Sgw25295 			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
28865329Sgw25295 			hdr->b_freeze_cksum = NULL;
28875329Sgw25295 		}
28885329Sgw25295 		mutex_exit(&hdr->b_freeze_lock);
28895329Sgw25295 	}
2890*5450Sbrendan 	arc_cksum_compute(buf, B_FALSE);
28915329Sgw25295 	hdr->b_flags |= ARC_IO_IN_PROGRESS;
28923547Smaybee }
28933547Smaybee 
28943547Smaybee static void
2895789Sahrens arc_write_done(zio_t *zio)
2896789Sahrens {
28973547Smaybee 	arc_write_callback_t *callback = zio->io_private;
28983547Smaybee 	arc_buf_t *buf = callback->awcb_buf;
28993547Smaybee 	arc_buf_hdr_t *hdr = buf->b_hdr;
2900789Sahrens 
2901789Sahrens 	hdr->b_acb = NULL;
2902789Sahrens 
2903789Sahrens 	/* this buffer is on no lists and is not in the hash table */
29043403Sbmc 	ASSERT3P(hdr->b_state, ==, arc_anon);
2905789Sahrens 
2906789Sahrens 	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2907789Sahrens 	hdr->b_birth = zio->io_bp->blk_birth;
2908789Sahrens 	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
29091544Seschrock 	/*
29101544Seschrock 	 * If the block to be written was all-zero, we may have
29111544Seschrock 	 * compressed it away.  In this case no write was performed
29121544Seschrock 	 * so there will be no dva/birth-date/checksum.  The buffer
29131544Seschrock 	 * must therefore remain anonymous (and uncached).
29141544Seschrock 	 */
2915789Sahrens 	if (!BUF_EMPTY(hdr)) {
2916789Sahrens 		arc_buf_hdr_t *exists;
2917789Sahrens 		kmutex_t *hash_lock;
2918789Sahrens 
29193093Sahrens 		arc_cksum_verify(buf);
29203093Sahrens 
2921789Sahrens 		exists = buf_hash_insert(hdr, &hash_lock);
2922789Sahrens 		if (exists) {
2923789Sahrens 			/*
2924789Sahrens 			 * This can only happen if we overwrite for
2925789Sahrens 			 * sync-to-convergence, because we remove
2926789Sahrens 			 * buffers from the hash table when we arc_free().
2927789Sahrens 			 */
2928789Sahrens 			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2929789Sahrens 			    BP_IDENTITY(zio->io_bp)));
2930789Sahrens 			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2931789Sahrens 			    zio->io_bp->blk_birth);
2932789Sahrens 
2933789Sahrens 			ASSERT(refcount_is_zero(&exists->b_refcnt));
29343403Sbmc 			arc_change_state(arc_anon, exists, hash_lock);
2935789Sahrens 			mutex_exit(hash_lock);
29361544Seschrock 			arc_hdr_destroy(exists);
2937789Sahrens 			exists = buf_hash_insert(hdr, &hash_lock);
2938789Sahrens 			ASSERT3P(exists, ==, NULL);
2939789Sahrens 		}
29401544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
29412688Smaybee 		arc_access(hdr, hash_lock);
29422688Smaybee 		mutex_exit(hash_lock);
29433547Smaybee 	} else if (callback->awcb_done == NULL) {
29441544Seschrock 		int destroy_hdr;
29451544Seschrock 		/*
29461544Seschrock 		 * This is an anonymous buffer with no user callback,
29471544Seschrock 		 * destroy it if there are no active references.
29481544Seschrock 		 */
29491544Seschrock 		mutex_enter(&arc_eviction_mtx);
29501544Seschrock 		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
29511544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
29521544Seschrock 		mutex_exit(&arc_eviction_mtx);
29531544Seschrock 		if (destroy_hdr)
29541544Seschrock 			arc_hdr_destroy(hdr);
29551544Seschrock 	} else {
29561544Seschrock 		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2957789Sahrens 	}
29581544Seschrock 
29593547Smaybee 	if (callback->awcb_done) {
2960789Sahrens 		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
29613547Smaybee 		callback->awcb_done(zio, buf, callback->awcb_private);
2962789Sahrens 	}
2963789Sahrens 
29643547Smaybee 	kmem_free(callback, sizeof (arc_write_callback_t));
2965789Sahrens }
2966789Sahrens 
29673547Smaybee zio_t *
29681775Sbillm arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
2969789Sahrens     uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
29703547Smaybee     arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
29713547Smaybee     int flags, zbookmark_t *zb)
2972789Sahrens {
2973789Sahrens 	arc_buf_hdr_t *hdr = buf->b_hdr;
29743547Smaybee 	arc_write_callback_t *callback;
29753547Smaybee 	zio_t	*zio;
2976789Sahrens 
2977789Sahrens 	/* this is a private buffer - no locking required */
29783403Sbmc 	ASSERT3P(hdr->b_state, ==, arc_anon);
2979789Sahrens 	ASSERT(BUF_EMPTY(hdr));
2980789Sahrens 	ASSERT(!HDR_IO_ERROR(hdr));
29812237Smaybee 	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
29822237Smaybee 	ASSERT(hdr->b_acb == 0);
29833547Smaybee 	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
29843547Smaybee 	callback->awcb_ready = ready;
29853547Smaybee 	callback->awcb_done = done;
29863547Smaybee 	callback->awcb_private = private;
29873547Smaybee 	callback->awcb_buf = buf;
29883547Smaybee 	zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
29893547Smaybee 	    buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback,
29903547Smaybee 	    priority, flags, zb);
2991789Sahrens 
29923547Smaybee 	return (zio);
2993789Sahrens }
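
/*
 * Illustrative sketch (not in the original source): a hedged example of
 * issuing a write through arc_write().  my_ready_cb and my_done_cb are
 * hypothetical arc_done_func_t callbacks (zio, buf, private), my_arg is
 * a hypothetical argument, and the checksum/compress/priority/flag
 * values shown are ordinary zio constants.
 */
#if 0
	zio_t *wzio;

	wzio = arc_write(pio, spa, ZIO_CHECKSUM_FLETCHER_4, ZIO_COMPRESS_LZJB,
	    ncopies, txg, bp, buf, my_ready_cb, my_done_cb, my_arg,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, zb);
	(void) zio_wait(wzio);	/* or zio_nowait(wzio) for async use */
#endif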
2994789Sahrens 
2995789Sahrens int
2996789Sahrens arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
2997789Sahrens     zio_done_func_t *done, void *private, uint32_t arc_flags)
2998789Sahrens {
2999789Sahrens 	arc_buf_hdr_t *ab;
3000789Sahrens 	kmutex_t *hash_lock;
3001789Sahrens 	zio_t	*zio;
3002789Sahrens 
3003789Sahrens 	/*
3004789Sahrens 	 * If this buffer is in the cache, release it, so it
3005789Sahrens 	 * can be re-used.
3006789Sahrens 	 */
3007789Sahrens 	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
3008789Sahrens 	if (ab != NULL) {
3009789Sahrens 		/*
3010789Sahrens 		 * The checksum of blocks to free is not always
3011789Sahrens 	 * preserved (e.g. on the deadlist).  However, if it is
3012789Sahrens 		 * nonzero, it should match what we have in the cache.
3013789Sahrens 		 */
3014789Sahrens 		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
3015789Sahrens 		    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
30163403Sbmc 		if (ab->b_state != arc_anon)
30173403Sbmc 			arc_change_state(arc_anon, ab, hash_lock);
30182391Smaybee 		if (HDR_IO_IN_PROGRESS(ab)) {
30192391Smaybee 			/*
30202391Smaybee 			 * This should only happen when we prefetch.
30212391Smaybee 			 */
30222391Smaybee 			ASSERT(ab->b_flags & ARC_PREFETCH);
30232391Smaybee 			ASSERT3U(ab->b_datacnt, ==, 1);
30242391Smaybee 			ab->b_flags |= ARC_FREED_IN_READ;
30252391Smaybee 			if (HDR_IN_HASH_TABLE(ab))
30262391Smaybee 				buf_hash_remove(ab);
30272391Smaybee 			ab->b_arc_access = 0;
30282391Smaybee 			bzero(&ab->b_dva, sizeof (dva_t));
30292391Smaybee 			ab->b_birth = 0;
30302391Smaybee 			ab->b_cksum0 = 0;
30312391Smaybee 			ab->b_buf->b_efunc = NULL;
30322391Smaybee 			ab->b_buf->b_private = NULL;
30332391Smaybee 			mutex_exit(hash_lock);
30342391Smaybee 		} else if (refcount_is_zero(&ab->b_refcnt)) {
3035*5450Sbrendan 			ab->b_flags |= ARC_FREE_IN_PROGRESS;
3036789Sahrens 			mutex_exit(hash_lock);
30371544Seschrock 			arc_hdr_destroy(ab);
30383403Sbmc 			ARCSTAT_BUMP(arcstat_deleted);
3039789Sahrens 		} else {
30401589Smaybee 			/*
30412391Smaybee 			 * We still have an active reference on this
30422391Smaybee 			 * buffer.  This can happen, e.g., from
30432391Smaybee 			 * dbuf_unoverride().
30441589Smaybee 			 */
30452391Smaybee 			ASSERT(!HDR_IN_HASH_TABLE(ab));
3046789Sahrens 			ab->b_arc_access = 0;
3047789Sahrens 			bzero(&ab->b_dva, sizeof (dva_t));
3048789Sahrens 			ab->b_birth = 0;
3049789Sahrens 			ab->b_cksum0 = 0;
30501544Seschrock 			ab->b_buf->b_efunc = NULL;
30511544Seschrock 			ab->b_buf->b_private = NULL;
3052789Sahrens 			mutex_exit(hash_lock);
3053789Sahrens 		}
3054789Sahrens 	}
3055789Sahrens 
3056789Sahrens 	zio = zio_free(pio, spa, txg, bp, done, private);
3057789Sahrens 
3058789Sahrens 	if (arc_flags & ARC_WAIT)
3059789Sahrens 		return (zio_wait(zio));
3060789Sahrens 
3061789Sahrens 	ASSERT(arc_flags & ARC_NOWAIT);
3062789Sahrens 	zio_nowait(zio);
3063789Sahrens 
3064789Sahrens 	return (0);
3065789Sahrens }
3066789Sahrens 
3067789Sahrens void
3068789Sahrens arc_tempreserve_clear(uint64_t tempreserve)
3069789Sahrens {
3070789Sahrens 	atomic_add_64(&arc_tempreserve, -tempreserve);
3071789Sahrens 	ASSERT((int64_t)arc_tempreserve >= 0);
3072789Sahrens }
3073789Sahrens 
3074789Sahrens int
3075789Sahrens arc_tempreserve_space(uint64_t tempreserve)
3076789Sahrens {
3077789Sahrens #ifdef ZFS_DEBUG
3078789Sahrens 	/*
3079789Sahrens 	 * Once in a while, fail for no reason.  Everything should cope.
3080789Sahrens 	 */
3081789Sahrens 	if (spa_get_random(10000) == 0) {
3082789Sahrens 		dprintf("forcing random failure\n");
3083789Sahrens 		return (ERESTART);
3084789Sahrens 	}
3085789Sahrens #endif
30863403Sbmc 	if (tempreserve > arc_c/4 && !arc_no_grow)
30873403Sbmc 		arc_c = MIN(arc_c_max, tempreserve * 4);
30883403Sbmc 	if (tempreserve > arc_c)
3089982Smaybee 		return (ENOMEM);
3090982Smaybee 
3091789Sahrens 	/*
3092982Smaybee 	 * Throttle writes when the amount of dirty data in the cache
3093982Smaybee 	 * gets too large.  We try to keep the cache less than half full
3094982Smaybee 	 * of dirty blocks so that our sync times don't grow too large.
3095982Smaybee 	 * Note: if two requests come in concurrently, we might let them
3096982Smaybee 	 * both succeed, when one of them should fail.  Not a huge deal.
3097982Smaybee 	 *
3098982Smaybee 	 * XXX The limit should be adjusted dynamically to keep the time
3099982Smaybee 	 * to sync a dataset fixed (around 1-5 seconds?).
3100789Sahrens 	 */
3101789Sahrens 
31023403Sbmc 	if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
31033403Sbmc 	    arc_tempreserve + arc_anon->arcs_size > arc_c / 4) {
31044309Smaybee 		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
31054309Smaybee 		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
31064309Smaybee 		    arc_tempreserve>>10,
31074309Smaybee 		    arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
31084309Smaybee 		    arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
31093403Sbmc 		    tempreserve>>10, arc_c>>10);
3110789Sahrens 		return (ERESTART);
3111789Sahrens 	}
3112789Sahrens 	atomic_add_64(&arc_tempreserve, tempreserve);
3113789Sahrens 	return (0);
3114789Sahrens }
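
/*
 * Illustrative sketch (not in the original source): the expected pairing
 * of arc_tempreserve_space() and arc_tempreserve_clear() around dirtying
 * data.  ERESTART means the write throttle is engaged; back off and
 * retry.  The 128K size and the delay(1) retry policy are hypothetical.
 */
#if 0
	uint64_t resv = 128 << 10;	/* bytes about to be dirtied */
	int err;

	while ((err = arc_tempreserve_space(resv)) == ERESTART)
		delay(1);		/* throttled: wait a tick, retry */
	if (err == 0) {
		/* ... dirty up to resv bytes ... */
		arc_tempreserve_clear(resv);
	}
#endif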
3115789Sahrens 
3116789Sahrens void
3117789Sahrens arc_init(void)
3118789Sahrens {
3119789Sahrens 	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3120789Sahrens 	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3121789Sahrens 
31222391Smaybee 	/* Convert seconds to clock ticks */
31232638Sperrin 	arc_min_prefetch_lifespan = 1 * hz;
31242391Smaybee 
3125789Sahrens 	/* Start out with 1/8 of all memory */
31263403Sbmc 	arc_c = physmem * PAGESIZE / 8;
3127789Sahrens 
3128789Sahrens #ifdef _KERNEL
3129789Sahrens 	/*
3130789Sahrens 	 * On architectures where the physical memory can be larger
3131789Sahrens 	 * than the addressable space (intel in 32-bit mode), we may
3132789Sahrens 	 * than the addressable space (Intel in 32-bit mode), we may
3133789Sahrens 	 */
31343403Sbmc 	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3135789Sahrens #endif
3136789Sahrens 
3137982Smaybee 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
31383403Sbmc 	arc_c_min = MAX(arc_c / 4, 64<<20);
3139982Smaybee 	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
31403403Sbmc 	if (arc_c * 8 >= 1<<30)
31413403Sbmc 		arc_c_max = (arc_c * 8) - (1<<30);
3142789Sahrens 	else
31433403Sbmc 		arc_c_max = arc_c_min;
31443403Sbmc 	arc_c_max = MAX(arc_c * 6, arc_c_max);
31452885Sahrens 
31462885Sahrens 	/*
31472885Sahrens 	 * Allow the tunables to override our calculations if they are
31482885Sahrens 	 * reasonable (ie. over 64MB)
31492885Sahrens 	 * reasonable (i.e. over 64MB)
31502885Sahrens 	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
31513403Sbmc 		arc_c_max = zfs_arc_max;
31523403Sbmc 	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
31533403Sbmc 		arc_c_min = zfs_arc_min;
31542885Sahrens 
31553403Sbmc 	arc_c = arc_c_max;
31563403Sbmc 	arc_p = (arc_c >> 1);
3157789Sahrens 
31584309Smaybee 	/* limit meta-data to 1/4 of the arc capacity */
31594309Smaybee 	arc_meta_limit = arc_c_max / 4;
31604645Sek110237 
31614645Sek110237 	/* Allow the tunable to override if it is reasonable */
31624645Sek110237 	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
31634645Sek110237 		arc_meta_limit = zfs_arc_meta_limit;
31644645Sek110237 
31654309Smaybee 	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
31664309Smaybee 		arc_c_min = arc_meta_limit / 2;
31674309Smaybee 
3168789Sahrens 	/* if kmem_flags are set, lets try to use less memory */
3169789Sahrens 	if (kmem_debugging())
31703403Sbmc 		arc_c = arc_c / 2;
31713403Sbmc 	if (arc_c < arc_c_min)
31723403Sbmc 		arc_c = arc_c_min;
3173789Sahrens 
31743403Sbmc 	arc_anon = &ARC_anon;
31753403Sbmc 	arc_mru = &ARC_mru;
31763403Sbmc 	arc_mru_ghost = &ARC_mru_ghost;
31773403Sbmc 	arc_mfu = &ARC_mfu;
31783403Sbmc 	arc_mfu_ghost = &ARC_mfu_ghost;
3179*5450Sbrendan 	arc_l2c_only = &ARC_l2c_only;
31803403Sbmc 	arc_size = 0;
3181789Sahrens 
31823403Sbmc 	mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
31833403Sbmc 	mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
31843403Sbmc 	mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
31853403Sbmc 	mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
31863403Sbmc 	mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3187*5450Sbrendan 	mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
31882688Smaybee 
31894309Smaybee 	list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
31904309Smaybee 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31914309Smaybee 	list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
31924309Smaybee 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31934309Smaybee 	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
31944309Smaybee 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31954309Smaybee 	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
31964309Smaybee 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31974309Smaybee 	list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
31984309Smaybee 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
31994309Smaybee 	list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
32004309Smaybee 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
32014309Smaybee 	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
32024309Smaybee 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
32034309Smaybee 	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
32044309Smaybee 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3205*5450Sbrendan 	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3206*5450Sbrendan 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3207*5450Sbrendan 	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3208*5450Sbrendan 	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3209789Sahrens 
3210789Sahrens 	buf_init();
3211789Sahrens 
3212789Sahrens 	arc_thread_exit = 0;
32131544Seschrock 	arc_eviction_list = NULL;
32141544Seschrock 	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
32152887Smaybee 	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3216789Sahrens 
32173403Sbmc 	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
32183403Sbmc 	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
32193403Sbmc 
32203403Sbmc 	if (arc_ksp != NULL) {
32213403Sbmc 		arc_ksp->ks_data = &arc_stats;
32223403Sbmc 		kstat_install(arc_ksp);
32233403Sbmc 	}
32243403Sbmc 
3225789Sahrens 	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3226789Sahrens 	    TS_RUN, minclsyspri);
32273158Smaybee 
32283158Smaybee 	arc_dead = FALSE;
3229789Sahrens }
3230789Sahrens 
3231789Sahrens void
3232789Sahrens arc_fini(void)
3233789Sahrens {
3234789Sahrens 	mutex_enter(&arc_reclaim_thr_lock);
3235789Sahrens 	arc_thread_exit = 1;
3236789Sahrens 	while (arc_thread_exit != 0)
3237789Sahrens 		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3238789Sahrens 	mutex_exit(&arc_reclaim_thr_lock);
3239789Sahrens 
3240789Sahrens 	arc_flush();
3241789Sahrens 
3242789Sahrens 	arc_dead = TRUE;
3243789Sahrens 
32443403Sbmc 	if (arc_ksp != NULL) {
32453403Sbmc 		kstat_delete(arc_ksp);
32463403Sbmc 		arc_ksp = NULL;
32473403Sbmc 	}
32483403Sbmc 
32491544Seschrock 	mutex_destroy(&arc_eviction_mtx);
3250789Sahrens 	mutex_destroy(&arc_reclaim_thr_lock);
3251789Sahrens 	cv_destroy(&arc_reclaim_thr_cv);
3252789Sahrens 
32534309Smaybee 	list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
32544309Smaybee 	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
32554309Smaybee 	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
32564309Smaybee 	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
32574309Smaybee 	list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
32584309Smaybee 	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
32594309Smaybee 	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
32604309Smaybee 	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
3261789Sahrens 
32623403Sbmc 	mutex_destroy(&arc_anon->arcs_mtx);
32633403Sbmc 	mutex_destroy(&arc_mru->arcs_mtx);
32643403Sbmc 	mutex_destroy(&arc_mru_ghost->arcs_mtx);
32653403Sbmc 	mutex_destroy(&arc_mfu->arcs_mtx);
32663403Sbmc 	mutex_destroy(&arc_mfu_ghost->arcs_mtx);
	mutex_destroy(&arc_l2c_only->arcs_mtx);
32672856Snd150628 
3268789Sahrens 	buf_fini();
3269789Sahrens }
3270*5450Sbrendan 
3271*5450Sbrendan /*
3272*5450Sbrendan  * Level 2 ARC
3273*5450Sbrendan  *
3274*5450Sbrendan  * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3275*5450Sbrendan  * It uses dedicated storage devices to hold cached data, which are populated
3276*5450Sbrendan  * using large infrequent writes.  The main role of this cache is to boost
3277*5450Sbrendan  * the performance of random read workloads.  The intended L2ARC devices
3278*5450Sbrendan  * include short-stroked disks, solid state disks, and other media with
3279*5450Sbrendan  * substantially faster read latency than disk.
3280*5450Sbrendan  *
3281*5450Sbrendan  *                 +-----------------------+
3282*5450Sbrendan  *                 |         ARC           |
3283*5450Sbrendan  *                 +-----------------------+
3284*5450Sbrendan  *                    |         ^     ^
3285*5450Sbrendan  *                    |         |     |
3286*5450Sbrendan  *      l2arc_feed_thread()    arc_read()
3287*5450Sbrendan  *                    |         |     |
3288*5450Sbrendan  *                    |  l2arc read   |
3289*5450Sbrendan  *                    V         |     |
3290*5450Sbrendan  *               +---------------+    |
3291*5450Sbrendan  *               |     L2ARC     |    |
3292*5450Sbrendan  *               +---------------+    |
3293*5450Sbrendan  *                   |    ^           |
3294*5450Sbrendan  *          l2arc_write() |           |
3295*5450Sbrendan  *                   |    |           |
3296*5450Sbrendan  *                   V    |           |
3297*5450Sbrendan  *                 +-------+      +-------+
3298*5450Sbrendan  *                 | vdev  |      | vdev  |
3299*5450Sbrendan  *                 | cache |      | cache |
3300*5450Sbrendan  *                 +-------+      +-------+
3301*5450Sbrendan  *                 +=========+     .-----.
3302*5450Sbrendan  *                 :  L2ARC  :    |-_____-|
3303*5450Sbrendan  *                 : devices :    | Disks |
3304*5450Sbrendan  *                 +=========+    `-_____-'
3305*5450Sbrendan  *
3306*5450Sbrendan  * Read requests are satisfied from the following sources, in order:
3307*5450Sbrendan  *
3308*5450Sbrendan  *	1) ARC
3309*5450Sbrendan  *	2) vdev cache of L2ARC devices
3310*5450Sbrendan  *	3) L2ARC devices
3311*5450Sbrendan  *	4) vdev cache of disks
3312*5450Sbrendan  *	5) disks
3313*5450Sbrendan  *
3314*5450Sbrendan  * Some L2ARC device types exhibit extremely slow write performance.
3315*5450Sbrendan  * To accommodate this, there are some significant differences between
3316*5450Sbrendan  * the L2ARC and traditional cache design:
3317*5450Sbrendan  *
3318*5450Sbrendan  * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
3319*5450Sbrendan  * the ARC behave as usual, freeing buffers and placing headers on ghost
3320*5450Sbrendan  * lists.  The ARC does not send buffers to the L2ARC during eviction as
3321*5450Sbrendan  * this would inflate write latencies during any ARC memory pressure.
3322*5450Sbrendan  *
3323*5450Sbrendan  * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3324*5450Sbrendan  * It does this by periodically scanning buffers from the eviction-end of
3325*5450Sbrendan  * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3326*5450Sbrendan  * not already there.  It scans until a headroom of buffers is satisfied,
3327*5450Sbrendan  * which itself is a buffer for ARC eviction.  The thread that does this is
3328*5450Sbrendan  * which itself acts as a cushion against ARC eviction.  The thread that
3329*5450Sbrendan  * does this is l2arc_feed_thread(), illustrated below; example sizes are
3330*5450Sbrendan  * included to give a better sense of the ratios than the diagram alone:
3331*5450Sbrendan  *	       head -->                        tail
3332*5450Sbrendan  *	        +---------------------+----------+
3333*5450Sbrendan  *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
3334*5450Sbrendan  *	        +---------------------+----------+   |   o L2ARC eligible
3335*5450Sbrendan  *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
3336*5450Sbrendan  *	        +---------------------+----------+   |
3337*5450Sbrendan  *	             15.9 Gbytes      ^ 32 Mbytes    |
3338*5450Sbrendan  *	                           headroom          |
3339*5450Sbrendan  *	                                      l2arc_feed_thread()
3340*5450Sbrendan  *	                                             |
3341*5450Sbrendan  *	                 l2arc write hand <--[oooo]--'
3342*5450Sbrendan  *	                         |           8 Mbyte
3343*5450Sbrendan  *	                         |          write max
3344*5450Sbrendan  *	                         V
3345*5450Sbrendan  *		  +==============================+
3346*5450Sbrendan  *	L2ARC dev |####|#|###|###|    |####| ... |
3347*5450Sbrendan  *	          +==============================+
3348*5450Sbrendan  *	                     32 Gbytes
3349*5450Sbrendan  *
3350*5450Sbrendan  * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3351*5450Sbrendan  * evicted, then the L2ARC has cached a buffer much sooner than it probably
3352*5450Sbrendan  * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
3353*5450Sbrendan  * safe to say that this is an uncommon case, since buffers at the end of
3354*5450Sbrendan  * the ARC lists have moved there due to inactivity.
3355*5450Sbrendan  *
3356*5450Sbrendan  * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3357*5450Sbrendan  * then the L2ARC simply misses copying some buffers.  This serves as a
3358*5450Sbrendan  * pressure valve to prevent heavy read workloads from both stalling the ARC
3359*5450Sbrendan  * with waits and clogging the L2ARC with writes.  This also helps prevent
3360*5450Sbrendan  * the potential for the L2ARC to churn if it attempts to cache content too
3361*5450Sbrendan  * quickly, such as during backups of the entire pool.
3362*5450Sbrendan  *
3363*5450Sbrendan  * 5. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3364*5450Sbrendan  * the vdev queue can aggregate them into larger and fewer writes.  Each
3365*5450Sbrendan  * device is written to in a rotor fashion, sweeping writes through
3366*5450Sbrendan  * available space then repeating.
3367*5450Sbrendan  *
3368*5450Sbrendan  * 6. The L2ARC does not store dirty content.  It never needs to flush
3369*5450Sbrendan  * write buffers back to disk-based storage.
3370*5450Sbrendan  *
3371*5450Sbrendan  * 7. If an ARC buffer is written (and dirtied) which also exists in the
3372*5450Sbrendan  * L2ARC, the now stale L2ARC buffer is immediately dropped.
3373*5450Sbrendan  *
3374*5450Sbrendan  * The performance of the L2ARC can be tweaked by a number of tunables, which
3375*5450Sbrendan  * may be necessary for different workloads:
3376*5450Sbrendan  *
3377*5450Sbrendan  *	l2arc_write_max		max write bytes per interval
3378*5450Sbrendan  *	l2arc_noprefetch	skip caching prefetched buffers
3379*5450Sbrendan  *	l2arc_headroom		number of max device writes to precache
3380*5450Sbrendan  *	l2arc_feed_secs		seconds between L2ARC writing
3381*5450Sbrendan  *
3382*5450Sbrendan  * Tunables may be removed or added as future performance improvements are
3383*5450Sbrendan  * integrated, and also may become zpool properties.
3384*5450Sbrendan  */
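
/*
 * Worked example (illustrative, using the sizes from the diagram above):
 * with l2arc_write_max at 8 Mbytes and l2arc_headroom at 4, each feed
 * interval scans up to 8MB * 4 = 32MB from the tail of each ARC list
 * and writes at most 8MB of eligible buffers to the current L2ARC
 * device, so a 32 Gbyte device takes roughly 4096 write intervals to
 * sweep end to end.
 */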
3385*5450Sbrendan 
3386*5450Sbrendan static void
3387*5450Sbrendan l2arc_hdr_stat_add(void)
3388*5450Sbrendan {
3389*5450Sbrendan 	ARCSTAT_INCR(arcstat_l2_hdr_size, sizeof (arc_buf_hdr_t) +
3390*5450Sbrendan 	    sizeof (l2arc_buf_hdr_t));
3391*5450Sbrendan 	ARCSTAT_INCR(arcstat_hdr_size, -sizeof (arc_buf_hdr_t));
3392*5450Sbrendan }
3393*5450Sbrendan 
3394*5450Sbrendan static void
3395*5450Sbrendan l2arc_hdr_stat_remove(void)
3396*5450Sbrendan {
3397*5450Sbrendan 	ARCSTAT_INCR(arcstat_l2_hdr_size, -sizeof (arc_buf_hdr_t) -
3398*5450Sbrendan 	    sizeof (l2arc_buf_hdr_t));
3399*5450Sbrendan 	ARCSTAT_INCR(arcstat_hdr_size, sizeof (arc_buf_hdr_t));
3400*5450Sbrendan }
3401*5450Sbrendan 
3402*5450Sbrendan /*
3403*5450Sbrendan  * Cycle through L2ARC devices.  This is how L2ARC load balances.
3404*5450Sbrendan  * This is called with l2arc_dev_mtx held, which also locks out spa removal.
3405*5450Sbrendan  */
3406*5450Sbrendan static l2arc_dev_t *
3407*5450Sbrendan l2arc_dev_get_next(void)
3408*5450Sbrendan {
3409*5450Sbrendan 	l2arc_dev_t *next;
3410*5450Sbrendan 
3411*5450Sbrendan 	if (l2arc_dev_last == NULL) {
3412*5450Sbrendan 		next = list_head(l2arc_dev_list);
3413*5450Sbrendan 	} else {
3414*5450Sbrendan 		next = list_next(l2arc_dev_list, l2arc_dev_last);
3415*5450Sbrendan 		if (next == NULL)
3416*5450Sbrendan 			next = list_head(l2arc_dev_list);
3417*5450Sbrendan 	}
3418*5450Sbrendan 
3419*5450Sbrendan 	l2arc_dev_last = next;
3420*5450Sbrendan 
3421*5450Sbrendan 	return (next);
3422*5450Sbrendan }
3423*5450Sbrendan 
3424*5450Sbrendan /*
3425*5450Sbrendan  * A write to a cache device has completed.  Update all headers to allow
3426*5450Sbrendan  * reads from these buffers to begin.
3427*5450Sbrendan  */
3428*5450Sbrendan static void
3429*5450Sbrendan l2arc_write_done(zio_t *zio)
3430*5450Sbrendan {
3431*5450Sbrendan 	l2arc_write_callback_t *cb;
3432*5450Sbrendan 	l2arc_dev_t *dev;
3433*5450Sbrendan 	list_t *buflist;
3434*5450Sbrendan 	l2arc_data_free_t *df, *df_prev;
3435*5450Sbrendan 	arc_buf_hdr_t *head, *ab, *ab_prev;
3436*5450Sbrendan 	kmutex_t *hash_lock;
3437*5450Sbrendan 
3438*5450Sbrendan 	cb = zio->io_private;
3439*5450Sbrendan 	ASSERT(cb != NULL);
3440*5450Sbrendan 	dev = cb->l2wcb_dev;
3441*5450Sbrendan 	ASSERT(dev != NULL);
3442*5450Sbrendan 	head = cb->l2wcb_head;
3443*5450Sbrendan 	ASSERT(head != NULL);
3444*5450Sbrendan 	buflist = dev->l2ad_buflist;
3445*5450Sbrendan 	ASSERT(buflist != NULL);
3446*5450Sbrendan 	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
3447*5450Sbrendan 	    l2arc_write_callback_t *, cb);
3448*5450Sbrendan 
3449*5450Sbrendan 	if (zio->io_error != 0)
3450*5450Sbrendan 		ARCSTAT_BUMP(arcstat_l2_writes_error);
3451*5450Sbrendan 
3452*5450Sbrendan 	mutex_enter(&l2arc_buflist_mtx);
3453*5450Sbrendan 
3454*5450Sbrendan 	/*
3455*5450Sbrendan 	 * All writes completed, or an error was hit.
3456*5450Sbrendan 	 */
3457*5450Sbrendan 	for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
3458*5450Sbrendan 		ab_prev = list_prev(buflist, ab);
3459*5450Sbrendan 
3460*5450Sbrendan 		hash_lock = HDR_LOCK(ab);
3461*5450Sbrendan 		if (!mutex_tryenter(hash_lock)) {
3462*5450Sbrendan 			/*
3463*5450Sbrendan 			 * This buffer misses out.  It may be in a stage
3464*5450Sbrendan 			 * of eviction.  Its ARC_L2_WRITING flag will be
3465*5450Sbrendan 			 * left set, denying reads to this buffer.
3466*5450Sbrendan 			 */
3467*5450Sbrendan 			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
3468*5450Sbrendan 			continue;
3469*5450Sbrendan 		}
3470*5450Sbrendan 
3471*5450Sbrendan 		if (zio->io_error != 0) {
3472*5450Sbrendan 			/*
3473*5450Sbrendan 			 * Error - invalidate L2ARC entry.
3474*5450Sbrendan 			 */
3475*5450Sbrendan 			ab->b_l2hdr = NULL;
3476*5450Sbrendan 		}
3477*5450Sbrendan 
3478*5450Sbrendan 		/*
3479*5450Sbrendan 		 * Allow ARC to begin reads to this L2ARC entry.
3480*5450Sbrendan 		 */
3481*5450Sbrendan 		ab->b_flags &= ~ARC_L2_WRITING;
3482*5450Sbrendan 
3483*5450Sbrendan 		mutex_exit(hash_lock);
3484*5450Sbrendan 	}
3485*5450Sbrendan 
3486*5450Sbrendan 	atomic_inc_64(&l2arc_writes_done);
3487*5450Sbrendan 	list_remove(buflist, head);
3488*5450Sbrendan 	kmem_cache_free(hdr_cache, head);
3489*5450Sbrendan 	mutex_exit(&l2arc_buflist_mtx);
3490*5450Sbrendan 
3491*5450Sbrendan 	/*
3492*5450Sbrendan 	 * Free buffers that were tagged for destruction.
3493*5450Sbrendan 	 */
3494*5450Sbrendan 	mutex_enter(&l2arc_free_on_write_mtx);
3495*5450Sbrendan 	buflist = l2arc_free_on_write;
3496*5450Sbrendan 	for (df = list_tail(buflist); df; df = df_prev) {
3497*5450Sbrendan 		df_prev = list_prev(buflist, df);
3498*5450Sbrendan 		ASSERT(df->l2df_data != NULL);
3499*5450Sbrendan 		ASSERT(df->l2df_func != NULL);
3500*5450Sbrendan 		df->l2df_func(df->l2df_data, df->l2df_size);
3501*5450Sbrendan 		list_remove(buflist, df);
3502*5450Sbrendan 		kmem_free(df, sizeof (l2arc_data_free_t));
3503*5450Sbrendan 	}
3504*5450Sbrendan 	mutex_exit(&l2arc_free_on_write_mtx);
3505*5450Sbrendan 
3506*5450Sbrendan 	kmem_free(cb, sizeof (l2arc_write_callback_t));
3507*5450Sbrendan }
3508*5450Sbrendan 
3509*5450Sbrendan /*
3510*5450Sbrendan  * A read to a cache device completed.  Validate buffer contents before
3511*5450Sbrendan  * handing over to the regular ARC routines.
3512*5450Sbrendan  */
3513*5450Sbrendan static void
3514*5450Sbrendan l2arc_read_done(zio_t *zio)
3515*5450Sbrendan {
3516*5450Sbrendan 	l2arc_read_callback_t *cb;
3517*5450Sbrendan 	arc_buf_hdr_t *hdr;
3518*5450Sbrendan 	arc_buf_t *buf;
3519*5450Sbrendan 	zio_t *rzio;
3520*5450Sbrendan 	kmutex_t *hash_lock;
3521*5450Sbrendan 	int equal, err = 0;
3522*5450Sbrendan 
3523*5450Sbrendan 	cb = zio->io_private;
3524*5450Sbrendan 	ASSERT(cb != NULL);
3525*5450Sbrendan 	buf = cb->l2rcb_buf;
3526*5450Sbrendan 	ASSERT(buf != NULL);
3527*5450Sbrendan 	hdr = buf->b_hdr;
3528*5450Sbrendan 	ASSERT(hdr != NULL);
3529*5450Sbrendan 
3530*5450Sbrendan 	hash_lock = HDR_LOCK(hdr);
3531*5450Sbrendan 	mutex_enter(hash_lock);
3532*5450Sbrendan 
3533*5450Sbrendan 	/*
3534*5450Sbrendan 	 * Check this survived the L2ARC journey.
3535*5450Sbrendan 	 */
3536*5450Sbrendan 	equal = arc_cksum_equal(buf);
3537*5450Sbrendan 	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
3538*5450Sbrendan 		mutex_exit(hash_lock);
3539*5450Sbrendan 		zio->io_private = buf;
3540*5450Sbrendan 		arc_read_done(zio);
3541*5450Sbrendan 	} else {
3542*5450Sbrendan 		mutex_exit(hash_lock);
3543*5450Sbrendan 		/*
3544*5450Sbrendan 		 * Buffer didn't survive caching.  Increment stats and
3545*5450Sbrendan 		 * reissue to the original storage device.
3546*5450Sbrendan 		 */
3547*5450Sbrendan 		if (zio->io_error != 0)
3548*5450Sbrendan 			ARCSTAT_BUMP(arcstat_l2_io_error);
3549*5450Sbrendan 		if (!equal)
3550*5450Sbrendan 			ARCSTAT_BUMP(arcstat_l2_cksum_bad);
3551*5450Sbrendan 
3552*5450Sbrendan 		zio->io_flags &= ~ZIO_FLAG_DONT_CACHE;
3553*5450Sbrendan 		rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp,
3554*5450Sbrendan 		    buf->b_data, zio->io_size, arc_read_done, buf,
3555*5450Sbrendan 		    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb);
3556*5450Sbrendan 
3557*5450Sbrendan 		/*
3558*5450Sbrendan 		 * Since this is a separate thread, we can wait on this
3559*5450Sbrendan 		 * I/O whether there is an io_waiter or not.
3560*5450Sbrendan 		 */
3561*5450Sbrendan 		err = zio_wait(rzio);
3562*5450Sbrendan 
3563*5450Sbrendan 		/*
3564*5450Sbrendan 		 * Let the resent I/O call arc_read_done() instead.
3565*5450Sbrendan 		 * Let the reissued I/O call arc_read_done() instead.
3566*5450Sbrendan 		 */
3567*5450Sbrendan 		zio->io_done = NULL;
3568*5450Sbrendan 		zio->io_waiter = NULL;
3569*5450Sbrendan 		zio->io_error = err;
3570*5450Sbrendan 	}
3571*5450Sbrendan 
3572*5450Sbrendan 	kmem_free(cb, sizeof (l2arc_read_callback_t));
3573*5450Sbrendan }
3574*5450Sbrendan 
3575*5450Sbrendan /*
3576*5450Sbrendan  * This is the list priority from which the L2ARC will search for pages to
3577*5450Sbrendan  * cache.  This is used within loops (0..3) to cycle through lists in the
3578*5450Sbrendan  * desired order.  This order can have a significant effect on cache
3579*5450Sbrendan  * performance.
3580*5450Sbrendan  *
3581*5450Sbrendan  * Currently the metadata lists are hit first, MFU then MRU, followed by
3582*5450Sbrendan  * the data lists.  This function returns a locked list, and also returns
3583*5450Sbrendan  * the lock pointer.
3584*5450Sbrendan  */
3585*5450Sbrendan static list_t *
3586*5450Sbrendan l2arc_list_locked(int list_num, kmutex_t **lock)
3587*5450Sbrendan {
3588*5450Sbrendan 	list_t *list;
3589*5450Sbrendan 
3590*5450Sbrendan 	ASSERT(list_num >= 0 && list_num <= 3);
3591*5450Sbrendan 
3592*5450Sbrendan 	switch (list_num) {
3593*5450Sbrendan 	case 0:
3594*5450Sbrendan 		list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
3595*5450Sbrendan 		*lock = &arc_mfu->arcs_mtx;
3596*5450Sbrendan 		break;
3597*5450Sbrendan 	case 1:
3598*5450Sbrendan 		list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
3599*5450Sbrendan 		*lock = &arc_mru->arcs_mtx;
3600*5450Sbrendan 		break;
3601*5450Sbrendan 	case 2:
3602*5450Sbrendan 		list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
3603*5450Sbrendan 		*lock = &arc_mfu->arcs_mtx;
3604*5450Sbrendan 		break;
3605*5450Sbrendan 	case 3:
3606*5450Sbrendan 		list = &arc_mru->arcs_list[ARC_BUFC_DATA];
3607*5450Sbrendan 		*lock = &arc_mru->arcs_mtx;
3608*5450Sbrendan 		break;
3609*5450Sbrendan 	}
3610*5450Sbrendan 
3611*5450Sbrendan 	ASSERT(!(MUTEX_HELD(*lock)));
3612*5450Sbrendan 	mutex_enter(*lock);
3613*5450Sbrendan 	return (list);
3614*5450Sbrendan }
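
/*
 * Illustrative sketch (not in the original source): typical use of
 * l2arc_list_locked(), mirroring the loop in l2arc_write_buffers()
 * below -- visit each list in priority order, scan from its tail, and
 * drop the returned lock before moving on.
 */
#if 0
	list_t *list;
	kmutex_t *list_lock;
	arc_buf_hdr_t *ab;

	for (int try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		for (ab = list_tail(list); ab; ab = list_prev(list, ab)) {
			/* ... examine candidate buffers ... */
		}
		mutex_exit(list_lock);
	}
#endif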
3615*5450Sbrendan 
3616*5450Sbrendan /*
3617*5450Sbrendan  * Evict buffers from the device write hand to the distance specified in
3618*5450Sbrendan  * bytes.  This distance may span populated buffers, or it may span nothing.
3619*5450Sbrendan  * This clears a region of the L2ARC device, making it ready for writing.
3620*5450Sbrendan  * If the 'all' boolean is set, every buffer is evicted.
3621*5450Sbrendan  */
3622*5450Sbrendan static void
3623*5450Sbrendan l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
3624*5450Sbrendan {
3625*5450Sbrendan 	list_t *buflist;
3626*5450Sbrendan 	l2arc_buf_hdr_t *abl2;
3627*5450Sbrendan 	arc_buf_hdr_t *ab, *ab_prev;
3628*5450Sbrendan 	kmutex_t *hash_lock;
3629*5450Sbrendan 	uint64_t taddr;
3630*5450Sbrendan 
3631*5450Sbrendan 	ASSERT(MUTEX_HELD(&l2arc_dev_mtx));
3632*5450Sbrendan 
3633*5450Sbrendan 	buflist = dev->l2ad_buflist;
3634*5450Sbrendan 
3635*5450Sbrendan 	if (buflist == NULL)
3636*5450Sbrendan 		return;
3637*5450Sbrendan 
3638*5450Sbrendan 	if (!all && dev->l2ad_first) {
3639*5450Sbrendan 		/*
3640*5450Sbrendan 		 * This is the first sweep through the device.  There is
3641*5450Sbrendan 		 * nothing to evict.
3642*5450Sbrendan 		 */
3643*5450Sbrendan 		return;
3644*5450Sbrendan 	}
3645*5450Sbrendan 
3646*5450Sbrendan 	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) {
3647*5450Sbrendan 		/*
3648*5450Sbrendan 		 * When nearing the end of the device, evict to the end
3649*5450Sbrendan 		 * before the device write hand jumps to the start.
3650*5450Sbrendan 		 */
3651*5450Sbrendan 		taddr = dev->l2ad_end;
3652*5450Sbrendan 	} else {
3653*5450Sbrendan 		taddr = dev->l2ad_hand + distance;
3654*5450Sbrendan 	}
3655*5450Sbrendan 	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
3656*5450Sbrendan 	    uint64_t, taddr, boolean_t, all);
3657*5450Sbrendan 
3658*5450Sbrendan top:
3659*5450Sbrendan 	mutex_enter(&l2arc_buflist_mtx);
3660*5450Sbrendan 	for (ab = list_tail(buflist); ab; ab = ab_prev) {
3661*5450Sbrendan 		ab_prev = list_prev(buflist, ab);
3662*5450Sbrendan 
3663*5450Sbrendan 		hash_lock = HDR_LOCK(ab);
3664*5450Sbrendan 		if (!mutex_tryenter(hash_lock)) {
3665*5450Sbrendan 			/*
3666*5450Sbrendan 			 * Missed the hash lock.  Retry.
3667*5450Sbrendan 			 */
3668*5450Sbrendan 			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
3669*5450Sbrendan 			mutex_exit(&l2arc_buflist_mtx);
3670*5450Sbrendan 			mutex_enter(hash_lock);
3671*5450Sbrendan 			mutex_exit(hash_lock);
3672*5450Sbrendan 			goto top;
3673*5450Sbrendan 		}
3674*5450Sbrendan 
3675*5450Sbrendan 		if (HDR_L2_WRITE_HEAD(ab)) {
3676*5450Sbrendan 			/*
3677*5450Sbrendan 			 * We hit a write head node.  Leave it for
3678*5450Sbrendan 			 * l2arc_write_done().
3679*5450Sbrendan 			 */
3680*5450Sbrendan 			list_remove(buflist, ab);
3681*5450Sbrendan 			mutex_exit(hash_lock);
3682*5450Sbrendan 			continue;
3683*5450Sbrendan 		}
3684*5450Sbrendan 
3685*5450Sbrendan 		if (!all && ab->b_l2hdr != NULL &&
3686*5450Sbrendan 		    (ab->b_l2hdr->b_daddr > taddr ||
3687*5450Sbrendan 		    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
3688*5450Sbrendan 			/*
3689*5450Sbrendan 			 * We've evicted to the target address,
3690*5450Sbrendan 			 * or the end of the device.
3691*5450Sbrendan 			 */
3692*5450Sbrendan 			mutex_exit(hash_lock);
3693*5450Sbrendan 			break;
3694*5450Sbrendan 		}
3695*5450Sbrendan 
3696*5450Sbrendan 		if (HDR_FREE_IN_PROGRESS(ab)) {
3697*5450Sbrendan 			/*
3698*5450Sbrendan 			 * Already on the path to destruction.
3699*5450Sbrendan 			 */
3700*5450Sbrendan 			mutex_exit(hash_lock);
3701*5450Sbrendan 			continue;
3702*5450Sbrendan 		}
3703*5450Sbrendan 
3704*5450Sbrendan 		if (ab->b_state == arc_l2c_only) {
3705*5450Sbrendan 			ASSERT(!HDR_L2_READING(ab));
3706*5450Sbrendan 			/*
3707*5450Sbrendan 			 * This doesn't exist in the ARC.  Destroy.
3708*5450Sbrendan 			 * arc_hdr_destroy() will call list_remove()
3709*5450Sbrendan 			 * and decrement arcstat_l2_size.
3710*5450Sbrendan 			 */
3711*5450Sbrendan 			arc_change_state(arc_anon, ab, hash_lock);
3712*5450Sbrendan 			arc_hdr_destroy(ab);
3713*5450Sbrendan 		} else {
3714*5450Sbrendan 			/*
3715*5450Sbrendan 			 * Tell ARC this no longer exists in L2ARC.
3716*5450Sbrendan 			 */
3717*5450Sbrendan 			if (ab->b_l2hdr != NULL) {
3718*5450Sbrendan 				abl2 = ab->b_l2hdr;
3719*5450Sbrendan 				ab->b_l2hdr = NULL;
3720*5450Sbrendan 				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3721*5450Sbrendan 				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3722*5450Sbrendan 			}
3723*5450Sbrendan 			list_remove(buflist, ab);
3724*5450Sbrendan 
3725*5450Sbrendan 			/*
3726*5450Sbrendan 			 * This may have been leftover after a
3727*5450Sbrendan 			 * failed write.
3728*5450Sbrendan 			 */
3729*5450Sbrendan 			ab->b_flags &= ~ARC_L2_WRITING;
3730*5450Sbrendan 
3731*5450Sbrendan 			/*
3732*5450Sbrendan 			 * Invalidate issued or about to be issued
3733*5450Sbrendan 			 * reads, since we may be about to write
3734*5450Sbrendan 			 * over this location.
3735*5450Sbrendan 			 */
3736*5450Sbrendan 			if (HDR_L2_READING(ab)) {
3737*5450Sbrendan 				ARCSTAT_BUMP(arcstat_l2_evict_reading);
3738*5450Sbrendan 				ab->b_flags |= ARC_L2_EVICTED;
3739*5450Sbrendan 			}
3740*5450Sbrendan 		}
3741*5450Sbrendan 		mutex_exit(hash_lock);
3742*5450Sbrendan 	}
3743*5450Sbrendan 	mutex_exit(&l2arc_buflist_mtx);
3744*5450Sbrendan 
3745*5450Sbrendan 	spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict));
3746*5450Sbrendan 	dev->l2ad_evict = taddr;
3747*5450Sbrendan }
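
/*
 * Worked example (illustrative): suppose l2ad_write is 8 Mbytes.  While
 * the write hand is more than 2 * 8MB = 16MB from l2ad_end, l2arc_evict()
 * clears up to taddr = l2ad_hand + distance.  Once the hand comes within
 * that 16MB, the target jumps to l2ad_end so the remaining region is
 * clear before the hand wraps back to l2ad_start.
 */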
3748*5450Sbrendan 
3749*5450Sbrendan /*
3750*5450Sbrendan  * Find and write ARC buffers to the L2ARC device.
3751*5450Sbrendan  *
3752*5450Sbrendan  * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
3753*5450Sbrendan  * for reading until they have completed writing.
3754*5450Sbrendan  */
3755*5450Sbrendan static void
3756*5450Sbrendan l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev)
3757*5450Sbrendan {
3758*5450Sbrendan 	arc_buf_hdr_t *ab, *ab_prev, *head;
3759*5450Sbrendan 	l2arc_buf_hdr_t *hdrl2;
3760*5450Sbrendan 	list_t *list;
3761*5450Sbrendan 	uint64_t passed_sz, write_sz, buf_sz;
3762*5450Sbrendan 	uint64_t target_sz = dev->l2ad_write;
3763*5450Sbrendan 	uint64_t headroom = dev->l2ad_write * l2arc_headroom;
3764*5450Sbrendan 	void *buf_data;
3765*5450Sbrendan 	kmutex_t *hash_lock, *list_lock;
3766*5450Sbrendan 	boolean_t have_lock, full;
3767*5450Sbrendan 	l2arc_write_callback_t *cb;
3768*5450Sbrendan 	zio_t *pio, *wzio;
3769*5450Sbrendan 
3770*5450Sbrendan 	ASSERT(MUTEX_HELD(&l2arc_dev_mtx));
3771*5450Sbrendan 	ASSERT(dev->l2ad_vdev != NULL);
3772*5450Sbrendan 
3773*5450Sbrendan 	pio = NULL;
3774*5450Sbrendan 	write_sz = 0;
3775*5450Sbrendan 	full = B_FALSE;
3776*5450Sbrendan 	head = kmem_cache_alloc(hdr_cache, KM_SLEEP);
3777*5450Sbrendan 	head->b_flags |= ARC_L2_WRITE_HEAD;
3778*5450Sbrendan 
3779*5450Sbrendan 	/*
3780*5450Sbrendan 	 * Copy buffers for L2ARC writing.
3781*5450Sbrendan 	 */
3782*5450Sbrendan 	mutex_enter(&l2arc_buflist_mtx);
3783*5450Sbrendan 	for (int try = 0; try <= 3; try++) {
3784*5450Sbrendan 		list = l2arc_list_locked(try, &list_lock);
3785*5450Sbrendan 		passed_sz = 0;
3786*5450Sbrendan 
3787*5450Sbrendan 		for (ab = list_tail(list); ab; ab = ab_prev) {
3788*5450Sbrendan 			ab_prev = list_prev(list, ab);
3789*5450Sbrendan 
3790*5450Sbrendan 			hash_lock = HDR_LOCK(ab);
3791*5450Sbrendan 			have_lock = MUTEX_HELD(hash_lock);
3792*5450Sbrendan 			if (!have_lock && !mutex_tryenter(hash_lock)) {
3793*5450Sbrendan 				/*
3794*5450Sbrendan 				 * Skip this buffer rather than waiting.
3795*5450Sbrendan 				 */
3796*5450Sbrendan 				continue;
3797*5450Sbrendan 			}
3798*5450Sbrendan 
3799*5450Sbrendan 			passed_sz += ab->b_size;
3800*5450Sbrendan 			if (passed_sz > headroom) {
3801*5450Sbrendan 				/*
3802*5450Sbrendan 				 * Searched too far.
3803*5450Sbrendan 				 */
3804*5450Sbrendan 				mutex_exit(hash_lock);
3805*5450Sbrendan 				break;
3806*5450Sbrendan 			}
3807*5450Sbrendan 
3808*5450Sbrendan 			if (ab->b_spa != spa) {
3809*5450Sbrendan 				mutex_exit(hash_lock);
3810*5450Sbrendan 				continue;
3811*5450Sbrendan 			}
3812*5450Sbrendan 
3813*5450Sbrendan 			if (ab->b_l2hdr != NULL) {
3814*5450Sbrendan 				/*
3815*5450Sbrendan 				 * Already in L2ARC.
3816*5450Sbrendan 				 */
3817*5450Sbrendan 				mutex_exit(hash_lock);
3818*5450Sbrendan 				continue;
3819*5450Sbrendan 			}
3820*5450Sbrendan 
3821*5450Sbrendan 			if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) {
3822*5450Sbrendan 				mutex_exit(hash_lock);
3823*5450Sbrendan 				continue;
3824*5450Sbrendan 			}
3825*5450Sbrendan 
3826*5450Sbrendan 			if ((write_sz + ab->b_size) > target_sz) {
3827*5450Sbrendan 				full = B_TRUE;
3828*5450Sbrendan 				mutex_exit(hash_lock);
3829*5450Sbrendan 				break;
3830*5450Sbrendan 			}
3831*5450Sbrendan 
3832*5450Sbrendan 			if (ab->b_buf == NULL) {
3833*5450Sbrendan 				DTRACE_PROBE1(l2arc__buf__null, void *, ab);
3834*5450Sbrendan 				mutex_exit(hash_lock);
3835*5450Sbrendan 				continue;
3836*5450Sbrendan 			}
3837*5450Sbrendan 
3838*5450Sbrendan 			if (pio == NULL) {
3839*5450Sbrendan 				/*
3840*5450Sbrendan 				 * Insert a dummy header on the buflist so
3841*5450Sbrendan 				 * l2arc_write_done() can find where the
3842*5450Sbrendan 				 * write buffers begin without searching.
3843*5450Sbrendan 				 */
3844*5450Sbrendan 				list_insert_head(dev->l2ad_buflist, head);
3845*5450Sbrendan 
3846*5450Sbrendan 				cb = kmem_alloc(
3847*5450Sbrendan 				    sizeof (l2arc_write_callback_t), KM_SLEEP);
3848*5450Sbrendan 				cb->l2wcb_dev = dev;
3849*5450Sbrendan 				cb->l2wcb_head = head;
3850*5450Sbrendan 				pio = zio_root(spa, l2arc_write_done, cb,
3851*5450Sbrendan 				    ZIO_FLAG_CANFAIL);
3852*5450Sbrendan 			}
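			/*
			 * pio is the root of a zio tree: each buffer
			 * write below attaches to it as a child, and
			 * l2arc_write_done() fires only after all of
			 * the children have completed.
			 */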
3853*5450Sbrendan 
3854*5450Sbrendan 			/*
3855*5450Sbrendan 			 * Create and add a new L2ARC header.
3856*5450Sbrendan 			 */
3857*5450Sbrendan 			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
3858*5450Sbrendan 			hdrl2->b_dev = dev;
3859*5450Sbrendan 			hdrl2->b_daddr = dev->l2ad_hand;
3860*5450Sbrendan 
3861*5450Sbrendan 			ab->b_flags |= ARC_L2_WRITING;
3862*5450Sbrendan 			ab->b_l2hdr = hdrl2;
3863*5450Sbrendan 			list_insert_head(dev->l2ad_buflist, ab);
3864*5450Sbrendan 			buf_data = ab->b_buf->b_data;
3865*5450Sbrendan 			buf_sz = ab->b_size;
3866*5450Sbrendan 
3867*5450Sbrendan 			/*
3868*5450Sbrendan 			 * Compute and store the buffer cksum before writing.
3869*5450Sbrendan 			 * On debug builds the existing cksum is verified first.
3870*5450Sbrendan 			 */
3871*5450Sbrendan 			arc_cksum_verify(ab->b_buf);
3872*5450Sbrendan 			arc_cksum_compute(ab->b_buf, B_TRUE);
3873*5450Sbrendan 
3874*5450Sbrendan 			mutex_exit(hash_lock);
3875*5450Sbrendan 
3876*5450Sbrendan 			wzio = zio_write_phys(pio, dev->l2ad_vdev,
3877*5450Sbrendan 			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
3878*5450Sbrendan 			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
3879*5450Sbrendan 			    ZIO_FLAG_CANFAIL, B_FALSE);
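			/*
			 * The device write itself carries no ZIO
			 * checksum; the ARC-level cksum stored above is
			 * meant to validate the data instead.
			 * ZIO_FLAG_CANFAIL is safe because the L2ARC is
			 * only a cache: a failed write simply means a
			 * later read falls back to the main pool.
			 */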
3880*5450Sbrendan 
3881*5450Sbrendan 			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
3882*5450Sbrendan 			    zio_t *, wzio);
3883*5450Sbrendan 			(void) zio_nowait(wzio);
3884*5450Sbrendan 
3885*5450Sbrendan 			write_sz += buf_sz;
3886*5450Sbrendan 			dev->l2ad_hand += buf_sz;
3887*5450Sbrendan 		}
3888*5450Sbrendan 
3889*5450Sbrendan 		mutex_exit(list_lock);
3890*5450Sbrendan 
3891*5450Sbrendan 		if (full == B_TRUE)
3892*5450Sbrendan 			break;
3893*5450Sbrendan 	}
3894*5450Sbrendan 	mutex_exit(&l2arc_buflist_mtx);
3895*5450Sbrendan 
3896*5450Sbrendan 	if (pio == NULL) {
3897*5450Sbrendan 		ASSERT3U(write_sz, ==, 0);
3898*5450Sbrendan 		kmem_cache_free(hdr_cache, head);
3899*5450Sbrendan 		return;
3900*5450Sbrendan 	}
3901*5450Sbrendan 
3902*5450Sbrendan 	ASSERT3U(write_sz, <=, target_sz);
3903*5450Sbrendan 	ARCSTAT_BUMP(arcstat_l2_writes_sent);
3904*5450Sbrendan 	ARCSTAT_INCR(arcstat_l2_size, write_sz);
3905*5450Sbrendan 	spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);
3906*5450Sbrendan 
3907*5450Sbrendan 	/*
3908*5450Sbrendan 	 * Bump device hand to the device start if it is approaching the end.
3909*5450Sbrendan 	 * l2arc_evict() will already have evicted ahead for this case.
3910*5450Sbrendan 	 */
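	/*
	 * Illustrative numbers: with l2ad_start = 0, l2ad_end = 10GB and
	 * l2ad_write = 8MB, any hand at or beyond 10GB - 8MB wraps back
	 * to 0.  Clearing l2ad_first tells l2arc_evict() that the space
	 * ahead of the hand now holds old buffers which must be evicted
	 * before being overwritten.
	 */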
3911*5450Sbrendan 	if (dev->l2ad_hand >= (dev->l2ad_end - dev->l2ad_write)) {
3912*5450Sbrendan 		spa_l2cache_space_update(dev->l2ad_vdev, 0,
3913*5450Sbrendan 		    dev->l2ad_end - dev->l2ad_hand);
3914*5450Sbrendan 		dev->l2ad_hand = dev->l2ad_start;
3915*5450Sbrendan 		dev->l2ad_evict = dev->l2ad_start;
3916*5450Sbrendan 		dev->l2ad_first = B_FALSE;
3917*5450Sbrendan 	}
3918*5450Sbrendan 
3919*5450Sbrendan 	(void) zio_wait(pio);
3920*5450Sbrendan }
3921*5450Sbrendan 
3922*5450Sbrendan /*
3923*5450Sbrendan  * This thread feeds the L2ARC at regular intervals.  This is the beating
3924*5450Sbrendan  * heart of the L2ARC.
3925*5450Sbrendan  */
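/*
 * Each cycle: sleep for the feed interval; do nothing if no L2ARC
 * devices exist or if the system is under memory pressure; otherwise
 * pick the next device, evict the region about to be overwritten, and
 * write a batch of ARC buffers to it.
 */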
3926*5450Sbrendan static void
3927*5450Sbrendan l2arc_feed_thread(void)
3928*5450Sbrendan {
3929*5450Sbrendan 	callb_cpr_t cpr;
3930*5450Sbrendan 	l2arc_dev_t *dev;
3931*5450Sbrendan 	spa_t *spa;
3932*5450Sbrendan 	int interval;
3933*5450Sbrendan 	boolean_t startup = B_TRUE;
3934*5450Sbrendan 
3935*5450Sbrendan 	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
3936*5450Sbrendan 
3937*5450Sbrendan 	mutex_enter(&l2arc_feed_thr_lock);
3938*5450Sbrendan 
3939*5450Sbrendan 	while (l2arc_thread_exit == 0) {
3940*5450Sbrendan 		/*
3941*5450Sbrendan 		 * Initially pause for L2ARC_FEED_DELAY seconds as a grace
3942*5450Sbrendan 		 * interval during boot, followed by l2arc_feed_secs seconds
3943*5450Sbrendan 		 * thereafter.
3944*5450Sbrendan 		 */
3945*5450Sbrendan 		CALLB_CPR_SAFE_BEGIN(&cpr);
3946*5450Sbrendan 		if (startup) {
3947*5450Sbrendan 			interval = L2ARC_FEED_DELAY;
3948*5450Sbrendan 			startup = B_FALSE;
3949*5450Sbrendan 		} else {
3950*5450Sbrendan 			interval = l2arc_feed_secs;
3951*5450Sbrendan 		}
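		/*
		 * cv_timedwait() takes an absolute deadline in clock
		 * ticks: lbolt is the current tick count and hz the
		 * number of ticks per second, so this sleeps for about
		 * `interval' seconds, or until signalled to exit.
		 */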
3952*5450Sbrendan 		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
3953*5450Sbrendan 		    lbolt + (hz * interval));
3954*5450Sbrendan 		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
3955*5450Sbrendan 
3956*5450Sbrendan 		/*
3957*5450Sbrendan 		 * Do nothing until L2ARC devices exist.
3958*5450Sbrendan 		 */
3959*5450Sbrendan 		mutex_enter(&l2arc_dev_mtx);
3960*5450Sbrendan 		if (l2arc_ndev == 0) {
3961*5450Sbrendan 			mutex_exit(&l2arc_dev_mtx);
3962*5450Sbrendan 			continue;
3963*5450Sbrendan 		}
3964*5450Sbrendan 
3965*5450Sbrendan 		/*
3966*5450Sbrendan 		 * Avoid contributing to memory pressure.
3967*5450Sbrendan 		 */
3968*5450Sbrendan 		if (arc_reclaim_needed()) {
3969*5450Sbrendan 			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
3970*5450Sbrendan 			mutex_exit(&l2arc_dev_mtx);
3971*5450Sbrendan 			continue;
3972*5450Sbrendan 		}
3973*5450Sbrendan 
3974*5450Sbrendan 		/*
3975*5450Sbrendan 		 * Pick the next l2arc device to write to, which in turn
3976*5450Sbrendan 		 * selects the next spa to feed from: dev->l2ad_spa.
3977*5450Sbrendan 		 */
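		/*
		 * Presumably l2arc_dev_get_next() (defined above)
		 * rotates devices round-robin via l2arc_dev_last, so
		 * multiple cache devices share the feed evenly.
		 */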
3978*5450Sbrendan 		if ((dev = l2arc_dev_get_next()) == NULL) {
3979*5450Sbrendan 			mutex_exit(&l2arc_dev_mtx);
3980*5450Sbrendan 			continue;
3981*5450Sbrendan 		}
3982*5450Sbrendan 		spa = dev->l2ad_spa;
3983*5450Sbrendan 		ASSERT(spa != NULL);
3984*5450Sbrendan 		ARCSTAT_BUMP(arcstat_l2_feeds);
3985*5450Sbrendan 
3986*5450Sbrendan 		/*
3987*5450Sbrendan 		 * Evict L2ARC buffers that will be overwritten.
3988*5450Sbrendan 		 */
3989*5450Sbrendan 		l2arc_evict(dev, dev->l2ad_write, B_FALSE);
3990*5450Sbrendan 
3991*5450Sbrendan 		/*
3992*5450Sbrendan 		 * Write ARC buffers.
3993*5450Sbrendan 		 */
3994*5450Sbrendan 		l2arc_write_buffers(spa, dev);
3995*5450Sbrendan 		mutex_exit(&l2arc_dev_mtx);
3996*5450Sbrendan 	}
3997*5450Sbrendan 
3998*5450Sbrendan 	l2arc_thread_exit = 0;
3999*5450Sbrendan 	cv_broadcast(&l2arc_feed_thr_cv);
4000*5450Sbrendan 	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
4001*5450Sbrendan 	thread_exit();
4002*5450Sbrendan }
4003*5450Sbrendan 
4004*5450Sbrendan /*
4005*5450Sbrendan  * Add a vdev for use by the L2ARC.  By this point the spa has already
4006*5450Sbrendan  * validated the vdev and opened it.
4007*5450Sbrendan  */
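/*
 * A hypothetical call site (names and offsets illustrative only; the
 * real caller lives in the spa layer):
 *
 *	l2arc_add_vdev(spa, vd, VDEV_LABEL_START_SIZE,
 *	    vd->vdev_asize - VDEV_LABEL_START_SIZE);
 *
 * i.e. the cached region would skip the leading vdev labels.
 */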
4008*5450Sbrendan void
4009*5450Sbrendan l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
4010*5450Sbrendan {
4011*5450Sbrendan 	l2arc_dev_t *adddev;
4012*5450Sbrendan 
4013*5450Sbrendan 	/*
4014*5450Sbrendan 	 * Create a new l2arc device entry.
4015*5450Sbrendan 	 */
4016*5450Sbrendan 	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4017*5450Sbrendan 	adddev->l2ad_spa = spa;
4018*5450Sbrendan 	adddev->l2ad_vdev = vd;
4019*5450Sbrendan 	adddev->l2ad_write = l2arc_write_max;
4020*5450Sbrendan 	adddev->l2ad_start = start;
4021*5450Sbrendan 	adddev->l2ad_end = end;
4022*5450Sbrendan 	adddev->l2ad_hand = adddev->l2ad_start;
4023*5450Sbrendan 	adddev->l2ad_evict = adddev->l2ad_start;
4024*5450Sbrendan 	adddev->l2ad_first = B_TRUE;
4025*5450Sbrendan 	ASSERT3U(adddev->l2ad_write, >, 0);
4026*5450Sbrendan 
4027*5450Sbrendan 	/*
4028*5450Sbrendan 	 * This is a list of all ARC buffers that are still valid on the
4029*5450Sbrendan 	 * device.
4030*5450Sbrendan 	 */
4031*5450Sbrendan 	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4032*5450Sbrendan 	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4033*5450Sbrendan 	    offsetof(arc_buf_hdr_t, b_l2node));
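	/*
	 * Buffers link onto this list through b_l2node, a list node
	 * separate from the one used by the ARC state lists, so a
	 * header can sit on both at once.
	 */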
4034*5450Sbrendan 
4035*5450Sbrendan 	spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);
4036*5450Sbrendan 
4037*5450Sbrendan 	/*
4038*5450Sbrendan 	 * Add device to global list
4039*5450Sbrendan 	 */
4040*5450Sbrendan 	mutex_enter(&l2arc_dev_mtx);
4041*5450Sbrendan 	list_insert_head(l2arc_dev_list, adddev);
4042*5450Sbrendan 	atomic_inc_64(&l2arc_ndev);
4043*5450Sbrendan 	mutex_exit(&l2arc_dev_mtx);
4044*5450Sbrendan }
4045*5450Sbrendan 
4046*5450Sbrendan /*
4047*5450Sbrendan  * Remove a vdev from the L2ARC.
4048*5450Sbrendan  */
4049*5450Sbrendan void
4050*5450Sbrendan l2arc_remove_vdev(vdev_t *vd)
4051*5450Sbrendan {
4052*5450Sbrendan 	l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4053*5450Sbrendan 
4054*5450Sbrendan 	/*
4055*5450Sbrendan 	 * The spa config lock can only be grabbed once all cache device
4056*5450Sbrendan 	 * writes have completed, so no writes can be in flight here.
4057*5450Sbrendan 	 */
4058*5450Sbrendan 	ASSERT3U(l2arc_writes_sent, ==, l2arc_writes_done);
4059*5450Sbrendan 
4060*5450Sbrendan 	/*
4061*5450Sbrendan 	 * Find the device by vdev
4062*5450Sbrendan 	 */
4063*5450Sbrendan 	mutex_enter(&l2arc_dev_mtx);
4064*5450Sbrendan 	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4065*5450Sbrendan 		nextdev = list_next(l2arc_dev_list, dev);
4066*5450Sbrendan 		if (vd == dev->l2ad_vdev) {
4067*5450Sbrendan 			remdev = dev;
4068*5450Sbrendan 			break;
4069*5450Sbrendan 		}
4070*5450Sbrendan 	}
4071*5450Sbrendan 	ASSERT(remdev != NULL);
4072*5450Sbrendan 
4073*5450Sbrendan 	/*
4074*5450Sbrendan 	 * Remove device from global list
4075*5450Sbrendan 	 */
4076*5450Sbrendan 	list_remove(l2arc_dev_list, remdev);
4077*5450Sbrendan 	l2arc_dev_last = NULL;		/* may have been invalidated */
4078*5450Sbrendan 
4079*5450Sbrendan 	/*
4080*5450Sbrendan 	 * Clear all buflists and ARC references; this flushes the device.
4081*5450Sbrendan 	 */
4082*5450Sbrendan 	l2arc_evict(remdev, 0, B_TRUE);
4083*5450Sbrendan 	list_destroy(remdev->l2ad_buflist);
4084*5450Sbrendan 	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4085*5450Sbrendan 	kmem_free(remdev, sizeof (l2arc_dev_t));
4086*5450Sbrendan 
4087*5450Sbrendan 	atomic_dec_64(&l2arc_ndev);
4088*5450Sbrendan 	mutex_exit(&l2arc_dev_mtx);
4089*5450Sbrendan }
4090*5450Sbrendan 
4091*5450Sbrendan void
4092*5450Sbrendan l2arc_init()
4093*5450Sbrendan {
4094*5450Sbrendan 	l2arc_thread_exit = 0;
4095*5450Sbrendan 	l2arc_ndev = 0;
4096*5450Sbrendan 	l2arc_writes_sent = 0;
4097*5450Sbrendan 	l2arc_writes_done = 0;
4098*5450Sbrendan 
4099*5450Sbrendan 	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4100*5450Sbrendan 	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4101*5450Sbrendan 	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4102*5450Sbrendan 	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4103*5450Sbrendan 	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4104*5450Sbrendan 
4105*5450Sbrendan 	l2arc_dev_list = &L2ARC_dev_list;
4106*5450Sbrendan 	l2arc_free_on_write = &L2ARC_free_on_write;
4107*5450Sbrendan 	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4108*5450Sbrendan 	    offsetof(l2arc_dev_t, l2ad_node));
4109*5450Sbrendan 	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4110*5450Sbrendan 	    offsetof(l2arc_data_free_t, l2df_list_node));
4111*5450Sbrendan 
4112*5450Sbrendan 	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4113*5450Sbrendan 	    TS_RUN, minclsyspri);
4114*5450Sbrendan }
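/*
 * l2arc_init() is expected to run once, early (presumably from
 * arc_init()), before any pools are imported; the feed thread created
 * here runs until l2arc_fini() asks it to exit.
 */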
4115*5450Sbrendan 
4116*5450Sbrendan void
4117*5450Sbrendan l2arc_fini()
4118*5450Sbrendan {
4119*5450Sbrendan 	mutex_enter(&l2arc_feed_thr_lock);
4120*5450Sbrendan 	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
4121*5450Sbrendan 	l2arc_thread_exit = 1;
4122*5450Sbrendan 	while (l2arc_thread_exit != 0)
4123*5450Sbrendan 		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4124*5450Sbrendan 	mutex_exit(&l2arc_feed_thr_lock);
4125*5450Sbrendan 
4126*5450Sbrendan 	mutex_destroy(&l2arc_feed_thr_lock);
4127*5450Sbrendan 	cv_destroy(&l2arc_feed_thr_cv);
4128*5450Sbrendan 	mutex_destroy(&l2arc_dev_mtx);
4129*5450Sbrendan 	mutex_destroy(&l2arc_buflist_mtx);
4130*5450Sbrendan 	mutex_destroy(&l2arc_free_on_write_mtx);
4131*5450Sbrendan 
4132*5450Sbrendan 	list_destroy(l2arc_dev_list);
4133*5450Sbrendan 	list_destroy(l2arc_free_on_write);
4134*5450Sbrendan }